blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1925c14fb7975bde7b1a13295e923c9cc4a022d7 | 338be5c20c24e10f11f0fea4a1a156dc4e4a9922 | /greenleaf/config/gunicorn.conf.py | ff0f6aab1ec9aec6ec500de100e1811f081f1fa0 | [] | no_license | Jimiliani/my_python | a532c332683a0f795bff5ed6b15db5c961e017d4 | 28f078d9499854b2b09fbd50686beb8cfdc12227 | refs/heads/master | 2022-10-27T08:38:04.449331 | 2020-10-06T10:41:10 | 2020-10-06T10:41:10 | 241,277,471 | 0 | 1 | null | 2022-10-15T16:06:02 | 2020-02-18T05:07:19 | Python | UTF-8 | Python | false | false | 66 | py | bind = '127.0.0.1:8000'
workers = 2  # number of worker processes handling requests
user = 'dima'  # OS user the worker processes run as
timeout = 60  # seconds a silent worker may run before being killed/restarted
| [
"dikorolyov@mail.ru"
] | dikorolyov@mail.ru |
d14bf3bde060c5cda07a825296dee074f729f51f | 5db0fab37c2b8a618d85d3b60fab9f806c416474 | /src/python/pants/backend/go/util_rules/build_pkg_test.py | be4d1f7e5dab9561e76f8e82e785b6d2d8bc7090 | [
"Apache-2.0"
] | permissive | pantsbuild/pants | 4988d1ac5474ec95f94ce2218aeb759401e4b011 | 98cbda8545f0d58c586ed2daa76fefd729d5e0d5 | refs/heads/main | 2023-09-05T03:44:17.646899 | 2023-09-01T19:52:09 | 2023-09-01T19:52:09 | 7,209,075 | 2,708 | 593 | Apache-2.0 | 2023-09-14T19:33:33 | 2012-12-17T17:39:04 | Python | UTF-8 | Python | false | false | 6,371 | py | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os.path
from textwrap import dedent
import pytest
from pants.backend.go import target_type_rules
from pants.backend.go.target_types import GoModTarget
from pants.backend.go.util_rules import (
assembly,
build_pkg,
first_party_pkg,
go_mod,
import_analysis,
link,
sdk,
third_party_pkg,
)
from pants.backend.go.util_rules.build_opts import GoBuildOptions
from pants.backend.go.util_rules.build_pkg import (
BuildGoPackageRequest,
BuiltGoPackage,
FallibleBuiltGoPackage,
)
from pants.engine.fs import Snapshot
from pants.engine.rules import QueryRule
from pants.testutil.rule_runner import RuleRunner
from pants.util.strutil import path_safe
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Pytest fixture: a RuleRunner with every rule set needed to build Go packages.

    Registers the sdk/assembly/build/import-analysis/go_mod/first- and third-party
    package rules plus target-type rules, and exposes QueryRules so tests can
    request (Fallible)BuiltGoPackage results directly.
    """
    rule_runner = RuleRunner(
        rules=[
            *sdk.rules(),
            *assembly.rules(),
            *build_pkg.rules(),
            *import_analysis.rules(),
            *go_mod.rules(),
            *first_party_pkg.rules(),
            *link.rules(),
            *third_party_pkg.rules(),
            *target_type_rules.rules(),
            QueryRule(BuiltGoPackage, [BuildGoPackageRequest]),
            QueryRule(FallibleBuiltGoPackage, [BuildGoPackageRequest]),
        ],
        target_types=[GoModTarget],
    )
    # PATH must be inherited so the rules can locate the host Go toolchain.
    rule_runner.set_options([], env_inherit={"PATH"})
    return rule_runner
def assert_built(
    rule_runner: RuleRunner, request: BuildGoPackageRequest, *, expected_import_paths: list[str]
) -> None:
    """Build the requested package and assert the resulting import-path map and files.

    Each expected import path must map to ``__pkgs__/<path-safe>/__pkg__.a`` and the
    built digest must contain exactly those archive files.
    """
    built = rule_runner.request(BuiltGoPackage, [request])
    expected_mapping = {}
    for import_path in expected_import_paths:
        expected_mapping[import_path] = os.path.join(
            "__pkgs__", path_safe(import_path), "__pkg__.a"
        )
    snapshot = rule_runner.request(Snapshot, [built.digest])
    assert dict(built.import_paths_to_pkg_a_files) == expected_mapping
    assert sorted(snapshot.files) == sorted(expected_mapping.values())
def test_build_pkg(rule_runner: RuleRunner) -> None:
    """Build a three-package chain (main -> dep -> dep/transitive) and check that each
    build result exposes its own archive plus those of its transitive dependencies."""
    transitive_dep = BuildGoPackageRequest(
        import_path="example.com/foo/dep/transitive",
        pkg_name="transitive",
        dir_path="dep/transitive",
        build_opts=GoBuildOptions(),
        go_files=("f.go",),
        digest=rule_runner.make_snapshot(
            {
                "dep/transitive/f.go": dedent(
                    """\
                    package transitive

                    func Quote(s string) string {
                        return ">>" + s + "<<"
                    }
                    """
                )
            }
        ).digest,
        s_files=(),
        direct_dependencies=(),
        minimum_go_version=None,
    )
    direct_dep = BuildGoPackageRequest(
        import_path="example.com/foo/dep",
        pkg_name="dep",
        dir_path="dep",
        build_opts=GoBuildOptions(),
        go_files=("f.go",),
        digest=rule_runner.make_snapshot(
            {
                "dep/f.go": dedent(
                    """\
                    package dep

                    import "example.com/foo/dep/transitive"

                    func Quote(s string) string {
                        return transitive.Quote(s)
                    }
                    """
                )
            }
        ).digest,
        s_files=(),
        direct_dependencies=(transitive_dep,),
        minimum_go_version=None,
    )
    main = BuildGoPackageRequest(
        import_path="example.com/foo",
        pkg_name="foo",
        dir_path="",
        build_opts=GoBuildOptions(),
        go_files=("f.go",),
        digest=rule_runner.make_snapshot(
            {
                "f.go": dedent(
                    """\
                    package foo

                    import "example.com/foo/dep"

                    func main() {
                        dep.Quote("Hello world!")
                    }
                    """
                )
            }
        ).digest,
        s_files=(),
        direct_dependencies=(direct_dep,),
        minimum_go_version=None,
    )
    # The leaf package only produces its own archive.
    assert_built(
        rule_runner, transitive_dep, expected_import_paths=["example.com/foo/dep/transitive"]
    )
    # Each level up additionally carries the archives of everything beneath it.
    assert_built(
        rule_runner,
        direct_dep,
        expected_import_paths=["example.com/foo/dep", "example.com/foo/dep/transitive"],
    )
    assert_built(
        rule_runner,
        main,
        expected_import_paths=[
            "example.com/foo",
            "example.com/foo/dep",
            "example.com/foo/dep/transitive",
        ],
    )
def test_build_invalid_pkg(rule_runner: RuleRunner) -> None:
    """A package with a Go syntax error must fail, and the failure must propagate to
    dependents: building `main` surfaces the compile error from its broken dep."""
    invalid_dep = BuildGoPackageRequest(
        import_path="example.com/foo/dep",
        pkg_name="dep",
        dir_path="dep",
        build_opts=GoBuildOptions(),
        go_files=("f.go",),
        # Not valid Go: the file does not start with a package statement.
        digest=rule_runner.make_snapshot({"dep/f.go": "invalid!!!"}).digest,
        s_files=(),
        direct_dependencies=(),
        minimum_go_version=None,
    )
    main = BuildGoPackageRequest(
        import_path="example.com/foo",
        pkg_name="main",
        dir_path="",
        build_opts=GoBuildOptions(),
        go_files=("f.go",),
        digest=rule_runner.make_snapshot(
            {
                "f.go": dedent(
                    """\
                    package main

                    import "example.com/foo/dep"

                    func main() {
                        dep.Quote("Hello world!")
                    }
                    """
                )
            }
        ).digest,
        s_files=(),
        direct_dependencies=(invalid_dep,),
        minimum_go_version=None,
    )
    # Building the broken package directly reports the compiler diagnostic.
    invalid_direct_result = rule_runner.request(FallibleBuiltGoPackage, [invalid_dep])
    assert invalid_direct_result.output is None
    assert invalid_direct_result.exit_code == 1
    assert (
        invalid_direct_result.stdout
        == "dep/f.go:1:1: syntax error: package statement must be first\n"
    )
    # Building a dependent package surfaces the same diagnostic from the dep.
    invalid_dep_result = rule_runner.request(FallibleBuiltGoPackage, [main])
    assert invalid_dep_result.output is None
    assert invalid_dep_result.exit_code == 1
    assert (
        invalid_dep_result.stdout == "dep/f.go:1:1: syntax error: package statement must be first\n"
    )
| [
"noreply@github.com"
] | pantsbuild.noreply@github.com |
f6bc336f85c826b416c7a82c6d5707a2e558c142 | cad999eacee16dc0e001a57f50b5d8b0f4d4ebf6 | /p202.py | a2cc8d0b585a2940e0c568ce938cd4db057be5f3 | [] | no_license | divyanarra0/pythonprogram | 8694a41ba3b39eb44a94a693eac3f7f5f18b588b | 10d8f59a472ccd4548771bad29be84a1a44854d8 | refs/heads/master | 2020-03-27T10:32:21.664657 | 2019-05-14T07:31:00 | 2019-05-14T07:31:00 | 146,427,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 723 | py | def isVowel(c):
c = c.lower()
if (c == 'a' or c == 'e' or
c == 'i' or c == 'o' or c == 'u'):
return True
return False
# Function to return first X vowels
def firstXvowels(s, x):
    """Return the first ``x`` vowels of ``s`` (original case preserved).

    Iterates the string directly instead of indexing with ``range(0, len(s), 1)``
    and tests vowels with a single case-folded membership check.

    Returns:
        The string of the first ``x`` vowels as soon as ``x`` of them are seen,
        or ``"-1"`` if ``s`` contains fewer than ``x`` vowels.
    """
    result = ""
    for ch in s:
        if ch.lower() in "aeiou":
            result += ch
            # Stop as soon as the desired number of vowels is collected.
            if len(result) == x:
                return result
    # Fewer than x vowels in the whole string.
    return "-1"
# Driver code: demo run printing the first 3 vowels of a sample string.
if __name__ == '__main__':
    text = "asdaqrew"  # renamed from ``str`` to avoid shadowing the builtin
    x = 3
    print(firstXvowels(text, x))
| [
"noreply@github.com"
] | divyanarra0.noreply@github.com |
7dc0c27e821890eced9d0802e8432f93546a7563 | 3ce946b7fac93c237a073c5395ba2f3d293a3c52 | /src/cosmosdb-preview/azext_cosmosdb_preview/vendored_sdks/azure_mgmt_cosmosdb/operations/_collection_operations.py | cb47c68b013ff541760c6fa0f4aa019997d88c17 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | imabedalghafer/azure-cli-extensions | a7e05873aaf1bfa164e89f8fe80a80e7240abc78 | 017848c33388d48b382414db66656965f1c1874f | refs/heads/main | 2022-11-06T11:43:14.960651 | 2022-10-17T12:12:55 | 2022-10-17T12:12:55 | 403,272,601 | 2 | 0 | MIT | 2021-09-05T09:59:12 | 2021-09-05T09:59:11 | null | UTF-8 | Python | false | false | 21,317 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_metrics_request(
    resource_group_name: str,
    account_name: str,
    database_rid: str,
    collection_rid: str,
    subscription_id: str,
    *,
    filter: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the collection ``.../metrics`` endpoint.

    ``filter`` is required for this endpoint and is sent as the ``$filter``
    OData query parameter.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Caller-supplied values win; otherwise fall back to the generated default.
    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metrics",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "accountName": _SERIALIZER.url(
            "account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
        ),
        "databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
        "collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    _params["$filter"] = _SERIALIZER.query("filter", filter, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_usages_request(
    resource_group_name: str,
    account_name: str,
    database_rid: str,
    collection_rid: str,
    subscription_id: str,
    *,
    filter: Optional[str] = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the collection ``.../usages`` endpoint.

    Unlike the metrics endpoint, ``filter`` is optional here and the ``$filter``
    query parameter is only emitted when a value is supplied.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/usages",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "accountName": _SERIALIZER.url(
            "account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
        ),
        "databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
        "collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if filter is not None:
        _params["$filter"] = _SERIALIZER.query("filter", filter, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
def build_list_metric_definitions_request(
    resource_group_name: str,
    account_name: str,
    database_rid: str,
    collection_rid: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for the collection ``.../metricDefinitions`` endpoint.

    This endpoint takes no OData filter; only the api-version query parameter.
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", _params.pop("api-version", "2022-08-15-preview"))  # type: str
    accept = _headers.pop("Accept", "application/json")

    # Construct URL
    _url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metricDefinitions",
    )  # pylint: disable=line-too-long
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "accountName": _SERIALIZER.url(
            "account_name", account_name, "str", max_length=50, min_length=3, pattern=r"^[a-z0-9]+(-[a-z0-9]+)*"
        ),
        "databaseRid": _SERIALIZER.url("database_rid", database_rid, "str"),
        "collectionRid": _SERIALIZER.url("collection_rid", collection_rid, "str"),
    }

    _url = _format_url_section(_url, **path_format_arguments)

    # Construct parameters
    _params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Construct headers
    _headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class CollectionOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.cosmosdb.CosmosDBManagementClient`'s
        :attr:`collection` attribute.
    """

    models = _models

    def __init__(self, *args, **kwargs):
        # The client injects (client, config, serializer, deserializer) either
        # positionally or by keyword; accept both forms.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list_metrics(
        self,
        resource_group_name: str,
        account_name: str,
        database_rid: str,
        collection_rid: str,
        filter: str,
        **kwargs: Any
    ) -> Iterable["_models.Metric"]:
        """Retrieves the metrics determined by the given filter for the given database account and
        collection.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name. Required.
        :type account_name: str
        :param database_rid: Cosmos DB database rid. Required.
        :type database_rid: str
        :param collection_rid: Cosmos DB collection rid. Required.
        :type collection_rid: str
        :param filter: An OData filter expression that describes a subset of metrics to return. The
         parameters that can be filtered are name.value (name of the metric, can have an or of multiple
         names), startTime, endTime, and timeGrain. The supported operator is eq. Required.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Metric or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.Metric]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.MetricListResult]

        # HTTP status codes mapped to the typed exceptions raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; later pages: follow next_link.
            if not next_link:

                request = build_list_metrics_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    database_rid=database_rid,
                    collection_rid=collection_rid,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list_metrics.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation_token, items).
            deserialized = self._deserialize("MetricListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_metrics.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metrics"}  # type: ignore

    @distributed_trace
    def list_usages(
        self,
        resource_group_name: str,
        account_name: str,
        database_rid: str,
        collection_rid: str,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> Iterable["_models.Usage"]:
        """Retrieves the usages (most recent storage data) for the given collection.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name. Required.
        :type account_name: str
        :param database_rid: Cosmos DB database rid. Required.
        :type database_rid: str
        :param collection_rid: Cosmos DB collection rid. Required.
        :type collection_rid: str
        :param filter: An OData filter expression that describes a subset of usages to return. The
         supported parameter is name.value (name of the metric, can have an or of multiple names).
         Default value is None.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Usage or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.Usage]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.UsagesResult]

        # HTTP status codes mapped to the typed exceptions raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; later pages: follow next_link.
            if not next_link:

                request = build_list_usages_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    database_rid=database_rid,
                    collection_rid=collection_rid,
                    subscription_id=self._config.subscription_id,
                    filter=filter,
                    api_version=api_version,
                    template_url=self.list_usages.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation_token, items).
            deserialized = self._deserialize("UsagesResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_usages.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/usages"}  # type: ignore

    @distributed_trace
    def list_metric_definitions(
        self, resource_group_name: str, account_name: str, database_rid: str, collection_rid: str, **kwargs: Any
    ) -> Iterable["_models.MetricDefinition"]:
        """Retrieves metric definitions for the given collection.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param account_name: Cosmos DB database account name. Required.
        :type account_name: str
        :param database_rid: Cosmos DB database rid. Required.
        :type database_rid: str
        :param collection_rid: Cosmos DB collection rid. Required.
        :type collection_rid: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either MetricDefinition or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.cosmosdb.models.MetricDefinition]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.MetricDefinitionsListResult]

        # HTTP status codes mapped to the typed exceptions raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; later pages: follow next_link.
            if not next_link:

                request = build_list_metric_definitions_request(
                    resource_group_name=resource_group_name,
                    account_name=account_name,
                    database_rid=database_rid,
                    collection_rid=collection_rid,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_metric_definitions.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore

            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and return (continuation_token, items).
            deserialized = self._deserialize("MetricDefinitionsListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_metric_definitions.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DocumentDB/databaseAccounts/{accountName}/databases/{databaseRid}/collections/{collectionRid}/metricDefinitions"}  # type: ignore
| [
"noreply@github.com"
] | imabedalghafer.noreply@github.com |
7e83a73e874a3f888d9ee5d37a158ac37bfc2f6d | d3ce122feb39fcd260fbcb49fbeb7a7557888416 | /scripts/keys.py | 44d08346943bee524ebf2bc4cd3750ebf359a799 | [] | no_license | HackTheDeep/map-collections-geocoding | 7ef5d18964a330501ec3a36584fec4669b9fbb1a | e95cccd180e1cf788269488bb18c92fbf9c0f9c3 | refs/heads/master | 2022-12-12T22:59:40.949808 | 2018-02-13T04:36:53 | 2018-02-13T04:36:53 | 121,053,371 | 4 | 1 | null | 2022-12-08T00:53:21 | 2018-02-10T20:53:31 | Python | UTF-8 | Python | false | false | 153 | py | googleKey='AIzaSyA3k5SpAOOdB0Bw92ZYcHDb6GTzxlJvMBg'
# NOTE(review): API credentials hard-coded in source control — these should be
# rotated and loaded from environment variables or an untracked config file.
geoCodeKey='oceanocoders'
bingKey='AjEl9PcIPe1ZAyZfPf2fdpIy8vTMI02tNGE61HUs1-LtOKV_Zs6R93UThhEp9Bhw'
| [
"tyler.cummings.bond@gmail.com"
] | tyler.cummings.bond@gmail.com |
1788289d66ed3776b3faa636f2a39e4c2ee2cda3 | 533978a9b5a87ce9fc221e76c52f5c54b9ae46c7 | /helper_scripts/compare_train_vs_test_localizations.py | 8650da3f30e36a7d3964669ada24c6272d1f362a | [
"Apache-2.0"
] | permissive | grapeup/ubaar-competition | 00009184949dfb0cd97bbf52016d903defe2331e | 28de972d6beb13343c537fc030101be672a852a3 | refs/heads/main | 2023-03-12T22:57:21.909099 | 2021-02-19T15:09:50 | 2021-02-19T15:09:50 | 338,268,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | from feature_extraction.path_utils import project_root
import plotly.express as px
import pandas as pd
import os
if __name__ == '__main__':
train = pd.read_csv(os.path.join(project_root(), 'data', 'raw', 'ubaar-competition', 'train.csv'),
encoding="utf-8", index_col="ID")
test = pd.read_csv(os.path.join(project_root(), 'data', 'raw', 'ubaar-competition', 'test.csv'),
encoding="utf-8", index_col="ID")
train['test'] = False
test['test'] = True
train = train.append(test)
coords = train[["sourceLatitude", "sourceLongitude", "destinationLatitude", "destinationLongitude", "test"]]
fig = px.scatter_mapbox(coords, lat="sourceLatitude", lon="sourceLongitude",
color='test', title="Train/test data coordinates")
fig.update_layout(mapbox_style="stamen-terrain", mapbox_zoom=2, mapbox_center_lat=41,
margin={"r": 0, "t": 0, "l": 0, "b": 0})
fig.write_html(os.path.join(project_root(), "data", "processed", "train_test.html"))
fig.show()
| [
"smolendawid@gmail.com"
] | smolendawid@gmail.com |
795e893e6139d4efd30d9e2724f3d4e25c02973e | c4a496ce1d0556b44c00b2625dc77174424785ae | /timer.py | d07269688900649ebc79e2a541f03d879802de11 | [] | no_license | T0aD/pyawstats | bf26f210c0834fdc1ec33b3bff142e9b2f303793 | 15a4997fd2a1ff6f330c948545385cc02b2d79d6 | refs/heads/master | 2021-01-19T13:50:10.805043 | 2014-04-25T18:57:00 | 2014-04-25T18:57:00 | 4,284,036 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,266 | py | import time
class timeit():
    """Wall-clock stopwatch: tracks total time since construction and a lap marker."""

    def __init__(self):
        # Total timer starts now; the lap marker is initialised to the same instant.
        self.start = time.time()
        self.save()

    def save(self):
        """Reset the lap marker to the current instant."""
        self.current = time.time()

    def show(self, msg):
        """Print elapsed-total and elapsed-lap times in ms, then start a new lap."""
        total_ms = '%.02f' % ((time.time() - self.start) * 1000)
        lap_ms = '%.02f' % ((time.time() - self.current) * 1000)
        print('TIMER: %9s - %9s ms > %s' % (total_ms, lap_ms, msg))
        self.save()
class timer:
    # Class-level (shared) list of every lines-per-second sample recorded by stop().
    av = []

    def __init__(self, lines=0):
        """Start timing from now with an initial processed-line counter."""
        self.time, self.lines = time.time(), lines

    def average(self):
        """Print the mean of all recorded samples; does nothing when none exist."""
        if not self.av:
            return
        print('Average: %5d lines per sec' % (sum(self.av) / len(self.av),))

    def stop(self, msg, lines, size=False):
        """Report throughput since the previous checkpoint and reset the checkpoint.

        ``lines`` is the new cumulative line count; the delta against the stored
        count divided by the elapsed time gives the rate, which is also appended
        to the shared ``av`` sample list.
        """
        now = time.time()
        elapsed = now - self.time
        delta_lines = lines - self.lines
        self.lines, self.time = lines, now
        rate = delta_lines / elapsed
        info = '%5d l/s' % (rate,)
        self.av.append(int(rate))
        # Short intervals are reported in ms, longer ones in seconds.
        if elapsed < 0.5:
            print('%s took %0.2f ms %s' % (msg, elapsed * 1000, info))
        else:
            print('%s took %0.2f s %s' % (msg, elapsed, info))
| [
"julien.perez@epsylonia.net"
] | julien.perez@epsylonia.net |
f3daaf16e9541e5d5cf9d66937ef0f7fec46e07e | ccc0f103b66343dfb8b446c24c1f8f4fd4e8024a | /Leetcode/LeetCode/980-unique-paths-III.py | 9c556698dc638180b0ab0788b397f1c7d19713f3 | [] | no_license | eli-byers/JS_algos | ed17db33d817a226e09627234296d2ffcee2ec93 | e2b1baedc75b33e93bd446d1c428f385f13b93a0 | refs/heads/master | 2021-03-27T09:24:06.125536 | 2019-07-07T23:38:05 | 2019-07-07T23:38:05 | 57,906,239 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | '''
On a 2-dimensional grid, there are 4 types of squares:
1 represents the starting square. There is exactly one starting square.
2 represents the ending square. There is exactly one ending square.
0 represents empty squares we can walk over.
-1 represents obstacles that we cannot walk over.
Return the number of 4-directional walks from the starting square to the ending square, that walk over every non-obstacle square exactly once.
Example 1:
Input: [[1,0,0,0],[0,0,0,0],[0,0,2,-1]]
Output: 2
Explanation: We have the following two paths:
1. (0,0),(0,1),(0,2),(0,3),(1,3),(1,2),(1,1),(1,0),(2,0),(2,1),(2,2)
2. (0,0),(1,0),(2,0),(2,1),(1,1),(0,1),(0,2),(0,3),(1,3),(1,2),(2,2)
Example 2:
Input: [[1,0,0,0],[0,0,0,0],[0,0,0,2]]
Output: 4
Explanation: We have the following four paths:
1. (0,0),(0,1),(0,2),(0,3),(1,3),(1,2),(1,1),(1,0),(2,0),(2,1),(2,2),(2,3)
2. (0,0),(0,1),(1,1),(1,0),(2,0),(2,1),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3)
3. (0,0),(1,0),(2,0),(2,1),(2,2),(1,2),(1,1),(0,1),(0,2),(0,3),(1,3),(2,3)
4. (0,0),(1,0),(2,0),(2,1),(1,1),(0,1),(0,2),(0,3),(1,3),(1,2),(2,2),(2,3)
Example 3:
Input: [[0,1],[2,0]]
Output: 0
Explanation:
There is no path that walks over every empty square exactly once.
Note that the starting and ending square can be anywhere in the grid.
Note:
1 <= grid.length * grid[0].length <= 20
'''
| [
"elibyers.dev@gmail.com"
] | elibyers.dev@gmail.com |
751cf05a4a081982c332d1d32c6bfbd742ac75f9 | 40ca01569e9c8ed6d2312447fac604229bdeace3 | /fabfile.py | bb0342de308a2dc2d08064b855fa24d83163edb7 | [
"MIT"
] | permissive | deniskrumko/izyan-poker | c393c9c4cb401d3180a97075fde59ff2e371a77d | ce70c9c8f761409adad289809e5220237b312407 | refs/heads/master | 2021-06-14T08:59:03.364660 | 2020-02-11T06:48:00 | 2020-02-11T06:48:00 | 200,377,031 | 7 | 2 | MIT | 2021-06-10T18:43:43 | 2019-08-03T13:11:06 | Python | UTF-8 | Python | true | false | 3,424 | py | from fabric.api import task, local
def print_msg(msg, error=False):
    """Print `msg` to the console: green for normal output, red for errors."""
    # ANSI escape codes: 92 = bright green, 91 = bright red, 00 = reset.
    color = '\033[91m' if error else '\033[92m'
    print('{0}\n{1}\n\033[00m'.format(color, msg))
# MAIN COMMANDS
# ============================================================================
@task
def manage(command):
    """Run a ``python3 manage.py`` subcommand via the local shell."""
    return local('python3 manage.py {}'.format(command))


@task
def run():
    """Run the development server."""
    return manage('runserver')


@task
def shell():
    """Run the interactive Django shell (``shell_plus``)."""
    return manage('shell_plus')
# GIT
# ============================================================================
@task
def push(force=False):
    """Push changes to all servers."""
    # Append ``--force`` to both pushes when requested.
    extra = ' --force' if force else ''
    print_msg('1. Pushing to origin')
    local('git push origin master --tags' + extra)
    print_msg('2. Pushing to Heroku')
    local('git push heroku master' + extra)
# LOCALES
# ============================================================================
@task
def makemessages():
    """Extract translatable strings for the ``ru`` locale."""
    return manage('makemessages -l ru --no-location')


@task
def compilemessages():
    """Compile translation message files."""
    return manage('compilemessages')


# MIGRATIONS AND DATABASE
# ============================================================================
@task
def makemigrations():
    """Make migrations for database."""
    manage('makemigrations')


@task
def migrate():
    """Apply migrations to database."""
    print_msg('Applying migrations')
    manage('migrate')


@task
def createsuperuser(email='root@root.ru'):
    """Create superuser with default credentials."""
    print_msg('Creating superuser')
    return manage('createsuperuser --username root --email {}'.format(email))


@task
def resetdb():
    """Reset database to initial state (drops media, DB, recreates admin)."""
    # NOTE(review): the message says "scr/media" but the command removes
    # ./media/ — presumably a typo in the message; confirm before changing.
    print_msg('Remove "scr/media" folder')
    local('rm -rf media/')
    print_msg('Reset database')
    manage('reset_db -c --noinput')
    migrate()
    createsuperuser()


# STATIC CHECKS: ISORT AND PEP8
# ============================================================================
@task
def isort():
    """Fix imports formatting."""
    print_msg('Running imports fix')
    local('isort apps config -y -rc')


@task
def pep8(path='apps core'):
    """Check PEP8 errors with flake8 on the given paths."""
    print_msg('Checking PEP8 errors')
    return local('flake8 --config=.flake8 {}'.format(path))


# PIPENV
# ============================================================================
@task
def lock():
    """Lock requirements."""
    print_msg('Locking requirements')
    local('pipenv lock')


@task
def install():
    """Install requirements (including dev packages)."""
    print_msg('Installing DEV requirements')
    local('pipenv install --dev')


# HEROKU
# ============================================================================
@task
def logs():
    """Tail the application logs on Heroku."""
    local('heroku logs --source app --tail')


@task
def scale(value=1):
    """Scale Heroku web dynos to ``value``."""
    local(f'heroku ps:scale web={value}')


@task
def ps():
    """Show Heroku dyno status."""
    local(f'heroku ps')


@task
def runweb():
    """Run the web process locally using Procfile.local."""
    local(f'heroku local web -f Procfile.local')


@task
def python(command):
    """Run an arbitrary ``python`` command on the Heroku dyno."""
    local(f'heroku run python {command}')
| [
"dkrumko@gmail.com"
] | dkrumko@gmail.com |
576b6d9babf5c6a9873f3626e654acd855eb9a57 | e547f7a92e7a1c1d79f8631f9e8ee8a93879a4eb | /src/ecpp_individual_grammar_all_states.py | b9efc69d0dd8291e782f6fe9c3c66b6e7bc673e7 | [] | no_license | gsakkas/seq2parse | 3c33ec7bc6cc6e4abd9e4981e53efdc173b7a7b9 | 7ae0681f1139cb873868727f035c1b7a369c3eb9 | refs/heads/main | 2023-04-09T12:29:37.902066 | 2023-01-18T21:32:12 | 2023-01-18T21:32:12 | 417,597,310 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58,203 | py | """
Error Correcting Python Earley Parser.
@author: Georgios Sakkas, Earley Parser based on Hardik's implementation
"""
import argparse
import re
# from ast import parse
from pathlib import Path
from collections import defaultdict, deque
from itertools import product
from nltk.tree import Tree
from functools import partial
import pygments
from pygments.lexers import get_lexer_by_name
from pygments.token import Text, Name, Number, String, Punctuation, Operator, Keyword
class Rule():
    """
    A single CFG production of the form ``lhs -> rhs``.

    ``lhs`` is a non-terminal name; ``rhs`` is a list of symbol names
    (terminals and/or non-terminals).
    """

    def __init__(self, lhs, rhs):
        self.lhs = lhs
        self.rhs = rhs

    def __contains__(self, sym):
        # Membership means "appears on the right-hand side".
        return sym in self.rhs

    def __eq__(self, other):
        if not isinstance(other, (Rule, ErrorRule)):
            return False
        return (self.lhs, self.rhs) == (other.lhs, other.rhs)

    def __hash__(self):
        return hash((" ".join(self.lhs), " ".join(self.rhs)))

    def __getitem__(self, i):
        # Indexing a rule indexes its right-hand side.
        return self.rhs[i]

    def __len__(self):
        return len(self.rhs)

    def __repr__(self):
        return str(self)

    def __str__(self):
        return '{0} -> {1}'.format(self.lhs, ' '.join(self.rhs))

    def error_score(self):
        """Plain (non-error) rules are free to use."""
        return 0
class ErrorRule(Rule):
    """A production that repairs a syntax error; using it costs the parser."""

    def error_score(self):
        # Indentation and parenthesis repairs are considered "cheap" (0.5);
        # every other error rule costs a full point.
        cheap = ('Err_Dedent', 'Err_Indent', 'Err_Close_Paren', 'Err_Open_Paren')
        return 0.5 if self.lhs in cheap else 1
class Grammar():
    """
    A context-free grammar, stored as a mapping from each non-terminal
    (left-hand side) to the list of its productions.
    """

    def __init__(self):
        # Maps each non-terminal name to the list of its Rule objects.
        self.rules = defaultdict(list)

    def add(self, rule):
        """Add ``rule`` unless an equal production is already present."""
        bucket = self.rules[rule.lhs]
        if rule not in bucket:
            bucket.append(rule)

    def get_alphabet(self):
        """Return the set of terminal symbols used on any right-hand side."""
        symbols = {sym
                   for rule_list in self.rules.values()
                   for rule in rule_list
                   for sym in rule.rhs}
        return {sym for sym in symbols if self.is_terminal(sym)}

    def get_tags(self):
        """Return the set of tag symbols used on any right-hand side."""
        symbols = {sym
                   for rule_list in self.rules.values()
                   for rule in rule_list
                   for sym in rule.rhs}
        return {sym for sym in symbols if self.is_tag(sym)}

    @staticmethod
    def load_grammar(fpath):
        """
        Load a grammar from a text file: one ``lhs -> rhs`` per line, with
        alternative right-hand sides separated by ``<|>``.
        """
        grammar = Grammar()
        with open(fpath) as f:
            for raw_line in f:
                stripped = raw_line.strip()
                if not stripped:
                    continue
                entries = stripped.split('->')
                lhs = entries[0].strip()
                for alternative in entries[1].split('<|>'):
                    grammar.add(Rule(lhs, alternative.strip().split()))
        return grammar

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        # Start-symbol productions come first, then everything else in
        # insertion order.
        lines = [str(rule) for rule in self.rules['S']]
        for lhs, rule_list in self.rules.items():
            if lhs == 'S':
                continue
            lines.extend(str(rule) for rule in rule_list)
        return '\n'.join(lines)

    def __getitem__(self, nt):
        # Productions of a non-terminal; terminals yield an empty list.
        return self.rules[nt]

    def is_terminal(self, sym):
        """A symbol is terminal iff it has no productions."""
        return not self.rules[sym]

    def is_tag(self, sym):
        """
        A tag is a non-terminal all of whose productions expand solely to
        terminals.
        """
        if self.is_terminal(sym):
            return False
        return all(self.is_terminal(s)
                   for rule in self.rules[sym]
                   for s in rule.rhs)

    def is_nullable(self, sym):
        """A non-terminal is nullable iff it has an empty production."""
        if self.is_terminal(sym):
            # Mirrors the original behavior: no answer for terminals.
            return None
        return any(rule.rhs == [] for rule in self.rules[sym])
class ErrorGrammar(Grammar):
    """
    A CFG extended with error-correcting productions, following the
    Aho & Peterson minimum-distance error-correcting parser construction.
    """

    @staticmethod
    def load_grammar(fpath):
        """
        Load an error grammar from a text file: one ``lhs -> rhs`` per line,
        with alternative right-hand sides separated by ``<|>``.
        """
        grammar = ErrorGrammar()
        with open(fpath) as f:
            for line in f:
                line = line.strip()
                if len(line) == 0:
                    continue
                entries = line.split('->')
                lhs = entries[0].strip()
                for rhs in entries[1].split('<|>'):
                    rhssymbols = rhs.strip().split()
                    grammar.add(Rule(lhs, rhssymbols))
        return grammar

    def update_error_grammar(self, changes):
        """
        Extend the grammar with error productions derived from ``changes``.

        Each change appears to be a triple: ch[0] is the edit kind
        ('deleted', 'added', or anything else for a replacement), ch[1] a
        whitespace-separated string of terminals, ch[2] a string of tokens
        whose tags should become repairable — TODO confirm against callers.

        Returns the list of error rules (``Err_*`` / ``InsertErr``) present
        in the grammar afterwards.
        """
        alphabet = self.get_alphabet()
        tags = self.get_tags()
        alphabet.remove('_ENDMARKER_')
        # Second step: per change, find the tags that can produce the
        # changed tokens and add the appropriate error productions.
        all_important_tags = []
        for ch in changes:
            important_terminals = ch[1].split() if ch[1] is not None else []
            important_tags = []
            for tag in tags:
                if ch[2] is not None:
                    for tok in ch[2].split():
                        if any([(tok in rule) for rule in self.rules[tag]]):
                            important_tags.append(tag)
            important_tags = list(set(important_tags))
            if ch[0] == 'deleted':
                # A token must be deleted: allow spurious tokens (H) to
                # precede the tag, and register them as insertable.
                for sym in important_tags:
                    self.add(Rule("Err_" + sym, ["H", sym]))
                    all_important_tags.append(sym)
                for sym in important_terminals:
                    self.add(ErrorRule("InsertErr", [sym]))
            elif ch[0] == 'added':
                # A token must be added: allow the tag to be substituted
                # (Err_Tag) or produced empty.
                for sym in important_tags:
                    self.add(ErrorRule("Err_" + sym, ["Err_Tag"]))
                    all_important_tags.append(sym)
                    self.add(ErrorRule("Err_" + sym, []))
            else:
                # Replacement: combine both repair forms.
                for sym in important_tags:
                    self.add(ErrorRule("Err_" + sym, ["Err_Tag"]))
                    self.add(Rule("Err_" + sym, ["H", sym]))
                    all_important_tags.append(sym)
                    self.add(ErrorRule("Err_" + sym, []))
                for sym in important_terminals:
                    self.add(ErrorRule("Err_Tag", [sym]))
                    self.add(ErrorRule("InsertErr", [sym]))
        all_important_tags = list(set(all_important_tags))
        # First step in Algorithm 1 of Aho's paper: rewrite each ordinary
        # rule so repairable tag positions may expand through their Err_
        # variants.
        added_rules_1 = []
        for key in self.rules:
            if not key.startswith('Err_') and not key.startswith('InsertErr'):
                for rule in self.rules[key]:
                    if rule.error_score() < 1:
                        new_rules_1 = ErrorGrammar.error_rule_1(self, rule, all_important_tags)
                        if new_rules_1:
                            added_rules_1.extend(new_rules_1)
        for rule in added_rules_1:
            self.add(rule)
        # Third step: new start symbol S' and the H chain that absorbs
        # inserted tokens.
        self.add(Rule("S'", ["S"]))
        self.add(Rule("S'", ["S", "InsertErr"]))
        self.add(Rule("H", ["H", "InsertErr"]))
        self.add(Rule("H", ["InsertErr"]))
        error_rules = [r for k in self.rules for r in self.rules[k]]
        error_rules = list(filter(lambda er: er.lhs.startswith('Err_') or er.lhs == 'InsertErr', error_rules))
        return error_rules

    @staticmethod
    def error_rule_1(grammar, rule, itags):
        """
        Generate every variant of ``rule`` where tags from ``itags`` are
        prefixed by their ``Err_`` non-terminal; the unchanged rule itself
        is excluded from the result.
        """
        if grammar.is_tag(rule.lhs):
            return []
        new_rules_rhs = []
        flag = False  # NOTE(review): set but never read.
        for sym in rule.rhs:
            if not grammar.is_tag(sym):
                new_rules_rhs.append([sym])
            elif sym in itags:
                # This tag position may optionally carry an error prefix.
                new_rules_rhs.append(["Err_" + sym, sym])
                flag = True
            else:
                new_rules_rhs.append([sym])
        new_rules = []
        # Cartesian product over the per-symbol alternatives.
        for rule_rhs in product(*new_rules_rhs):
            if list(rule_rhs) != rule.rhs:
                new_rules.append(Rule(rule.lhs, list(rule_rhs)))
        return new_rules

    @staticmethod
    def error_rule_0(grammar, rule, itags):
        """
        Same expansion as ``error_rule_1`` but skips annotated-assignment
        rules entirely.
        """
        if rule.lhs in ['Annotated_Assign']:
            return []
        if grammar.is_tag(rule.lhs):
            return []
        new_rules_rhs = []
        flag = False  # NOTE(review): set but never read.
        for sym in rule.rhs:
            if not grammar.is_tag(sym):
                new_rules_rhs.append([sym])
            elif sym in itags:
                new_rules_rhs.append(["Err_" + sym, sym])
                flag = True
            else:
                new_rules_rhs.append([sym])
        new_rules = []
        for rule_rhs in product(*new_rules_rhs):
            if list(rule_rhs) != rule.rhs:
                new_rules.append(Rule(rule.lhs, list(rule_rhs)))
        return new_rules

    def update_error_grammar_with_erules(self, erules):
        """
        Extend the grammar with a pre-computed list of error rules given as
        ``lhs -> rhs`` strings (e.g. produced by a predictor model).
        """
        alphabet = self.get_alphabet()
        tags = self.get_tags()
        alphabet.remove('_ENDMARKER_')
        # Maybe remove "Tfpdef -> Vfpdef Err_Colon Test" typed definitions in the future
        # Second step: install the given error rules and remember which tags
        # they make repairable.
        all_important_tags = []
        for erl in erules:
            entries = erl.split('->')
            lhs = entries[0].strip()
            if 'H ' in entries[1]:
                # Rules through H carry no direct error cost themselves.
                self.add(Rule(lhs, entries[1].strip().split()))
            else:
                self.add(ErrorRule(lhs, entries[1].strip().split()))
            if lhs.startswith('Err_'):
                sym = lhs.replace('Err_', '')
                all_important_tags.append(sym)
        all_important_tags = list(set(all_important_tags))
        # First step in Algorithm 1 of Aho's paper (restricted variant).
        added_rules_1 = []
        for key in self.rules:
            if not key.startswith('Err_') and not key.startswith('InsertErr'):
                for rule in self.rules[key]:
                    if rule.error_score() < 1:
                        new_rules_0 = ErrorGrammar.error_rule_0(self, rule, all_important_tags)
                        if new_rules_0:
                            added_rules_1.extend(new_rules_0)
        for rule in added_rules_1:
            self.add(rule)
        # Third step: new start symbol S' and the H chain for insertions.
        self.add(Rule("S'", ["S"]))
        self.add(Rule("S'", ["S", "InsertErr"]))
        self.add(Rule("H", ["H", "InsertErr"]))
        self.add(Rule("H", ["InsertErr"]))
class State():
    """
    Represents a state (dotted rule) in the error-correcting Earley
    algorithm.
    """

    GAM = '<GAM>'

    def __init__(self, rule, dot=0, sent_pos=0, chart_pos=0, error_count=0,
                 back_pointers=None):
        """
        :param rule: the CFG rule this state tracks.
        :param dot: dot position within the rule's RHS.
        :param sent_pos: sentence position where the rule application began.
        :param chart_pos: chart index reached so far.
        :param error_count: number of error-correcting rules used.
        :param back_pointers: child states (set by the Completer). Defaults
            to an empty list; a ``None`` sentinel is used instead of a
            mutable ``[]`` default to avoid the shared-default pitfall.
        """
        # CFG Rule.
        self.rule = rule
        # Dot position in the rule.
        self.dot = dot
        # Sentence position.
        self.sent_pos = sent_pos
        # Chart index.
        self.chart_pos = chart_pos
        # Error counter.
        self.error_count = error_count
        # Pointers to child states (if the given state was generated using
        # Completer).
        self.back_pointers = back_pointers if back_pointers is not None else []
        # Precomputed hash folding in the back-pointer chain, so chart
        # membership tests stay cheap.
        self.own_hash = hash((self.rule, self.dot, self.sent_pos, self.error_count)) + \
            hash(", ".join(str(hash(s)) for s in self.back_pointers)
                 if self.back_pointers else hash("empty"))

    def __eq__(self, other):
        if isinstance(other, State):
            return self.rule == other.rule and self.dot == other.dot and \
                self.sent_pos == other.sent_pos and \
                self.error_count == other.error_count and \
                self.own_hash == other.own_hash
        return False

    def __hash__(self):
        return self.own_hash

    def __len__(self):
        return len(self.rule)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        def str_helper(state):
            return ('(' + state.rule.lhs + ' -> ' +
                    ' '.join(state.rule.rhs[:state.dot] + ['*'] +
                             state.rule.rhs[state.dot:]) +
                    (', [%d, %d], %d)' % (state.sent_pos, state.chart_pos,
                                          state.error_count)))
        return (str_helper(self) +
                ' (' + ', '.join(str_helper(s) for s in self.back_pointers) + ')')

    def next(self):
        """
        Return the next symbol to parse, i.e. the one after the dot, or
        ``None`` when the dot is at the end.
        """
        if self.dot < len(self):
            return self.rule[self.dot]

    def is_complete(self):
        """
        Checks whether the given state is complete (dot at the end).
        """
        return len(self) == self.dot

    @staticmethod
    def init():
        """
        Returns the state used to initialize the chart in the Earley
        algorithm.
        """
        return State(Rule(State.GAM, ["S'"]))
class ChartEntry():
    """
    One column of the Earley chart: a queue of pending states plus the set
    of states already handed out for processing.
    """

    def __init__(self, states):
        # Pending states, consumed from the left end.
        self.states = deque(states)
        # Maps each already-returned state to its error count.
        self.seen = {}

    def __iter__(self):
        return iter(list(self.states) + list(self.seen.keys()))

    def __len__(self):
        return len(self.states) + len(self.seen.keys())

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return '\n'.join(str(s) for s in list(self.states) + list(self.seen.keys()))

    def pop_state(self, cost):
        """
        Return the next unseen pending state with error count <= ``cost``,
        or ``None`` when no such state remains.  Over-budget and duplicate
        states are silently discarded.
        """
        while self.states:
            candidate = self.states.popleft()
            if candidate.error_count > cost:
                continue
            if candidate in self.seen:
                continue
            self.seen[candidate] = candidate.error_count
            return candidate
        return None

    def add(self, grammar, state):
        """
        Queue ``state`` unless it is already pending or processed.  Complete
        states of nullable non-terminals go to the back of the queue;
        everything else jumps to the front.
        """
        if state in self.states or state in self.seen:
            return
        if state.is_complete() and grammar.is_nullable(state.rule.lhs):
            self.states.append(state)
        else:
            self.states.appendleft(state)
class Chart():
    """
    The chart used by the Earley algorithm: one ChartEntry per input
    position (plus one for the dummy start state).
    """

    def __init__(self, entries):
        # List of chart entries, indexed by input position.
        self.entries = entries

    def __getitem__(self, i):
        return self.entries[i]

    def __len__(self):
        return len(self.entries)

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        parts = []
        for i, entry in enumerate(self.entries):
            parts.append(("Chart[%d]:\n" % i) + str(entry))
        return '\n\n'.join(parts)

    @staticmethod
    def init(l):
        """
        Initializes a chart with ``l`` entries; entry 0 holds the dummy
        start state.
        """
        return Chart([(ChartEntry([]) if i > 0 else
                       ChartEntry([State.init()])) for i in range(l)])
class ErrorEarleyParse():
    """
    Represents the Error-correcting Earley-generated parse for a given
    sentence according to a given grammar.

    ``sentence`` is a whitespace-separated token string; ``max_cost`` bounds
    the number of error-correcting rules a derivation may use.
    """

    def __init__(self, sentence, grammar, max_cost=3):
        self.words = sentence.split()
        self.grammar = grammar
        # chart[i] collects states whose progress reaches input position i.
        self.chart = Chart.init(len(self.words) + 1)
        # Maximum number of error correcting rules to use.
        self.max_cost = max_cost

    def predictor(self, state, pos):
        """
        Error-correcting Earley Predictor: expand the non-terminal after the
        dot with each of its productions.
        """
        for rule in self.grammar[state.next()]:
            self.chart[pos].add(self.grammar, State(rule, dot=0,
                sent_pos=state.chart_pos, chart_pos=state.chart_pos))

    def scanner(self, state, pos):
        """
        Error-correcting Earley Scanner: shift the next input word when the
        tag after the dot can produce it.  Matching through an error tag
        (``Err_*`` or ``InsertErr``) costs 1.
        """
        if state.chart_pos < len(self.words):
            word = self.words[state.chart_pos]
            if any((word in r) for r in self.grammar[state.next()]):
                new_cost = 1 if state.next().startswith('Err_') or state.next() == 'InsertErr' else 0
                if new_cost <= self.max_cost:
                    self.chart[pos + 1].add(self.grammar, State(Rule(state.next(), [word]),
                        dot=1, sent_pos=state.chart_pos,
                        chart_pos=(state.chart_pos + 1),
                        error_count=new_cost))

    def completer(self, state, pos):
        """
        Error-correcting Earley Completer: advance every earlier state that
        was waiting on the completed state's LHS, accumulating error costs.
        """
        for prev_state in self.chart[state.sent_pos]:
            if prev_state.next() == state.rule.lhs:
                new_cost = prev_state.error_count + state.error_count + state.rule.error_score()
                # Err_Tag special case: an empty expansion always costs 1;
                # a token the original (un-prefixed) tag could also produce
                # adds no cost — NOTE(review): presumably because it is not
                # actually an error at that position; confirm.
                if state.rule.lhs == 'Err_Tag':
                    if state.rule.rhs == []:
                        new_cost = 1
                    elif any((state.rule.rhs[0] in r) for r in self.grammar[prev_state.rule.lhs.replace('Err_', '')]):
                        new_cost = prev_state.error_count
                if new_cost <= self.max_cost:
                    self.chart[pos].add(self.grammar, State(prev_state.rule,
                        dot=(prev_state.dot + 1), sent_pos=prev_state.sent_pos,
                        chart_pos=pos,
                        back_pointers=(prev_state.back_pointers + [state]),
                        error_count=new_cost))

    def parse(self):
        """
        Parses the sentence by running the Earley algorithm and filling out
        the chart, draining each column's queue in order.
        """
        # Checks whether the next symbol for the given state is a tag.
        def is_tag(state):
            return self.grammar.is_tag(state.next())

        for i in range(len(self.chart)):
            state = self.chart[i].pop_state(self.max_cost)
            while state is not None:
                if not state.is_complete():
                    # Nullable tags still go through the predictor so their
                    # empty expansions can complete.
                    if is_tag(state) and not self.grammar.is_nullable(state.next()):
                        self.scanner(state, i)
                    else:
                        self.predictor(state, i)
                else:
                    self.completer(state, i)
                state = self.chart[i].pop_state(self.max_cost)

    def has_parse(self):
        """
        Checks whether the sentence has a parse (a complete S' state
        spanning the whole input in the last chart column).
        """
        for state in self.chart[-1]:
            if state.is_complete() and state.rule.lhs == "S'" and \
                    state.sent_pos == 0 and state.chart_pos == len(self.words):
                return True
        return False

    def get_parses(self):
        """
        Returns the number of complete parses found for the sentence.
        """
        num_of_parses = 0
        for state in self.chart[-1]:
            if state.is_complete() and state.rule.lhs == "S'" and \
                    state.sent_pos == 0 and state.chart_pos == len(self.words):
                num_of_parses += 1
        return num_of_parses

    def get(self):
        """
        Returns the minimum error parse tree if it exists, otherwise None.
        """
        def get_helper(state):
            # Tags become leaves; an empty error expansion falls back to the
            # tag's own (un-prefixed) name as the leaf.
            if self.grammar.is_tag(state.rule.lhs):
                if state.rule.rhs != []:
                    return Tree(state.rule.lhs, [state.rule.rhs[0]])
                return Tree(state.rule.lhs, [state.rule.lhs.replace('Err_', '')])
            return Tree(state.rule.lhs,
                        [get_helper(s) for s in state.back_pointers])

        found_state = None
        errors = float("inf")
        for state in self.chart[-1]:
            if state.is_complete() and state.rule.lhs == "S'" and \
                    state.sent_pos == 0 and state.chart_pos == len(self.words):
                # Keep the complete state with the fewest errors.
                if state.error_count < errors:
                    found_state = state
                    errors = state.error_count
        if found_state is not None:
            return get_helper(found_state)
        return None

    def get_rules(self):
        """
        Returns the rule sequence of the minimum error parse if it exists,
        otherwise None.
        """
        def get_helper(state):
            # Pre-order collection of the rules used in the derivation.
            if self.grammar.is_tag(state.rule.lhs):
                return [state.rule]
            result = [state.rule]
            for s in state.back_pointers:
                result.extend(get_helper(s))
            return result

        found_state = None
        errors = float("inf")
        for state in self.chart[-1]:
            if state.is_complete() and state.rule.lhs == "S'" and \
                    state.sent_pos == 0 and state.chart_pos == len(self.words):
                if state.error_count < errors:
                    found_state = state
                    errors = state.error_count
        if found_state is not None:
            return get_helper(found_state)
        return None

    def get_fixed_seq(self):
        """
        Returns every complete parse as a (leaf-state sequence, used rules,
        error count) triple, sorted by ascending error count; otherwise
        None.
        """
        # NOTE(review): defined but only exercised by commented-out debug
        # code in the original; kept for parity.
        def get_helperrr(state):
            if self.grammar.is_tag(state.rule.lhs) or \
                    (self.grammar.is_tag(state.rule.lhs.replace('Err_', '')) and \
                    (state.rule.rhs == [] or state.rule.rhs[0] != 'H')):
                if state.rule.rhs != []:
                    return Tree(state.rule.lhs, [state.rule.rhs[0]])
                return Tree(state.rule.lhs, [state.rule.lhs.replace('Err_', '')])
            return Tree(state.rule.lhs,
                        [get_helperrr(s) for s in state.back_pointers])

        def get_helper(state):
            # Collect the leaf-level states of the (possibly repaired)
            # token sequence.
            if self.grammar.is_tag(state.rule.lhs) or \
                    (self.grammar.is_tag(state.rule.lhs.replace('Err_', '')) and \
                    (state.rule.rhs == [] or state.rule.rhs[0] != 'H')):
                return [state]
            if state.rule.rhs:
                if state.rule.rhs[0] == 'Err_Tag':
                    return [state]
            result = []
            for s in state.back_pointers:
                result.extend(get_helper(s))
            return result

        def get_erules(state):
            # Collect every rule used in the derivation.
            if self.grammar.is_tag(state.rule.lhs):
                return [state.rule]
            result = [state.rule]
            for s in state.back_pointers:
                result.extend(get_erules(s))
            return result

        found_states = []
        for state in self.chart[-1]:
            if state.is_complete() and state.rule.lhs == "S'" and \
                    state.sent_pos == 0 and state.chart_pos == len(self.words):
                candidate = get_helper(state)
                used_erules = get_erules(state)
                found_states.append((candidate, used_erules, state.error_count))
        if found_states:
            found_states = sorted(found_states, key=lambda st: st[2])
            # Return Top N = 10 repairs with lowest cost
            return found_states
        return None
class Lexer():
"""
Simple lexer for Python programs
"""
def __init__(self, terminals):
self.lexer = get_lexer_by_name("python")
self.terminals = terminals
def lex(self, input_program):
program = input_program
if len(input_program) > 1:
if input_program[-1] != '\n':
program = input_program + '\n'
program = self.remove_comments_and_strings(program)
# Some hacks for random errors
if "’" in program:
program = program.replace("’", "'")
program = self.remove_comments_and_strings(program)
# Clean tabs
all_lines = []
for line in program.split('\n'):
spaces_so_far = 0
if len(line) > 0:
if line[0] in [' ', '\t']:
for char in line:
if char == ' ':
spaces_so_far += 1
elif char == '\t':
spaces_so_far = (spaces_so_far // 4 + 1) * 4
else:
break
all_lines.append(' ' * spaces_so_far + line.lstrip().replace('\t', ' '))
all_lines = list(map(lambda line: list(pygments.lex(line.rstrip(), self.lexer)), all_lines))
all_lines = self.update_indents_stack(all_lines)
all_lines = self.update_spaces_and_nls(all_lines)
all_lines = self.update_tokens(all_lines)
tokens = [tok for line in all_lines for tok in line]
tokens = self.final_cleaning(tokens, False)
return tokens
def clean_with_lex(self, input_program):
program = input_program
# print(program)
if len(input_program) > 1:
if input_program[-1] != '\n':
program = input_program + '\n'
program = self.remove_comments_and_strings(program)
# Some hacks for random errors
if "’" in program:
program = program.replace("’", "'")
program = self.remove_comments_and_strings(program)
# Store strings for later use
all_strings, all_string_types = self.get_comments_and_strings(input_program, program)
# print(all_strings, all_string_types)
# Clean tabs
all_lines = []
for line in program.split('\n'):
spaces_so_far = 0
if len(line) > 0:
if line[0] in [' ', '\t']:
for char in line:
if char == ' ':
spaces_so_far += 1
elif char == '\t':
spaces_so_far = (spaces_so_far // 4 + 1) * 4
else:
break
all_lines.append(' ' * spaces_so_far + line.lstrip().replace('\t', ' '))
all_lines = list(map(lambda line: list(pygments.lex(line.rstrip(), self.lexer)), all_lines))
all_lines = self.update_indents_stack(all_lines)
all_lines = self.update_spaces_and_nls(all_lines)
all_lines = self.update_tokens_with_actual(all_lines)
tokens = [tok for line in all_lines for tok in line]
tokens = self.final_cleaning(tokens, True)
# Put strings back
# print('-' * 42 + '\n' + input_program + '\n' + '=' * 42 + '\n' + tokens.replace('_NEWLINE_ ', '\n') + '\n' + '*' * 42 + '\n')
if tokens.count("_STRING_") == len(all_strings):
# prev_len = len(tokens.split())
# new_tokens = tokens
for string, stype in zip(all_strings, all_string_types):
if ' ' in string:
string = string.replace(' ', "_white_space_")
tokens = tokens.replace('_STRING_', stype + string + stype, 1)
# print(len(new_tokens.split()), prev_len)
# print(new_tokens.split())
# print(tokens.split())
# if len(new_tokens.split()) == len(tokens.split()):
# tokens = new_tokens
# else:
# tokens = tokens.replace('_STRING_', "\"_some_string_\"")
else:
tokens = tokens.replace('_STRING_', "\"_some_string_\"")
# print('-' * 42 + '\n' + '=' * 42 + '\n' + tokens.replace('_NEWLINE_ ', '\n') + '\n' + '*' * 42 + '\n')
return tokens
def remove_comments_and_strings(self, input_prog):
prog = input_prog.replace("\r\n", "\n")
prog = re.sub(re.compile(r"\\\s*?\n") , "" , prog)
# Temporary replacements
prog = prog.replace("\\\\", "__temporary__")
prog = prog.replace("\\\"", "__double_quote__")
prog = prog.replace("\\\'", "__single_quote__")
prog = prog.replace("__temporary__", "\\\\")
# String and comment replacements
prog = re.sub(re.compile(r"\n\s*#.*?\n") , "\n" , prog)
prog = re.sub(re.compile(r"\"\"\".*?\"\"\"", flags=re.DOTALL) , "__triple_dstring__" , prog)
prog = re.sub(re.compile(r"\'\'\'.*?\'\'\'", flags=re.DOTALL) , "__triple_sstring__" , prog)
in_double_quote = False
in_single_quote = False
in_comment = False
new_prog = ""
for char in prog:
if not in_comment:
if not in_double_quote and not in_single_quote and char == "#":
in_comment = True
new_prog += char
elif not in_double_quote and not in_single_quote and char == "\"":
in_double_quote = True
new_prog += char
elif not in_double_quote and not in_single_quote and char == "\'":
in_single_quote = True
new_prog += char
elif in_double_quote and not in_single_quote and char == "\"":
in_double_quote = False
new_prog += char
elif in_double_quote and not in_single_quote and char == "\'":
new_prog += "__single_quote__"
elif not in_double_quote and in_single_quote and char == "\'":
in_single_quote = False
new_prog += char
elif not in_double_quote and in_single_quote and char == "\"":
new_prog += "__double_quote__"
else:
new_prog += char
else:
if char == "\n":
in_comment = False
new_prog += char
elif char == "\'":
new_prog += "__single_quote__"
elif char == "\"":
new_prog += "__double_quote__"
else:
new_prog += char
prog = new_prog
prog = re.sub(re.compile(r"\"([^(\"|\'|\n)]|\(|\)|\|)*?\"") , "\"__string__\"" , prog)
prog = re.sub(re.compile(r"\'([^(\"|\'|\n)]|\(|\)|\|)*?\'") , "\'__string__\'" , prog)
prog = prog.replace("__triple_dstring__", "\"__string__\"")
prog = prog.replace("__triple_sstring__", "\'__string__\'")
prog = re.sub(re.compile(r"#.*?\n" ) , "\n" , prog)
prog = re.sub(re.compile(r"\n\s+\n" ) , "\n" , prog)
while prog.find('\n\n') >= 0:
prog = prog.replace('\n\n', '\n')
return prog
def get_comments_and_strings(self, input_prog, prog):
strings = []
string_types = []
clean_input_prog = input_prog.replace("\r\n", "\n")
clean_input_prog = clean_input_prog.replace("’", "'")
if len(clean_input_prog) > 1:
if clean_input_prog[-1] != '\n':
clean_input_prog = clean_input_prog + '\n'
clean_input_prog = re.sub(re.compile(r"\\\s*?\n") , "" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"\n\s*#.*?\n") , "\n" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"#[^\'\"]*?\n" ) , "\n" , clean_input_prog)
clean_input_prog = re.sub(re.compile(r"\n\s+\n" ) , "\n" , clean_input_prog)
while clean_input_prog.find('\n\n') >= 0:
clean_input_prog = clean_input_prog.replace('\n\n', '\n')
# print("=*" * 42 + "=")
# print(clean_input_prog)
parts = prog.split("__string__")
# print(parts)
while parts:
part = parts.pop(0)
clean_input_prog = clean_input_prog.replace(part, '', 1)
if parts == [] or clean_input_prog == "" or clean_input_prog.split(parts[0]) == []:
break
split_prog = clean_input_prog.split(parts[0])
strings.append(split_prog[0].replace('\n', '_NEWLINE_'))
clean_input_prog = clean_input_prog.replace(strings[-1], '', 1)
if len(clean_input_prog) > 2 and clean_input_prog[:3] == "\"\"\"":
string_types.append("\"\"\"")
elif len(clean_input_prog) > 2 and clean_input_prog[:3] == "\'\'\'":
string_types.append("\'\'\'")
elif len(clean_input_prog) > 0 and clean_input_prog[0] == "\"":
string_types.append("\"")
elif len(clean_input_prog) > 0 and clean_input_prog[0] == "\'":
string_types.append("\'")
else:
string_types.append("\"")
return strings, string_types
    def update_indents_stack(self, all_lines):
        """Rewrite per-line token lists with explicit _INDENT_/_DEDENT_ markers.

        *all_lines* is a list of pygments ``(type, text)`` token lists, one per
        physical line.  A stack of seen indentation widths drives INDENT/DEDENT
        emission; open ``()``, ``{}`` and ``[]`` counts are tracked so that
        lines which merely continue a bracketed expression (or follow a
        trailing operator/comma/backslash) are glued onto the previous logical
        line instead of producing indentation tokens.  Any still-open indents
        are closed with trailing _DEDENT_ tokens at the end.
        """
        all_line_tokens = []
        lst_token_prev_line = False
        fst_token_this_line = False
        indents = []            # stack of indentation widths currently open
        paren_so_far = 0        # unbalanced '(' count across lines
        curly_so_far = 0        # unbalanced '{' count across lines
        square_so_far = 0       # unbalanced '[' count across lines
        for token_list in all_lines:
            fst_token = token_list[0]
            tok_idx = 0
            # Skip leading space-only Text tokens to find the first real token.
            fst_real_token = token_list[tok_idx]
            while fst_real_token[0] in Text and fst_real_token[1].replace(' ', '') == '':
                tok_idx += 1
                if tok_idx < len(token_list):
                    fst_real_token = token_list[tok_idx]
                else:
                    break
            # A line starting with an operator/bracket/'for'/string while a
            # bracket is open is a continuation of the previous logical line.
            fst_token_this_line = fst_real_token[0] in Operator and fst_real_token[1] in ['+', '-', '*', '/', '//', '%', '==', '!=', 'in', 'or', 'and'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
            fst_token_this_line |= fst_real_token[0] in Punctuation and fst_real_token[1] in [',', '}', ')', ']'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
            fst_token_this_line |= fst_real_token[0] in Punctuation and fst_real_token[1] in ['(', '['] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
            fst_token_this_line |= fst_real_token[0] in Keyword and fst_real_token[1] == 'for' and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
            fst_token_this_line |= fst_real_token[0] in String and lst_token_prev_line and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
            if lst_token_prev_line:
                # Checks if previous line ends with an operator, paren etc. and we are within a parenthesis, thus we must not indent
                last_line_tokens = all_line_tokens.pop()
                if len(last_line_tokens) > 1:
                    all_line_tokens.append(last_line_tokens[:-1])
                all_line_tokens.append(token_list)
            elif fst_token_this_line:
                # Checks if line starts with an operator and we are within a parenthesis, thus we must not indent
                last_line_tokens = all_line_tokens.pop()
                if len(last_line_tokens) > 1:
                    all_line_tokens.append(last_line_tokens[:-1])
                all_line_tokens.append(token_list[tok_idx:])
            elif fst_token[0] in Text and fst_token[1].replace(' ', '') == '':
                # Line starts with whitespace: compare against the indent stack.
                this_indent = len(fst_token[1])
                if indents == [] and this_indent > 0:
                    indents.append(this_indent)
                    all_line_tokens.append([(fst_token[0], '_INDENT_')] + token_list[1:])
                elif indents == []:
                    all_line_tokens.append(token_list[1:])
                elif indents[-1] == this_indent:
                    all_line_tokens.append(token_list[1:])
                elif indents[-1] < this_indent:
                    indents.append(this_indent)
                    all_line_tokens.append([(fst_token[0], '_INDENT_')] + token_list[1:])
                elif indents[-1] > this_indent:
                    # Pop until we reach (or pass) this line's indent level,
                    # emitting one _DEDENT_ per popped level.
                    dedents = 0
                    while indents[-1] > this_indent:
                        dedents += 1
                        indents.pop()
                        if indents == []:
                            break
                    all_line_tokens.append([(fst_token[0], '_DEDENT_')] * dedents + token_list[1:])
            elif not(fst_token[0] in Text and fst_token[1].replace('\n', '') == '') and \
                len(indents) > 0:
                # Non-blank line at column 0 while indents are open: close all.
                all_line_tokens.append([(Text, '_DEDENT_')] * len(indents) + token_list)
                indents = []
            else:
                all_line_tokens.append(token_list)
            if len(token_list) > 1:
                # Update bracket balances and remember whether this line ends
                # in a way that continues onto the next one.
                lst_token = token_list[-2]
                for tok in token_list:
                    if tok[0] in Punctuation and tok[1] == '(':
                        paren_so_far += 1
                    elif tok[0] in Punctuation and tok[1] == ')':
                        paren_so_far -= 1
                for tok in token_list:
                    if tok[0] in Punctuation and tok[1] == '{':
                        curly_so_far += 1
                    elif tok[0] in Punctuation and tok[1] == '}':
                        curly_so_far -= 1
                for tok in token_list:
                    if tok[0] in Punctuation and tok[1] == '[':
                        square_so_far += 1
                    elif tok[0] in Punctuation and tok[1] == ']':
                        square_so_far -= 1
                lst_token_prev_line = lst_token[0] in Punctuation and lst_token[1] in ['\\', '{', '(', '[']
                lst_token_prev_line |= lst_token[0] in Punctuation and lst_token[1] == ',' and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
                lst_token_prev_line |= token_list[-1][0] in Text and token_list[-1][1] == '\\\n'
                lst_token_prev_line |= lst_token[0] in Punctuation and lst_token[1] == ':' and curly_so_far > 0
                lst_token_prev_line |= lst_token[0] in Operator and lst_token[1] in ['+', '-', '*', '/', '//', '%', '==', '!=', 'in', 'or', 'and'] and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
                lst_token_prev_line |= lst_token[0] in String and (paren_so_far > 0 or curly_so_far > 0 or square_so_far > 0)
        if len(indents) > 0:
            # Close any indent levels still open at end of input.
            all_line_tokens.append([(Text, '_DEDENT_')] * len(indents))
        return all_line_tokens
def update_spaces_and_nls(self, all_lines):
def is_space(token):
return token[0] in Text and token[1].replace(' ', '') == ''
all_line_tokens = []
for token_list in all_lines:
token_list_no_spaces = list(filter(lambda tok: not is_space(tok), token_list))
last_token = token_list_no_spaces[-1]
if last_token[0] in Text and '\n' in last_token[1]:
all_line_tokens.append(token_list_no_spaces[:-1] + [(last_token[0], '_NEWLINE_')])
else:
all_line_tokens.append(token_list_no_spaces)
return all_line_tokens
def update_tokens(self, all_lines):
all_line_tokens = []
for token_list in all_lines:
new_token_list = []
prev_num = False
for tok in token_list:
if tok[0] in Number:
prev_num = True
else:
if prev_num and tok[0] in Name and tok[1] == 'j':
prev_tok = new_token_list.pop()
tok = (prev_tok[0], prev_tok[1] + 'j')
prev_num = False
new_token_list.append(tok)
new_token_list = list(map(self.choose_token_represent, new_token_list))
all_line_tokens.append(new_token_list)
return all_line_tokens
def choose_token_represent(self, token):
if token[0] in Name and token[1] != '.':
return '_NAME_'
elif token[0] in Number:
return '_NUMBER_'
elif token[0] in String:
return '_STRING_'
return token[1]
def update_tokens_with_actual(self, all_lines):
all_line_tokens = []
for token_list in all_lines:
new_token_list = []
prev_num = False
for tok in token_list:
if tok[0] in Number:
prev_num = True
else:
if prev_num and tok[0] in Name and tok[1] == 'j':
prev_tok = new_token_list.pop()
tok = (prev_tok[0], prev_tok[1] + 'j')
prev_num = False
new_token_list.append(tok)
# Abstract String tokens for now. Will insert them back later
new_token_list = list(map(lambda x: '_STRING_' if x[0] in String else x[1], new_token_list))
all_line_tokens.append(new_token_list)
return all_line_tokens
def final_cleaning(self, tokens, is_actual):
tokens.append('_ENDMARKER_')
tokens = " ".join(tokens)
tokens = tokens.replace('* *', "**")
tokens = tokens.replace('= =', "==")
tokens = tokens.replace('< =', "<=")
tokens = tokens.replace('> =', ">=")
tokens = tokens.replace('! =', "!=")
tokens = tokens.replace('< <', "<<")
tokens = tokens.replace("> >", ">>")
tokens = tokens.replace('& &', "&&")
tokens = tokens.replace('| |', "||")
tokens = tokens.replace('/ /', "//")
tokens = tokens.replace('+ =', "+=")
tokens = tokens.replace('- =', "-=")
tokens = tokens.replace('/ =', "/=")
tokens = tokens.replace('* =', "*=")
tokens = tokens.replace('>> =', ">>=")
tokens = tokens.replace('<< =', "<<=")
tokens = tokens.replace('&& =', "&&=")
tokens = tokens.replace('!! =', "!!=")
tokens = tokens.replace('// =', "//=")
tokens = tokens.replace('% =', "%=")
tokens = tokens.replace('@', "@ ")
tokens = tokens.replace('@ =', "@=")
tokens = tokens.replace('| =', "|=")
tokens = tokens.replace('& =', "&=")
tokens = tokens.replace('^ =', "^=")
# tokens = tokens.replace(", ;", ";")
tokens = tokens.replace(". . .", "...")
if not is_actual:
tokens = tokens.replace("not in", "not_in")
tokens = tokens.replace("is not", "is_not")
tokens = tokens.replace("- >", "_arrow_")
else:
tokens = tokens.replace("- >", "->")
while tokens.find('_STRING_ _STRING_') >= 0:
tokens = tokens.replace('_STRING_ _STRING_', '_STRING_')
while tokens.find(' : _NEWLINE_ _NEWLINE_ ') >= 0:
tokens = tokens.replace(' : _NEWLINE_ _NEWLINE_ ', ' : _NEWLINE_ ')
while tokens.find('. _NUMBER_') >= 0:
tokens = tokens.replace('. _NUMBER_', '_NUMBER_')
while tokens.find('_NEWLINE_ )') >= 0:
tokens = tokens.replace('_NEWLINE_ )', ')')
while tokens.find('_NEWLINE_ ]') >= 0:
tokens = tokens.replace('_NEWLINE_ ]', ']')
while tokens.find('_NEWLINE_ }') >= 0:
tokens = tokens.replace('_NEWLINE_ }', '}')
# print(tokens.replace('_NEWLINE_ ', '\n'))
if not is_actual:
tokens = " ".join(map(lambda t: t if t in self.terminals else '_UNKNOWN_', tokens.split()))
# print(tokens.replace('_NEWLINE_ ', '\n'))
return tokens
def read_grammar(grammar_file):
    """Load the error grammar stored at *grammar_file*."""
    return ErrorGrammar.load_grammar(grammar_file)
def prog_has_parse(prog, grammar, terminals):
    """Return True iff every top-level function chunk of *prog* parses.

    The program is split at each top-level ``def`` so every function is
    parsed independently; chunks whose lexed form is empty are skipped.
    """
    def attempt(sentence):
        parser = ErrorEarleyParse(sentence, grammar)
        parser.parse()
        return parser.has_parse(), parser.chart

    lexer = Lexer(terminals)
    for position, chunk in enumerate(prog.split('\ndef ')):
        lexed = lexer.lex('def ' + chunk) if position > 0 else lexer.lex(chunk)
        if lexed == '':
            continue
        parsed, _ = attempt(lexed)
        if not parsed:
            # Covers both "no parse produced" (None) and an explicit failure.
            return False
    return True
def prog_error_rules(prog, grammar, terminals):
    """Lex *prog*, parse it with the error grammar and return its error rules.

    Only productions whose LHS is an error tag (``Err_*`` or ``InsertErr``)
    are returned; a program with no parse yields an empty list.
    """
    lexer = Lexer(terminals)
    parser = ErrorEarleyParse(lexer.lex(prog), grammar)
    parser.parse()
    used_rules = parser.get_rules()
    if not used_rules:
        return []
    return [rule for rule in used_rules
            if rule.lhs.startswith('Err_') or rule.lhs == 'InsertErr']
def lexed_prog_has_parse(lexed_prog, grammar):
    """Parse an already-lexed program and return the error rules it used.

    Returns an empty list both when no parse exists and when the parse used
    no error productions.
    """
    parser = ErrorEarleyParse(lexed_prog, grammar)
    parser.parse()
    used_rules = parser.get_rules()
    if not used_rules:
        return []
    return [rule for rule in used_rules
            if rule.lhs.startswith('Err_') or rule.lhs == 'InsertErr']
def rule_updates(st):
    """Abstract LHS tag for a parse state.

    ``Err_X`` collapses to ``X``, ``InsertErr`` states are dropped (None),
    everything else keeps its LHS unchanged.
    """
    lhs = st.rule.lhs
    if lhs == 'InsertErr':
        return None
    if lhs.startswith('Err_'):
        return lhs.replace('Err_', '')
    return lhs
def rule_updates_rhs(grammar, st):
    """First concrete RHS symbol for a state, resolving Err_ tags via *grammar*."""
    lhs = st.rule.lhs
    if lhs == 'InsertErr':
        return None
    if lhs.startswith('Err_'):
        # Look up the non-error version of the rule to find the real token.
        return grammar[lhs.replace('Err_', '')][0].rhs[0]
    return st.rule.rhs[0]
def rule_updates_repair_operations(grammar, st):
    """Render a parse state as a repair-operation marker.

    ``Err_X`` with a non-empty RHS -> replace marker ``<<$tok$>>``;
    ``Err_X`` with an empty RHS    -> insert marker ``<<+tok+>>``;
    ``InsertErr``                  -> delete marker ``<<-tok->>``;
    anything else                  -> its first RHS symbol verbatim.
    """
    lhs = st.rule.lhs
    if lhs.startswith('Err_'):
        target = grammar[lhs.replace('Err_', '')][0].rhs[0]
        if st.rule.rhs:
            return '<<$' + target + '$>>'
        return '<<+' + target + '+>>'
    if lhs == 'InsertErr':
        return '<<-' + st.rule.rhs[0] + '->>'
    return st.rule.rhs[0]
def get_repaired_seq_for_1(rule_sequences, grammar):
    """Convert one parser solution into its three repair-sequence strings.

    Returns (abstract tags, concrete tokens, repair operations) joined with
    spaces, followed by the solution's remaining two fields untouched.
    """
    ordered = sorted(rule_sequences[0], key=lambda st: st.sent_pos)
    abstract = [tag for tag in map(rule_updates, ordered) if tag]
    concrete = [tok for tok in map(partial(rule_updates_rhs, grammar), ordered) if tok]
    operations = [op for op in map(partial(rule_updates_repair_operations, grammar), ordered) if op]
    return (' '.join(abstract), ' '.join(concrete), ' '.join(operations),
            rule_sequences[1], rule_sequences[2])
def fixed_lexed_prog(lexed_prog, grammar, max_cost):
    """Parse with repairs enabled and return every repaired sequence found.

    When the parser yields nothing, a single all-None placeholder tuple is
    returned so callers can still unpack five fields.
    """
    parser = ErrorEarleyParse(lexed_prog, grammar, max_cost)
    parser.parse()
    solutions = parser.get_fixed_seq()
    if not solutions:
        return [(None, None, None, None, None)]
    return [get_repaired_seq_for_1(solution, grammar) for solution in solutions]
def repair_prog(actual_tokens, fix_seq_operations):
    """Replay the repair-operation sequence to produce repaired source text.

    *actual_tokens* is the space-separated concrete token string of the broken
    program; *fix_seq_operations* is the operation string produced by the
    parser, mixing plain tokens with insert (``<<+tok+>>``), replace
    (``<<$tok$>>``) and delete (``<<-tok->>``) markers.  Plain tokens and
    replace/delete markers each consume one concrete token; inserts consume
    none.  Indentation is reconstructed four spaces per _INDENT_/_DEDENT_.
    If the concrete token stream runs out early, the text built so far is
    returned as-is.
    """
    # Reverse program `actual_tokens` for efficient popping
    actual_tkns = actual_tokens
    actual_tkns = list(reversed(actual_tkns.split()))
    repaired = ""
    indents = ''
    # Undo the single-token spellings used by the abstract grammar.
    if " not_in " in fix_seq_operations:
        fix_seq_operations = fix_seq_operations.replace(" not_in ", " not in ")
    if " is_not " in fix_seq_operations:
        fix_seq_operations = fix_seq_operations.replace(" is_not ", " is not ")
    for tk in fix_seq_operations.split():
        if tk.startswith('<<+'):
            # Insert operation: emit the token without consuming input.
            if tk[3:-3] == '_INDENT_':
                indents += ' ' * 4
                repaired += ' ' * 4
            elif tk[3:-3] == '_DEDENT_':
                indents = indents[4:]
                repaired = repaired[:-4]
            elif tk[3:-3] == '_NEWLINE_':
                repaired += '\n'
                repaired += indents
            elif tk[3:-3] in ['pass', 'break', 'continue', 'return', 'yield']:
                # Start these statements on their own line; indent one extra
                # level when the previous line opened a block with ':'.
                flag = (repaired[-2] == ':') if repaired else False
                repaired += '\n'
                repaired += indents
                repaired += (' ' * 4 if flag else '') + tk[3:-3] + " "
            elif tk[3:-3] == '_NAME_':
                repaired += 'simple_name '
            else:
                repaired += tk[3:-3] + " "
        elif tk.startswith('<<$'):
            # Replace operation: consume one concrete token, emit replacement.
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            if tk[3:-3] == '_INDENT_':
                indents += ' ' * 4
                repaired += ' ' * 4
            elif tk[3:-3] == '_DEDENT_':
                indents = indents[4:]
                repaired = repaired[:-4]
            elif tk[3:-3] == '_NEWLINE_':
                repaired += '\n'
                repaired += indents
            elif tk[3:-3] == '_NAME_':
                repaired += 'simple_name '
            elif tk[3:-3] in ['pass', 'break', 'continue', 'return', 'yield']:
                flag = (repaired[-2] == ':') if repaired else False
                repaired += '\n'
                repaired += indents
                repaired += (' ' * 4 if flag else '') + tk[3:-3] + " "
            else:
                repaired += tk[3:-3] + " "
        elif tk.startswith('<<-'):
            # Delete operation: consume one concrete token, emit nothing.
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
        elif tk == '_INDENT_':
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            indents += ' ' * 4
            repaired += ' ' * 4
        elif tk == '_DEDENT_':
            # Added checks because of problem with 6100+ in test set
            # It was popping from the empty list
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            indents = indents[4:]
            repaired = repaired[:-4]
        elif tk == '_NEWLINE_':
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            repaired += '\n'
            repaired += indents
        elif tk == '_ENDMARKER_':
            repaired += '\n'
        elif tk == '_arrow_':
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            repaired += "-> "
        elif tk in ['_NAME_', '_STRING_', '_NUMBER_']:
            # Abstract placeholders copy the concrete token through verbatim.
            if actual_tkns:
                repaired += actual_tkns.pop(-1) + " "
            else:
                return repaired
        else:
            if actual_tkns:
                actual_tkns.pop(-1)
            else:
                return repaired
            # Punctuation spacing: '[' hugs what follows; ',', ')' and ':'
            # attach to the previous token but keep a trailing space.
            if tk == '[':
                repaired += tk
            elif tk in ['(', ')', ']', '.', ',', ':'] and len(repaired) > 0:
                if tk == ',' or tk == ')' or tk == ':':
                    repaired = (repaired[:-1] if repaired[-1] == ' ' else repaired) + tk + " "
                else:
                    repaired = (repaired[:-1] if repaired[-1] == ' ' else repaired) + tk
            else:
                repaired += tk + " "
    return repaired
def concretize_seq(seq, grammar):
    """Replace each abstract tag in *seq* with its first concrete token."""
    return ' '.join(grammar[tag][0].rhs[0] for tag in seq.split())
def get_token_list(prog, terminals):
    """Lex *prog* into its abstract space-separated token string."""
    return Lexer(terminals).lex(prog)
def get_actual_token_list(prog, terminals):
    """Lex *prog* keeping the concrete (actual) token spellings."""
    return Lexer(terminals).clean_with_lex(prog)
def main():
    """Command-line driver: parse an input program with a given error grammar.

    Positional arguments: optional ``draw`` flag, the grammar file path and
    the input program path.  The program is lexed, printed token-by-token,
    then split at each top-level ``def`` and every chunk is parsed separately,
    printing True/False per chunk.
    """
    parser_description = ("Runs the Earley parser according to a given "
                          "grammar.")
    parser = argparse.ArgumentParser(description=parser_description)
    parser.add_argument('draw', nargs='?', default=False)
    parser.add_argument('grammar_file', help="Filepath to grammer file")
    parser.add_argument('input_program', help="The input program to parse")
    parser.add_argument('--show-chart', action="store_true")
    args = parser.parse_args()
    grammar = ErrorGrammar.load_grammar(args.grammar_file)
    terminals = grammar.get_alphabet()
    # grammar.update_error_grammar([')'], [']'])
    def run_parse(sentence):
        parse = ErrorEarleyParse(sentence, grammar)
        parse.parse()
        return parse.get_rules(), parse.chart
    program_path = Path(args.input_program)
    # # Strip the sentence of any puncutation.
    # stripped_sentence = sentence
    # for p in string.punctuation:
    #     stripped_sentence = stripped_sentence.replace(p, '')
    input_program = program_path.read_text()
    # print(input_program)
    lexer = Lexer(terminals)
    tokenized_prog = lexer.lex(input_program)
    # print(parse(input_program))
    # print(asttokens.ASTTokens(input_program, parse=True).tokens)
    print(tokenized_prog.replace('_NEWLINE_ ', '\n'))
    print('-----------------')
    lexed_funs = filter(lambda f: f != '', map(lambda ff: lexer.lex('def ' + ff[1]) if ff[0] > 0 else lexer.lex(ff[1]), enumerate(input_program.split('\ndef '))))
    for lexed in lexed_funs:
        parsed, _ = run_parse(lexed)
        # NOTE(review): `pass` does not skip the rest of the loop body, so a
        # failed chunk prints False and then True as well -- confirm whether
        # `continue` was intended here.
        if parsed is None:
            print(False)
            pass
        elif not parsed:
            print(False)
            pass
        # print(parsed)
        # parsed.pretty_print()
        print(True)
    # parsed, chart = run_parse(tokenized_prog)
    # if args.show_chart:
    #     print(chart)
    #     print('\n')
    # if parsed is None:
    #     print(input_program + '\n')
    # else:
    #     if args.draw:
    #         parsed.draw()
    #     else:
    #         print("True")
    #         # parsed.pretty_print()
# Standard script entry point: run the CLI driver when executed directly.
if __name__ == '__main__':
    main()
| [
"george.p.sakkas@gmail.com"
] | george.p.sakkas@gmail.com |
4fd52351a670070b2a03c71c3135823c46cdb129 | 4526ed71f39d70111c3787ec90b4932a183c452c | /2016/Pyquen_WToMuNu_TuneZ2_8160GeV_pythia6_reverse_cfi.py | f47777dec69a0a32fa2aa7721bb9c36a5c2f145d | [] | no_license | CMS-HIN-dilepton/MCRequest | 773f414739efc529dc957a044232478b1c4f1c03 | ff49d22fde2c4a006fe7fa02d4cf53d794f91888 | refs/heads/master | 2021-05-02T12:16:51.891664 | 2020-06-20T18:35:52 | 2020-06-20T18:35:52 | 45,127,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,712 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.PythiaUEZ2Settings_cfi import *
generator = cms.EDFilter("PyquenGeneratorFilter",
                         # Beam setup: 8160 GeV centre-of-mass with a Pb (A=208)
                         # beam/target.  protonSide=2 presumably selects the
                         # "reverse" beam direction named in this file -- confirm
                         # against the Pyquen interface documentation.
                         comEnergy = cms.double(8160.0),
                         aBeamTarget = cms.double(208.0),
                         protonSide = cms.untracked.int32(2),
                         qgpInitialTemperature = cms.double(1.0), ## initial temperature of QGP; allowed range [0.2,2.0]GeV;
                         qgpProperTimeFormation = cms.double(0.1), ## proper time of QGP formation; allowed range [0.01,10.0]fm/c;
                         hadronFreezoutTemperature = cms.double(0.14),
                         doRadiativeEnLoss = cms.bool(True), ## if true, perform partonic radiative en loss
                         doCollisionalEnLoss = cms.bool(False),
                         qgpNumQuarkFlavor = cms.int32(0), ## number of active quark flavors in qgp; allowed values: 0,1,2,3
                         numQuarkFlavor = cms.int32(0), ## to be removed
                         doIsospin = cms.bool(True),
                         angularSpectrumSelector = cms.int32(0), ## angular emitted gluon spectrum :
                         embeddingMode = cms.bool(False),
                         backgroundLabel = cms.InputTag("generator"), ## ineffective in no mixing
                         # Quenching disabled and impact parameter fixed at 0.
                         doQuench = cms.bool(False),
                         bFixed = cms.double(0.0), ## fixed impact param (fm); valid only if cflag_=0
                         cFlag = cms.int32(0), ## centrality flag
                         bMin = cms.double(0.0), ## min impact param (fm); valid only if cflag_!=0
                         bMax = cms.double(0.0), ## max impact param (fm); valid only if cflag_!=0
                         pythiaPylistVerbosity = cms.untracked.int32(1),
                         pythiaHepMCVerbosity = cms.untracked.bool(True),
                         maxEventsToPrint = cms.untracked.int32(0),
                         # PYTHIA6 process card: W production (MSUB(2)) with all
                         # decay channels off except W -> mu nu (MDME(207,1)=1).
                         PythiaParameters = cms.PSet(pythiaUESettingsBlock,
                                                     processParameters = cms.vstring('MSEL = 0 !User defined processes',
                                                                                     'MSUB(2) = 1 !W production',
                                                                                     'MDME(190,1) = 0 !W decay into dbar u',
                                                                                     'MDME(191,1) = 0 !W decay into dbar c',
                                                                                     'MDME(192,1) = 0 !W decay into dbar t',
                                                                                     'MDME(194,1) = 0 !W decay into sbar u',
                                                                                     'MDME(195,1) = 0 !W decay into sbar c',
                                                                                     'MDME(196,1) = 0 !W decay into sbar t',
                                                                                     'MDME(198,1) = 0 !W decay into bbar u',
                                                                                     'MDME(199,1) = 0 !W decay into bbar c',
                                                                                     'MDME(200,1) = 0 !W decay into bbar t',
                                                                                     'MDME(205,1) = 0 !W decay into bbar tp',
                                                                                     'MDME(206,1) = 0 !W decay into e+ nu_e',
                                                                                     'MDME(207,1) = 1 !W decay into mu+ nu_mu',
                                                                                     'MDME(208,1) = 0 !W decay into tau+ nu_tau'),
                                                     parameterSets = cms.vstring('pythiaUESettings',
                                                                                 'processParameters')
                                                     )
                         )
configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.1 $'),
    name = cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/Configuration/GenProduction/python/HI/Pyquen_WToMuNu_TuneZ2_5023GeV_pythia6_cfi.py,v $'),
    # NOTE(review): the annotation/name still reference the 5.023 TeV / 5023 GeV
    # source, while comEnergy above is 8160 GeV -- metadata looks stale.
    annotation = cms.untracked.string('PYQUEN-Wmunu Tune Z2 at 5.023 TeV')
)
#ProductionFilterSequence = cms.Sequence(hiSignal)
ProductionFilterSequence = cms.Sequence(generator)
| [
"emilien.chapon@cern.ch"
] | emilien.chapon@cern.ch |
d62b565f143c85773fee608f690ebd1561940858 | f182295caf301abd4bc44e74655fd9b5f6ad9e5e | /polls/migrations/0002_auto_20160728_1305.py | 188abaa0d5e10cfef541803fba912076ec3332b6 | [] | no_license | vinay13/Yugma-NxtLife | eaabbc48112c26a58ed4781d1be4d6fb071b3927 | 019e68e25636a9373d5648f03f743af1c6e7d945 | refs/heads/master | 2020-12-20T18:54:14.510195 | 2016-08-10T14:32:22 | 2016-08-10T14:32:22 | 63,804,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,069 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-28 13:05
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django 1.9.8 schema migration for the polls app.

    Narrows the choice sets on ``Option.options_type`` / ``Poll.poll_type``
    and switches ``Poll`` timestamp defaults to ``datetime.datetime.now``.
    NOTE(review): ``datetime.datetime.now`` is naive local time rather than
    ``django.utils.timezone.now`` -- confirm the project runs with USE_TZ off.
    """
    # Must be applied on top of the initial polls migration.
    dependencies = [
        ('polls', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='option',
            name='options_type',
            field=models.PositiveSmallIntegerField(choices=[(2, 'MULTIPLE'), (1, 'SINGLE')], default=1),
        ),
        migrations.AlterField(
            model_name='poll',
            name='created_at',
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
        migrations.AlterField(
            model_name='poll',
            name='modified_at',
            field=models.DateTimeField(default=datetime.datetime.now),
        ),
        migrations.AlterField(
            model_name='poll',
            name='poll_type',
            field=models.PositiveSmallIntegerField(choices=[(2, 'CLASS'), (1, 'SCHOOL')], default=1),
        ),
    ]
| [
"aitvinay@gmail.com"
] | aitvinay@gmail.com |
b05505c9b445af3674a860fe8d4fd78dda734376 | b9cd1b9758e58f00335900fd120e1d47c23600ce | /tests/test_pipeline_chipseq.py | 543d39616238213767af72ed8a467fa36a735e65 | [
"Apache-2.0"
] | permissive | Multiscale-Genomics/mg-process-fastq | 4fb7fef68526237f06312a3f137df031a448731c | 50c7115c0c1a6af48dc34f275e469d1b9eb02999 | refs/heads/master | 2020-04-12T06:46:01.100270 | 2018-11-19T16:05:03 | 2018-11-19T16:05:03 | 64,320,140 | 2 | 4 | Apache-2.0 | 2018-11-16T16:54:54 | 2016-07-27T15:29:25 | Python | UTF-8 | Python | false | false | 7,060 | py | """
.. See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest
from basic_modules.metadata import Metadata
from process_chipseq import process_chipseq
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_00():
    """
    Test case to ensure that the ChIP-seq pipeline code works.
    Running the pipeline with the test data from the command line:
    .. code-block:: none
       runcompss \\
           --lang=python \\
           --library_path=${HOME}/bin \\
           --pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
           --log_level=debug \\
           process_chipseq.py \\
              --taxon_id 9606 \\
              --genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
              --assembly GRCh38 \\
              --file /<dataset_dir>/DRR000150.22.fastq
    """
    # Inputs: bundled test genome, FASTQ reads and a pre-built BWA index
    # archive, all under tests/data/.
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    files = {
        'genome': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
        'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
        'index': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
    }
    metadata = {
        "genome": Metadata(
            "Assembly", "fasta", files['genome'], None,
            {'assembly': 'GCA_000001405.22'}),
        "loc": Metadata(
            "data_chip_seq", "fastq", files['loc'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "index": Metadata(
            "Index", "bwa_index", files['index'], files['genome'],
            {'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
    }
    # Expected output locations are derived from the input FASTQ name.
    root_name = files['loc'].split("/")
    root_name[-1] = root_name[-1].replace('.fastq', '')
    files_out = {
        "bam": files['loc'].replace(".fastq", ".bam"),
        "bai": files['loc'].replace(".fastq", ".bai"),
        "filtered": files['loc'].replace(".fastq", "_filtered.bam"),
        "output": files['loc'].replace(".fastq", ".tsv"),
        'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
        'summits': '/'.join(root_name) + '_filtered_summits.bed',
        'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
        'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
    }
    # Run the pipeline with MACS2's --nomodel option enabled.
    chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
    chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out)  # pylint: disable=unused-variable
    print(chipseq_files)
    # Add tests for all files created
    for f_out in chipseq_files:
        print("CHIP-SEQ RESULTS FILE:", f_out)
        # assert chipseq_files[f_out] == files_out[f_out]
        assert os.path.isfile(chipseq_files[f_out]) is True
        assert os.path.getsize(chipseq_files[f_out]) > 0
        # Clean up each verified output; a failed assert above leaves the
        # remaining files behind.
        try:
            os.remove(chipseq_files[f_out])
        except OSError as ose:
            print("Error: %s - %s." % (ose.filename, ose.strerror))
@pytest.mark.chipseq
@pytest.mark.pipeline
def test_chipseq_pipeline_01():
    """
    Test case to ensure that the ChIP-seq pipeline code works.
    Running the pipeline with the test data from the command line:
    .. code-block:: none
       runcompss \\
           --lang=python \\
           --library_path=${HOME}/bin \\
           --pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \\
           --log_level=debug \\
           process_chipseq.py \\
              --taxon_id 9606 \\
              --genome /<dataset_dir>/Human.GCA_000001405.22.fasta \\
              --assembly GRCh38 \\
              --file /<dataset_dir>/DRR000150.22.fastq
    """
    # Same pipeline as test 00, but the genome and index are supplied under
    # the "*_public" keys to exercise that input path.
    resource_path = os.path.join(os.path.dirname(__file__), "data/")
    files = {
        'genome_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta',
        'loc': resource_path + 'macs2.Human.DRR000150.22.fastq',
        'index_public': resource_path + 'macs2.Human.GCA_000001405.22.fasta.bwa.tar.gz'
    }
    metadata = {
        "genome_public": Metadata(
            "Assembly", "fasta", files['genome_public'], None,
            {'assembly': 'GCA_000001405.22'}),
        "loc": Metadata(
            "data_chip_seq", "fastq", files['loc'], None,
            {'assembly': 'GCA_000001405.22'}
        ),
        "index_public": Metadata(
            "Index", "bwa_index", files['index_public'], files['genome_public'],
            {'assembly': 'GCA_000001405.22', "tool": "bwa_indexer"}),
    }
    # Expected output locations are derived from the input FASTQ name.
    root_name = files['loc'].split("/")
    root_name[-1] = root_name[-1].replace('.fastq', '')
    files_out = {
        "bam": files['loc'].replace(".fastq", ".bam"),
        "bai": files['loc'].replace(".fastq", ".bai"),
        "filtered": files['loc'].replace(".fastq", "_filtered.bam"),
        "output": files['loc'].replace(".fastq", ".tsv"),
        'narrow_peak': '/'.join(root_name) + '_filtered_peaks.narrowPeak',
        'summits': '/'.join(root_name) + '_filtered_summits.bed',
        'broad_peak': '/'.join(root_name) + '_filtered_peaks.broadPeak',
        'gapped_peak': '/'.join(root_name) + '_filtered_peaks.gappedPeak'
    }
    # Run the pipeline with MACS2's --nomodel option enabled.
    chipseq_handle = process_chipseq({"macs_nomodel_param": True, "execution": resource_path})
    chipseq_files, chipseq_meta = chipseq_handle.run(files, metadata, files_out)  # pylint: disable=unused-variable
    print(chipseq_files)
    # Add tests for all files created
    for f_out in chipseq_files:
        print("CHIP-SEQ RESULTS FILE:", f_out)
        # assert chipseq_files[f_out] == files_out[f_out]
        assert os.path.isfile(chipseq_files[f_out]) is True
        assert os.path.getsize(chipseq_files[f_out]) > 0
        # Clean up each verified output; a failed assert above leaves the
        # remaining files behind.
        try:
            os.remove(chipseq_files[f_out])
        except OSError as ose:
            print("Error: %s - %s." % (ose.filename, ose.strerror))
| [
"mark.mcdowall@gmail.com"
] | mark.mcdowall@gmail.com |
da7c301915f51e93207435c64624819447942c59 | 8f32c9ba8d3a0157e03e6f1cecd01ef95705368d | /Vjezba.AboutMeApp/config.py | 89b6ba1ed1cc68c0f7a2c52f345beb33a2e555ef | [] | no_license | v3nc1/WebProgramiranje | 656b506bccfb9e6dd2339cb39f46d78f25b30a89 | a0505575e46eeaa129e7452382dbf4e9de081a32 | refs/heads/master | 2020-04-03T19:49:46.791824 | 2018-11-21T16:49:59 | 2018-11-21T16:49:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 136 | py |
class Config:
    """Static configuration holding the site owner's personal details."""
    # NOTE(review): bank name and IBAN are published verbatim by the app --
    # confirm these are placeholder values, not real account data.
    name = "Venci"
    surname = "Ivoš"
    banka = "Dalmatinska banka d.d."
    iban = "HR1723600001101234565"
| [
"noreply@github.com"
] | v3nc1.noreply@github.com |
c4e2b115dbe1fb2ca6e5626b223b88a4f3dde73e | b0e67fbd4c42aba24f7d4bccb99e9aa037c0b7d5 | /lda/train_LDA.py | 39ade7589cc4a9b7f6176b106691125a03142547 | [] | no_license | gombru/SocialMediaWeakLabeling | f979aea8218be115758ff8e1e9a945a701ac99b9 | 518437903ba7370a4098303a41196a08f1d6a58e | refs/heads/master | 2022-02-26T17:49:08.997335 | 2022-02-10T12:54:57 | 2022-02-10T12:54:57 | 84,461,511 | 15 | 4 | null | null | null | null | UTF-8 | Python | false | false | 5,004 | py | # Trains and saves an LDA model with the given text files.
from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
import glob
import string
import random
import numpy as np
# --- Configuration and shared state for LDA training (Python 2 script:
# `print` statements, `string.letters` and `str.decode` below). ---
# Characters kept when filtering captions (ASCII letters, digits, space).
whitelist = string.letters + string.digits + ' '
instagram_text_data_path = '../../../datasets/SocialMedia/captions_resized_1M/cities_instagram/'
model_path = '../../../datasets/SocialMedia/models/LDA/lda_model_cities_instagram_1M_500_5000chunck.model'
# Extra stop words merged into the English stop-word list below.
words2filter = ['rt','http','t','gt','co','s','https','http','tweet','markars_','photo','pictur','picture','say','photo','much','tweet','now','blog']
cities = ['london','newyork','sydney','losangeles','chicago','melbourne','miami','toronto','singapore','sanfrancisco']
num_topics = 500
threads = 8
passes = 1 #Passes over the whole corpus
chunksize = 5000 #Update the model every 10000 documents
# See https://radimrehurek.com/gensim/wiki.html
update_every = 1
# Tokens seen fewer than this many times are dropped from the corpus.
repetition_threshold = 20
#Initialize Tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# create English stop words list
en_stop = get_stop_words('en')
# add own stop words
for w in words2filter:
    en_stop.append(w)
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
posts_text = []
texts = [] #List of lists of tokens
# -- LOAD DATA FROM INSTAGRAM --
# Read every caption file per city, strip '#' and non-whitelisted characters,
# and collect the lowercased captions in posts_text.
for city in cities:
    print "Loading data from " + city
    for file_name in glob.glob(instagram_text_data_path + city + "/*.txt"):
        caption = ""
        filtered_caption = ""
        # NOTE(review): file handles are never closed here; consider `with`.
        file = open(file_name, "r")
        for line in file:
            caption = caption + line
        # Replace hashtags with spaces
        caption = caption.replace('#', ' ')
        # Keep only letters and numbers
        for char in caption:
            if char in whitelist:
                filtered_caption += char
        posts_text.append(filtered_caption.decode('utf-8').lower())
        # print filtered_caption.decode('utf-8')
print "Number of posts: " + str(len(posts_text))
print "Creating tokens"
# Tokenise, remove stop words and stem each caption; progress every 10k posts.
c= 0
for t in posts_text:
    c += 1
    if c % 10000 == 0:
        print c
    # NOTE(review): the bare except silently drops any caption that fails
    # tokenisation/stemming -- confirm this best-effort behaviour is intended.
    try:
        t = t.lower()
        tokens = tokenizer.tokenize(t)
        # remove stop words from tokens
        stopped_tokens = [i for i in tokens if not i in en_stop]
        # stem token
        text = [p_stemmer.stem(i) for i in stopped_tokens]
        # add processed text to list of lists
        texts.append(text)
    except:
        continue
#Remove element from list if memory limitation TODO
#del tweets_text[0]
# Raw captions are no longer needed; free the memory.
posts_text = []
# Remove words that appear less than N times
print "Removing words appearing less than: " + str(repetition_threshold)
from collections import defaultdict
frequency = defaultdict(int)
for text in texts:
    for token in text:
        frequency[token] += 1
texts = [[token for token in text if frequency[token] > repetition_threshold] for text in texts]
# Construct a document-term matrix to understand how frequently each term occurs within each document
# The Dictionary() function traverses texts, assigning a unique integer id to each unique token while also collecting word counts and relevant statistics.
# To see each token unique integer id, try print(dictionary.token2id)
dictionary = corpora.Dictionary(texts)
print(dictionary)
# TODO check this
# dictionary.compactify()
# Filter out tokens that appear in less than no_below documents (absolute number) or more than no_above documents (fraction of total corpus size, not absolute number).
# after (1) and (2), keep only the first keep_n most frequent tokens (or keep all if None).
# dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=None)
# dictionary.compactify() # remove gaps in id sequence after words that were removed
# Convert dictionary to a BoW
# The result is a list of vectors equal to the number of documents. Each document contains tuples (term ID, term frequency)
corpus = [dictionary.doc2bow(text) for text in texts]
texts = []
#Randomize training elements
# NOTE(review): np.random.permutation over a list of variable-length BoW lists
# produces an object array -- confirm gensim consumes it as expected.
corpus = np.random.permutation(corpus)
# Generate an LDA model
print "Creating LDA model"
# the minimum_probability=0 argument is necessary in order for
# gensim to return the full document-topic-distribution matrix. If
# this argument is omitted and left to the gensim default of 0.01,
# then all document-topic weights below that threshold will be
# returned as NaN, violating the subsequent LDAvis assumption that
# all rows (documents) in the document-topic-distribution matrix sum
# to 1.
#ldamodel = models.ldamodel.LdaModel(corpus, num_topics=num_topics, id2word = dictionary, passes=passes, minimum_probability=0)
ldamodel = models.LdaMulticore(corpus, num_topics=num_topics, id2word = dictionary, chunksize=chunksize, passes=passes, workers=threads, minimum_probability=0)
ldamodel.save(model_path)
# Our LDA model is now stored as ldamodel
print(ldamodel.print_topics(num_topics=8, num_words=10))
print "DONE"
| [
"raulgombru@gmail.com"
] | raulgombru@gmail.com |
09b194ff61b3e409331b5fb117555aaaa998c26a | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/find_max_20200722114432.py | 63139ffb63e0f85ee0899b804e0ff82130382654 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | items = [6,20,8,19,56,23,87,41,49,53]
def find_max(items):
    """Recursively return the largest element of the non-empty list *items*."""
    # Breaking condition
    if len(items) == 1:
        return items[0]
    op1 = items[0]
    op2 = find_max(items[1:])
    # BUG FIX: the original file ended here without returning anything
    # (an abandoned editor-history snapshot). Propagate the larger of
    # the head and the maximum of the tail.
    return op1 if op1 > op2 else op2
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
907769470c06a9adb96a73b04f9ea62d43e0d19c | 0ee4debe412b996de7f5a592800515ae7104c5a5 | /scripts/artifacts/fitbitHeart.py | 6710cc413d11222ce7d902507ea13b4b8ec52313 | [
"MIT"
] | permissive | kibaffo33/ALEAPP | af7eebd9d4ab078c57c4108ebab0c80c89df8630 | ca50b7d665dccb846ff601b7b797d754eb8100d9 | refs/heads/master | 2022-06-15T03:55:37.407875 | 2022-06-13T20:39:47 | 2022-06-13T20:39:47 | 243,058,738 | 1 | 0 | null | 2020-02-25T17:29:43 | 2020-02-25T17:29:36 | null | UTF-8 | Python | false | false | 1,477 | py | import sqlite3
import textwrap
from scripts.artifact_report import ArtifactHtmlReport
from scripts.ilapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly
def get_fitbitHeart(files_found, report_folder, seeker, wrap_text):
    """Parse the Fitbit heart-rate SQLite database and emit reports.

    Reads HEART_RATE_DAILY_SUMMARY from the first file in *files_found*
    and writes an HTML artifact report, a TSV export and a timeline
    entry into *report_folder*. *seeker* and *wrap_text* are part of the
    common artifact-plugin signature but are unused here.
    """
    file_found = str(files_found[0])
    db = open_sqlite_db_readonly(file_found)
    cursor = db.cursor()
    # DATE_TIME is stored in milliseconds since the epoch; divide by
    # 1000 and let SQLite render a human-readable timestamp.
    cursor.execute('''
    SELECT
    datetime("DATE_TIME"/1000, 'unixepoch'),
    AVERAGE_HEART_RATE,
    RESTING_HEART_RATE
    FROM HEART_RATE_DAILY_SUMMARY
    ''')
    all_rows = cursor.fetchall()
    usageentries = len(all_rows)
    if usageentries > 0:
        report = ArtifactHtmlReport('Fitbit Heart Rate Summary')
        report.start_artifact_report(report_folder, 'Fitbit Heart Rate Summary')
        report.add_script()
        data_headers = ('Timestamp','Avg. Heart Rate','Resting Heart Rate')
        data_list = []
        # Re-pack each row as a tuple for the report/TSV/timeline writers.
        for row in all_rows:
            data_list.append((row[0],row[1],row[2]))
        report.write_artifact_data_table(data_headers, data_list, file_found)
        report.end_artifact_report()
        tsvname = f'Fitbit Heart Rate Summary'
        tsv(report_folder, data_headers, data_list, tsvname)
        tlactivity = f'Fitbit Heart Rate Summary'
        timeline(report_folder, tlactivity, data_list, data_headers)
    else:
        logfunc('No Fitbit Heart Rate Summary data available')
    db.close()
| [
"abrignoni@gmail.com"
] | abrignoni@gmail.com |
e16937fd318b748f7699903b9c9c0f36cc120fd6 | 89884fd3dc126375513dd71066397c879805ca79 | /sample_config.py | eb858000dfe24a648bcf41a840240e2c7c5f8dd4 | [] | no_license | VSPMG143/URL-Torrent-To-TG | 81783787bc76ea3857b097e98859bbc686e1c3ab | 8ab9ac24aad5db0d73eb112d88c9d3a9b9112619 | refs/heads/main | 2023-02-11T06:45:18.837795 | 2021-01-12T09:43:05 | 2021-01-12T09:43:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,757 | py | import os
class Config(object):
    """Bot configuration, populated from environment variables.

    Each value falls back to the literal default shown below when the
    corresponding environment variable is unset.
    """
    # get a token from @BotFather
    TG_BOT_TOKEN = os.environ.get("TG_BOT_TOKEN", "")
    # The Telegram API things
    APP_ID = int(os.environ.get("APP_ID", 12345))
    API_HASH = os.environ.get("API_HASH")
    OWNER_ID = int(os.environ.get("OWNER_ID", 1204927413))
    # Get these values from my.telegram.org
    # to store the channel ID who are authorized to use the bot
    AUTH_CHANNEL = set(int(x) for x in os.environ.get("AUTH_CHANNEL", "").split())
    # the download location, where the HTTP Server runs
    DOWNLOAD_LOCATION = "./DOWNLOADS"
    # Telegram maximum file upload size (bytes; ~50 MB / ~2 GB below)
    MAX_FILE_SIZE = 50000000
    TG_MAX_FILE_SIZE = 2097152000
    FREE_USER_MAX_FILE_SIZE = 50000000
    # chunk size that should be used with requests
    CHUNK_SIZE = int(os.environ.get("CHUNK_SIZE", 128))
    # default thumbnail to be used in the videos
    DEF_THUMB_NAIL_VID_S = os.environ.get("DEF_THUMB_NAIL_VID_S", "https://placehold.it/90x90")
    # maximum message length in Telegram
    MAX_MESSAGE_LENGTH = 4096
    # set timeout for subprocess
    PROCESS_MAX_TIMEOUT = 3600
    #
    ARIA_TWO_STARTED_PORT = int(os.environ.get("ARIA_TWO_STARTED_PORT", 6800))
    EDIT_SLEEP_TIME_OUT = int(os.environ.get("EDIT_SLEEP_TIME_OUT", 15))
    MAX_TIME_TO_WAIT_FOR_TORRENTS_TO_START = int(os.environ.get("MAX_TIME_TO_WAIT_FOR_TORRENTS_TO_START", 600))
    MAX_TG_SPLIT_FILE_SIZE = int(os.environ.get("MAX_TG_SPLIT_FILE_SIZE", 1072864000))
    # add config vars for the display progress
    FINISHED_PROGRESS_STR = os.environ.get("FINISHED_PROGRESS_STR", "✪")
    UN_FINISHED_PROGRESS_STR = os.environ.get("UN_FINISHED_PROGRESS_STR", "○")
    # add offensive API
    TG_OFFENSIVE_API = os.environ.get("TG_OFFENSIVE_API", None)
    CUSTOM_FILE_NAME = os.environ.get("CUSTOM_FILE_NAME", "")
    # Bot command names (suffixed with the bot's username).
    LEECH_COMMAND = os.environ.get("LEECH_COMMAND", "gen@Universal_leecher_Robot")
    YTDL_COMMAND = os.environ.get("YTDL_COMMAND", "ytdl@Universal_leecher_Robot")
    DESTINATION_FOLDER = os.environ.get("DESTINATION_FOLDER", "downloads")
    CANCEL_COMMAND_G = os.environ.get("CANCEL_COMMAND_G", "cancel@Universal_leecher_Robot")
    GET_SIZE_G = os.environ.get("GET_SIZE_G", "getsize")
    STATUS_COMMAND = os.environ.get("STATUS_COMMAND", "status@Universal_leecher_Robot")
    SAVE_THUMBNAIL = os.environ.get("SAVE_THUMBNAIL", "savethumbnail@Universal_leecher_Robot")
    CLEAR_THUMBNAIL = os.environ.get("CLEAR_THUMBNAIL", "clearthumbnail@Universal_leecher_Robot")
    UPLOAD_AS_DOC = os.environ.get("UPLOAD_AS_DOC", "upload_as_doc@Universal_leecher_Robot")
    PYTDL_COMMAND_G = os.environ.get("PYTDL_COMMAND_G", "pytdl@Universal_leecher_Robot")
    LOG_COMMAND = os.environ.get("LOG_COMMAND", "log@Universal_leecher_Robot")
| [
"tushalverma69@gmail.com"
] | tushalverma69@gmail.com |
80f0dc3503964a694ce88127e5b38b87ec5ed35a | 6c075935a769b3df905f0c0bcfdafbec76f65a02 | /HistoricalVehicleApplication-master/MappingApplicationEnvironment/bin/rst2latex.py | 8a199640ba2984abcf460ffe1be8150a53cb8929 | [] | no_license | EdwardDeaver/HistoricalSnowPlowMapping | 61ed61e42934ab674b8fd999e29a9e8b2227a0f2 | c2370d56f30a111dcbe73b2ac0cc0b50433bb44e | refs/heads/master | 2022-12-15T17:11:03.493050 | 2020-09-17T22:30:55 | 2020-09-17T22:30:55 | 281,167,735 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 860 | py | #!/home/ec2-user/HistoricalVehicleApplication/MappingApplicationEnvironment/bin/python3
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
# Enable locale-aware string handling when a locale is available; the
# bare except mirrors the upstream docutils front ends, where any
# failure here is deliberately non-fatal.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
               'sources. '
               'Reads from <source> (default is stdin) and writes to '
               '<destination> (default is stdout). See '
               '<http://docutils.sourceforge.net/docs/user/latex.html> for '
               'the full reference.')
# Run the docutils command-line publisher with the LaTeX writer.
publish_cmdline(writer_name='latex', description=description)
| [
"deaverec@lemoyne.edu"
] | deaverec@lemoyne.edu |
932ec70aaf41bd35af14a43939a003ec41918efb | a26442bb9869b70ac2d7c8023c042994316cd5a7 | /Stock/mysite/urls.py | 442351aa5dc0b5feb5ee97e86ad1c0bc6df0ac12 | [] | no_license | shashank-100/Stock-market-prediction | 60fe79e501c0ba45228f6f12df3820304f1f3ad9 | 22496b6cf09a285c90350f1d8d3fdab4f2ea85ec | refs/heads/master | 2022-05-14T13:05:22.734598 | 2020-10-01T12:00:24 | 2020-10-01T12:00:24 | 212,944,477 | 0 | 0 | null | 2022-04-22T22:30:27 | 2019-10-05T04:38:19 | Python | UTF-8 | Python | false | false | 508 | py | from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from .core import views
# Route table: site pages, Django's built-in auth views under
# /accounts/, and the admin interface.
urlpatterns = [
    path('', views.home, name='home'),
    path('signup/', views.signup, name='signup'),
    path('stocks/', views.stocks, name='stocks'),
    path('accounts/', include('django.contrib.auth.urls')),
    path('admin/', admin.site.urls),
    path('results/',views.results, name='results')
]
urlpatterns+=staticfiles_urlpatterns() | [
"noreply@github.com"
] | shashank-100.noreply@github.com |
9d3f1e1eaaf40864ef9e266b4fd7d25f9d328b21 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /mYGipMffRTYxYmv5i_3.py | 1c5ebcf46c86f6f779f84e38b73917754d45490f | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,929 | py |
import itertools
def simple_equation(a,b,c):
numbers = [a,b,c]
for eachcombo in itertools.permutations(numbers,2):
first_num = eachcombo[0]
second_num = eachcombo[1]
if c != first_num and c != second_num:
if first_num + second_num == c:
return '{}+{}={}'.format(first_num,second_num,c)
elif first_num - second_num == c:
return '{}-{}={}'.format(first_num,second_num,c)
elif first_num * second_num == c:
return '{}*{}={}'.format(first_num,second_num,c)
try:
if first_num // second_num == c:
return '{}/{}={}'.format(first_num,second_num,c)
except Exception as e:
continue
elif b != first_num and b != second_num:
if first_num + second_num == b:
return '{}+{}={}'.format(first_num,second_num,b)
elif first_num - second_num == b:
return '{}-{}={}'.format(first_num,second_num,b)
elif first_num * second_num == b:
return '{}*{}={}'.format(first_num,second_num,b)
try:
if first_num // second_num == b:
return '{}/{}={}'.format(first_num,second_num,b)
except Exception as e:
continue
elif a != first_num and a != second_num:
if first_num + second_num == a:
return '{}+{}={}'.format(first_num,second_num,a)
elif first_num - second_num == a:
return '{}-{}={}'.format(first_num,second_num,a)
elif first_num * second_num == a:
return '{}*{}={}'.format(first_num,second_num,a)
try:
if first_num // second_num == a:
return '{}/{}={}'.format(first_num,second_num,a)
except Exception as e:
continue
return ''
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
30599484b003006d885094cb5381f174d82072c1 | 1506ff625b6ad1ee7117af54044acd72dc6f5c81 | /three.py | 7869afed84869cd740ab516b1e31db37336f24f1 | [] | no_license | Nicortiz72/ICPC_AlgorithmsProblems | a4e7c1bf2b4f7804c1c7da61e39e775c4bc6a5ae | 462b1e696d8c9e27a0363eb257dfce7e2c580306 | refs/heads/master | 2023-07-12T10:14:30.609834 | 2021-08-16T17:55:10 | 2021-08-16T17:55:10 | 291,610,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | from sys import *
#Nicolas Ortiz
#Competition problem three
def main():
    """Read integers from stdin until 0; for each n print a set of powers of 3.

    The set is derived from the binary representation of n - 1: bit p
    (counting from the least-significant end) selects 3**p. Output is
    formatted as '{ a, b, ... }' or '{ }' for the empty set.
    """
    n=int(stdin.readline())
    while n!=0:
        # bin() yields e.g. '0b101'; keep it as a list of characters.
        l=list(bin(n-1))
        ansl=[]
        p=0
        # Walk bits from least significant (end of the string) down to
        # index 2, stopping before the '0b' prefix; p tracks the power.
        for j in range(len(l)-1,1,-1):
            if(l[j]=='1'):
                ansl.append(str(3**p))
            p+=1
        if(len(ansl)!=0):
            print("{ ",end="")
            print(", ".join(ansl),end="")
            print(" }")
        else: print("{ }")
        n=int(stdin.readline())
main()
| [
"noreply@github.com"
] | Nicortiz72.noreply@github.com |
7835a7e6cad0eb7a9e5f83067e90e2f34c5bf396 | 85ddc3fa8829ee821cef751943f11303e8729bc0 | /PythonFiles/ML code/scripts/unet_predict_JP_2Dfor3D_noEnsemble.py | f5fb0e70facfb203c290b4d887039952e9851dd0 | [
"MIT"
] | permissive | CS527Applied-Machine-Learning-for-Games/Team-Scrubs | 84b69be9fa57e91e7223fe61c549a0dbc0fcb1a9 | e1ac40972c86b4918d866c69c8444f7837ad10af | refs/heads/master | 2023-02-01T00:50:36.061015 | 2020-11-24T20:32:56 | 2020-11-24T20:32:56 | 293,705,692 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,464 | py | import numpy as np
import os
import sys
import cv2
from keras.models import Model
from keras import backend as K
from keras.optimizers import Adam
from keras.utils import plot_model
import tensorflow as tf
from numpy import inf
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import shutil
import SimpleITK as sitk
# Change to your own directory
sys.path.append('../')
#from unet_train_2Dfor3D_mc_Encoding_beta_cell_4label import *
from utils.kerasutils import get_image, correct_data_format, save_model_summary, get_channel_axis
from utils.imageutils import map_label
from utils.input_data_mc import InputSegmentationArrays
from keras.models import load_model
import pandas as pd
from nets.unet_HW import build_net
# Path of the pre-trained 2D segmentation weights to load.
modelPath = './models/mixlabel_z_12_27_19_model.h5'
image_weight = 0.0005
nlabel=5
# One cluster per label (vector of ones of length nlabel).
# NOTE(review): np.int is deprecated/removed in recent NumPy releases.
ncluster=(np.zeros(nlabel,np.int) + 1)
# IDs of the volumes to segment, from a space-delimited table.
predict_IDs = pd.read_csv('../data/test_IDs', delimiter=' ')['ID'].values
cc_weight=0
# NOTE(review): image_weight is assigned twice with the same value.
image_weight=.0005
# interval=int(sys.argv[3])
output_folder= './predictions/'
# First CLI argument selects the visible GPU(s).
os.environ['CUDA_VISIBLE_DEVICES'] = sys.argv[1]
# Network hyper-parameters; these must match the configuration the
# saved weights were trained with.
model_args = dict(
    # num_classes=input_arrays.get_num_classes(),
    num_classes=nlabel,
    base_num_filters=32,
    image_size=(512, 512),
    # image_size=(1024, 1024),
    dropout_rate=0.5,
    optimizer=Adam(lr=5e-5),
    conv_order='conv_first',
    kernel_size=3,
    kernel_cc_weight=cc_weight,
    activation='relu',
    net_depth=5,
    convs_per_depth=2,
    noise_std=1.0,
    ncluster=ncluster,
    unsupervised_weight=image_weight
    # loss=combine_loss([EncodingLength_2D(image_weight=image_weight, L=nlabel), exp_categorical_crossentropy_Im_mc(exp=1.0, class_weights=class_weights,nclass=nlabel, ncluster=ncluster)], [0.5, 0.5])
    # loss=combine_loss([VoI_2D(image_weight=image_weight, sigma=sigma), exp_categorical_crossentropy_Im_mc(exp=exp, class_weights=class_weights,nclass=nclass, ncluster=ncluster)], [1.0-alpha, alpha])
)
net = build_net(**model_args)
# Expose only the 'segmentation' layer output for inference.
segmentation_model = Model(inputs=net.input, outputs=net.get_layer('segmentation').output)
segmentation_model.load_weights(modelPath)
def transfer_image(img):
    """Scale raw image intensities by a fixed factor of 10000.

    Works element-wise on NumPy arrays (and plain scalars). The model
    expects inputs amplified by this constant; a commented-out min/max
    normalization variant that previously cluttered this function has
    been removed.
    """
    return img * 10000
def generator_prediction(id):
    """Load all axial (z) 2D slices for volume *id* into one array.

    Slice files are probed as <prefix>_2D_z_<k>.nii.gz for k = 0, 1, ...
    until the first missing file. Returns an array of shape
    (num_slices, 512, 512, 1) with intensities scaled by transfer_image.
    NOTE(review): the parameter name shadows the builtin id().
    """
    prefix = '../data/Image_3D/'+id+"/z/"+id
    flag=1
    length=0
    # Count consecutive slice files starting at index 0.
    while flag>0:
        fn=prefix+'_2D_z_'+str(length)+'.nii.gz'
        # print(fn)
        if os.path.isfile(fn):
            length += 1
        else:
            flag=0
    # print(length)
    # kk
    imgs = np.zeros((length, 512, 512))
    c = 0
    # Read each slice, rescale it, and stack into the output volume.
    for i in range(0,length):
        imfn = prefix+'_2D_z_'+str(i)+'.nii.gz'
        img = transfer_image(sitk.GetArrayFromImage(sitk.ReadImage(imfn)))
        imgs[c,:,:]=img
        # probs[c,:,:,:]=prob
        c=c+1
    # Add a trailing singleton channel axis expected by the network.
    imgs = np.expand_dims(imgs, axis=-1)
    return imgs
# Segment every test volume slice-by-slice; write a per-slice label map
# after each prediction and the assembled whole-volume label map at the
# end of each volume.
for i in range(len(predict_IDs)):
    id=predict_IDs[i]
    print(id)
    imgs = generator_prediction(id)
    sz = imgs.shape
    asegZ = np.zeros((sz[0], sz[1], sz[2], 5))
    aseg = np.zeros((sz[0], sz[1], sz[2]), np.int8)
    aseg_slice = np.zeros((sz[1], sz[2]), np.int8)
    for j in range(sz[0]):
        # Predict class probabilities for one slice (batch of one).
        seg_mc_Z = segmentation_model.predict(np.expand_dims(imgs[j, :, :, :], axis=0))
        asegZ[j, :, :, :] = seg_mc_Z[0, :, :, :]
        ##new for CS527
        # Hard labels for this slice (argmax over the class channel).
        slice = asegZ[j].argmax(get_channel_axis())
        # NOTE(review): these loops shadow the outer i and j, and read
        # `ind` before any assignment; with ncluster all ones the inner
        # range(0) keeps them inert, but they would fail otherwise.
        for i in range(nlabel):
            for j in range(ncluster[i] - 1):
                slice[slice == ind] = i
                ind = ind + 1
        for row in range(sz[1]):
            aseg_slice[row,:] = slice[row,:]
        itkimage = sitk.GetImageFromArray(aseg_slice)
        sitk.WriteImage(itkimage, output_folder + str(id) + '_' + str(j) + '_autoseg.nii.gz', True)
        print('save seg slice to ' + output_folder + str(id) + '_' + str(j) + '_autoseg.nii.gz')
    # NOTE(review): (asegZ+asegZ)/2 is identical to asegZ; this looks
    # like a leftover from averaging two prediction passes.
    seg_mc = ((asegZ+asegZ)/2).argmax(get_channel_axis())
    seg = seg_mc.copy()
    # Map any cluster indices (>= nlabel) back to their parent label.
    ind = nlabel
    for i in range(nlabel):
        for j in range(ncluster[i] - 1):
            seg[seg == ind] = i
            ind = ind + 1
    for j in range(sz[0]):
        aseg[j,:,:] = seg[j,:,:]
    print(aseg.shape)
    itkimage = sitk.GetImageFromArray(aseg)
    sitk.WriteImage(itkimage, output_folder +str(id)+'_autoseg.nii.gz', True)
    print('save seg to ' + output_folder +str(id)+'_autoseg.nii.gz')
| [
"jpfranci@usc.edu"
] | jpfranci@usc.edu |
ededa83258296969295c8cad52141c1e84705238 | a900904009930d92b89327023ad5f1b8d184ae3d | /usuario e seha.py | 22ca1edcbb0495b1a0fd55d908c51379a29ee282 | [] | no_license | danielrocha1/exercicios-PythonBrasil | d288841666f64addf17d8087d50a550ae9c2324d | 6ae4c5ae9274a2140c613d02e47773ed1dffc90a | refs/heads/master | 2022-02-16T11:58:27.011057 | 2019-08-25T07:49:21 | 2019-08-25T07:49:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 425 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 23:17:39 2019
@author: Brainiac
"""
# Prompt for a username and password; while they are identical, warn
# and ask again (prompts and messages are in Portuguese).
user = input("Entre com o nome de usuario: ")
senha = input("Entre com a senha: ")
if user == senha:
    while user == senha:
        print("usuario e senha não podem ser o mesmo. ")
        user = input("Entre com o nome de usuario: ")
        senha = input("Entre com a senha: ")
# NOTE(review): this final message prints unconditionally, even after a
# valid (non-matching) pair has been entered.
print("usuario e senha não podem ser o mesmo. ")
"noreply@github.com"
] | danielrocha1.noreply@github.com |
5bd5a5cdb2cadd2de1aca2e74be6554ac1a32b2e | bf84f1f2eb99c877e4cda94f656764fd7dd6f00a | /noteBook/registration/admin.py | f7a3a57110037066730aa7faffd5dc3dc870186b | [] | no_license | Jubayer247/mixed | 8ac2c925f92d8e9ab23e3fde28ccae192e4e567e | 6a24e1005e7f69ddc68d85e65e56c6d589c97abb | refs/heads/master | 2023-07-21T23:23:12.462707 | 2021-08-31T00:03:28 | 2021-08-31T00:03:28 | 401,512,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from django.contrib import admin
from registration.models import Person
from registration.models import UserProfileInfro
# Register your models here.
# Expose both models in the Django admin interface.
admin.site.register(Person)
admin.site.register(UserProfileInfro)
| [
"mdjubayer247@gmail.com"
] | mdjubayer247@gmail.com |
c131f362692859a0b68734a5cacc037c5e5f74fb | 86febcf9b1db54e4d3239459a73cdb3017aca646 | /migrate.py | edb1805e25bbcf2b1964fe1d5e571a8b84f713b8 | [] | no_license | ankoval/es_into_arango | 19f55d010f3e613d03a3c00b0288fa43833688d9 | 4d8dd2910dfbeec4ecb7690ab9f6d85fedfc990d | refs/heads/master | 2022-12-09T18:21:36.141322 | 2020-03-02T14:14:40 | 2020-03-02T15:31:14 | 243,818,476 | 0 | 0 | null | 2022-01-21T20:23:38 | 2020-02-28T17:29:36 | Python | UTF-8 | Python | false | false | 1,924 | py | import logging
from pyArango.connection import Connection
from elasticsearch import Elasticsearch
from settings import ES_HOST, ES_INDEX, ARANGO_ROOT_PASSWORD, ARANGO_USERNAME, ARANGO_COLLECTION, ARANGO_URL
def main():
    """Migrate documents from Elasticsearch index ES_INDEX into ArangoDB.

    Documents already present in the Arango collection — matched by
    their ``_file`` attribute, which mirrors the Elasticsearch ``_id``
    — are excluded from the query, so the migration is resumable.
    """
    # Fail fast when any required setting is missing.
    assert ES_INDEX
    assert ES_HOST
    assert ARANGO_URL
    assert ARANGO_ROOT_PASSWORD
    assert ARANGO_COLLECTION
    assert ARANGO_USERNAME
    # ES connection
    es = Elasticsearch([ES_HOST])
    # Arango connection; create the database/collection on first run.
    conn = Connection(arangoURL=ARANGO_URL, username=ARANGO_USERNAME, password=ARANGO_ROOT_PASSWORD)
    if ES_INDEX not in conn.databases:
        conn.createDatabase(name=ES_INDEX)
    db = conn[ES_INDEX]
    if not db.hasCollection(ARANGO_COLLECTION):
        db.createCollection(name=ARANGO_COLLECTION)
    # Collect the ES ids of already-migrated documents so they can be
    # excluded from the search. `or 1000` keeps batchSize sane when the
    # collection is still empty (LENGTH(...) returns 0).
    # BUG FIX: the original hard-coded "Patents" here instead of using
    # the configured ARANGO_COLLECTION like every other query.
    existed_patents_total = db.AQLQuery(f"RETURN LENGTH({ARANGO_COLLECTION})").response['result'][0] or 1000
    existed_patents = db.AQLQuery(
        f"FOR doc IN {ARANGO_COLLECTION} RETURN doc._file",
        batchSize=existed_patents_total
    ).response['result']
    es_query_exclude_existed = {"query": {"bool": {"must_not": [{"ids": {"values": existed_patents}}]}}}
    aql_query_insert = f"INSERT @doc INTO {ARANGO_COLLECTION} LET newDoc = NEW RETURN newDoc"
    # Page through the remaining ES documents with the scroll API.
    patents = es.search(index=ES_INDEX, body=es_query_exclude_existed, scroll='1m', size=100)
    scroll_id = patents['_scroll_id']
    scroll_size = len(patents['hits']['hits'])
    while scroll_size > 0:
        # Insert the current batch into Arango, remembering the ES id.
        for hit in patents['hits']['hits']:
            hit['_file'] = hit['_id']
            db.AQLQuery(aql_query_insert, bindVars={'doc': hit})
            logging.info(f"Added: {hit['_file']}")
        # Fetch the next batch.
        patents = es.scroll(scroll_id=scroll_id, scroll='1m')
        # BUG FIX: the original had a trailing comma here, which made
        # scroll_id a one-element tuple and broke the next scroll call.
        scroll_id = patents['_scroll_id']
        scroll_size = len(patents['hits']['hits'])


if __name__ == '__main__':
    main()
| [
"an.koval@svitla.com"
] | an.koval@svitla.com |
4a8843481bfb0c8be2d71a733fcacdb4bb18c011 | b9cfc7364d623e60fde1c01e3199b17ba73a3ca1 | /app/api/base.py | 267115538089b944e475ba1326a80d75f236c92c | [] | no_license | chrisb-c01/flask-api-skeleton | 31640561fc59805ca5efcb79c3947ffa727e4ebb | 5e328667cde86d696d42b486f48fce9fcc0d0409 | refs/heads/master | 2023-01-24T07:31:48.806934 | 2020-12-05T10:00:46 | 2020-12-05T10:00:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,618 | py | # Standard library imports
import inspect
import uuid
from datetime import datetime as dt
from typing import Optional
# Third party imports
from flask import Request, g, request
from flask_classful import FlaskView
from flask_sqlalchemy import DefaultMeta
from marshmallow.exceptions import ValidationError as SchemaValidationError
from marshmallow.schema import SchemaMeta
from webargs.flaskparser import parser, use_args
# Local application imports
from app.utils import localize_text
# Local folder imports
from .const import DEFAULT_ITEMS_PER_PAGE
from .error import BadRequestError, NotFoundError, ValidationError
from .representation import output_json
from .response import APIResponse
from .schema import APIPaginationDataSchema, BaseSchema
class BaseAPI(FlaskView):
    """Generic CRUD API view built on flask-classful.

    Subclasses supply `model` (SQLAlchemy model), `schema` (marshmallow
    schema class) and `service` (service class constructed with the
    model) to get POST/GET/PUT/DELETE and a paginated index for free.
    """
    base_args = ["args"]
    representations = {
        "application/json": output_json,
        "flask-classful/default": output_json,
    }
    model: Optional[DefaultMeta] = None
    schema: Optional[SchemaMeta] = None
    route_prefix = "/api/"
    route_base: Optional[str] = None
    method_dashified = True
    api_version: Optional[str] = None
    trailing_slash = False
    service: Optional[type] = None
    def _add_api_version(self):
        """Record this view's API version on flask.g (first writer wins)."""
        if getattr(g, "api_version", None) is None:
            g.api_version = self.api_version
    def _log_start_time(self):
        """Stamp the request start time on flask.g (first writer wins)."""
        if getattr(g, "request_start_time", None) is None:
            g.request_start_time = dt.now()
    def _service(self):
        """Instantiate the configured service class bound to this model."""
        assert inspect.isclass(self.service)
        return self.service(model=self.model)
    @staticmethod
    def _add_params():
        """Seed g.request_params from the query string if not already set."""
        if getattr(g, "request_params", None) is None:
            g.request_params = dict(request.args)
    @staticmethod
    def _update_params(data: dict):
        """Merge *data* into g.request_params (data overrides existing keys)."""
        assert isinstance(data, dict)
        request_params = getattr(g, "request_params", None)
        if request_params is None:
            g.request_params = data
        else:
            g.request_params = dict(request_params, **data)
    @staticmethod
    def _add_request_id():
        """Attach a fresh UUID4 to flask.g for request correlation."""
        request_id = uuid.uuid4()
        g.request_id = request_id
    def before_request(self, name, *args, **kwargs):
        """flask-classful hook: populate per-request bookkeeping on g."""
        self._log_start_time()
        self._add_request_id()
        self._add_api_version()
        self._add_params()
    def _get_item_by_id_or_not_found(self, id: int):
        """Fetch one item by id; 400 on a bad id type, 404 when absent."""
        try:
            item = self._service().get_by_id(id)
        except TypeError:
            raise BadRequestError
        if item is None:
            raise NotFoundError
        return item
    @staticmethod
    @parser.error_handler
    def _handle_validation_error(
        error: SchemaValidationError,
        req: Request,
        schema: BaseSchema,
        *,
        error_status_code: int,
        error_headers: dict,
    ):
        """Handles webargs validation error.

        Normalizes error.messages to a list and re-raises it as the
        API's own ValidationError.
        """
        assert hasattr(error, "messages") and error.messages is not None
        error_messages = error.messages
        errors = (
            error_messages if isinstance(error_messages, list) else [error_messages]
        )
        raise ValidationError(errors=errors)
    def post(self):
        """Create an item from the JSON body; 400 on empty body,
        ValidationError on schema failure."""
        request_body = request.get_json()
        if request_body is None:
            message = localize_text("empty_post_body")
            raise BadRequestError(message=message)
        try:
            data = self.schema().load(request_body)
        except SchemaValidationError as err:
            raise ValidationError(errors=err.messages)
        item = self._service().create(data)
        return APIResponse().create_response(item=item, schema=self.schema)
    def get(self, id: int):
        """Return a single item by id; 404 when it does not exist."""
        self._update_params({"id": id})
        schema = self.schema
        assert schema is not None and issubclass(schema, BaseSchema)
        item = self._get_item_by_id_or_not_found(id)
        return APIResponse().create_response(item=item, schema=schema)
    @use_args(APIPaginationDataSchema(), location="query")
    def index(self, args):
        """List items with pagination; requires page_index or start_index."""
        items_per_page: int = args.get("items_per_page") or DEFAULT_ITEMS_PER_PAGE
        page_index: Optional[int] = args.get("page_index")
        start_index: Optional[int] = args.get("start_index")
        if page_index is None and start_index is None:
            error_message = localize_text("missing_page_and_start_index")
            raise BadRequestError(message=error_message)
        query = self._service().list()
        return APIResponse().create_paginated_response(
            query=query,
            items_per_page=items_per_page,
            page_index=page_index,
            start_index=start_index,
            schema=self.schema,
        )
    def put(self, id: int):
        """Update an existing item from the JSON body; 400 on empty body,
        404 when the item is missing, ValidationError on schema failure."""
        self._update_params({"id": id})
        request_body = request.get_json()
        if request_body is None:
            message = localize_text("empty_put_body")
            raise BadRequestError(message=message)
        schema = self.schema
        assert schema is not None and issubclass(schema, BaseSchema)
        try:
            data = schema().load(request_body)
        except SchemaValidationError as err:
            raise ValidationError(errors=err.messages)
        item = self._get_item_by_id_or_not_found(id)
        item = self._service().update(item, data)
        return APIResponse().create_response(item=item, schema=schema)
    def delete(self, id: int):
        """Delete an item by id; 404 when it does not exist."""
        self._update_params({"id": id})
        item = self._get_item_by_id_or_not_found(id)
        self._service().delete(item)
        return APIResponse().create_success_response(
            model=self.model, method=request.method
        )
| [
"chris@c01.nl"
] | chris@c01.nl |
482e40ca48bc52325a067d074ea5ea2f16423282 | 5a671c25778638c9807fcddfb49d149e55db4c8f | /apps/courses/migrations/0003_course_course_org.py | 6f6673508d75f9fa714723fedadba71584d38a29 | [] | no_license | FathallaSelim/OnlineCourses | 629386cff5acba0722e8d896d817a379952a1837 | 560da03ef66ab83f75958063eb69f1a22b5393b4 | refs/heads/master | 2023-01-13T20:00:45.146188 | 2019-12-24T09:39:48 | 2019-12-24T09:39:48 | 228,176,413 | 1 | 1 | null | 2022-12-27T14:58:31 | 2019-12-15T11:55:44 | CSS | UTF-8 | Python | false | false | 588 | py | # Generated by Django 2.0.2 on 2018-03-17 16:17
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the nullable course_org foreign key
    # linking a Course to its CourseOrg in the organizations app.
    dependencies = [
        ('organizations', '0004_auto_20180316_1813'),
        ('courses', '0002_auto_20180308_1700'),
    ]
    operations = [
        migrations.AddField(
            model_name='course',
            name='course_org',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.CourseOrg', verbose_name='课程机构'),
        ),
    ]
| [
"noreply@github.com"
] | FathallaSelim.noreply@github.com |
27b9fa085ff0a35509e63a073ce6dc271c076ef2 | bd80a3f33201ef4a4e0012a266cdaebef1cb8785 | /orders/urls.py | f85f8a0cb88dd04fbe703a47fb678b053903c04a | [
"MIT"
] | permissive | jedrek1993/django_shop | 00d213f9f07f62d04d56ca2e355a9e3e87d5b8a6 | 22e9f437e382edee6e03e3da18b8c080965d55cf | refs/heads/master | 2020-08-04T08:16:48.233840 | 2019-10-06T11:22:08 | 2019-10-06T11:22:08 | 212,070,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | from django.urls import path
from . import views
# URL namespace for reversing, e.g. 'orders:order_create'.
app_name = 'orders'
urlpatterns = [
    path('create/', views.order_create, name='order_create'),
]
| [
"jedrek1993@gmail.com"
] | jedrek1993@gmail.com |
ae5ae37d312277053c8e9dd1eec349e0bd655d8a | d3f17e37afc0107ab272b98dd353fea2178f23a8 | /bin/createfontdatachunk.py | ac5c43230fccb76432df9553318268ba587c67bf | [] | no_license | parmeshwor11/OrderProcessingSystem | a9d96e70f8651e80ddb43be515d2b43d141a8ad0 | 39c960c11813ca63556512d79ce55e0b13cd32af | refs/heads/master | 2021-01-12T13:50:41.441762 | 2016-09-21T16:47:12 | 2016-09-21T16:47:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | #!/home/krishna/Downloads/OrderProcessingSystem/bin/python
from __future__ import print_function
import base64
import os
import sys
if __name__ == "__main__":
    # create font data chunk for embedding
    font = "Tests/images/courB08"
    # Emit a Python snippet to stdout that reconstructs the font from
    # base64 data: first the .pil metrics file, then the .pbm bitmap.
    print("    f._load_pilfont_data(")
    print("        # %s" % os.path.basename(font))
    print("        BytesIO(base64.decodestring(b'''")
    # NOTE(review): base64.encode streams the raw file to stdout; the
    # opened file objects are never explicitly closed.
    base64.encode(open(font + ".pil", "rb"), sys.stdout)
    print("''')), Image.open(BytesIO(base64.decodestring(b'''")
    base64.encode(open(font + ".pbm", "rb"), sys.stdout)
    print("'''))))")
    # End of file
| [
"krishna.karki195@gmail.com"
] | krishna.karki195@gmail.com |
ead25baa834d5068f12928b849eb5912e4cccade | 6bb9670c84145c911e6dfab4804de08ae1eefaaf | /15_Nguyen_Tu_Giang/bai-2.7.py | d415146abc1bc20d6d78728d3bdc1b09d739e665 | [
"MIT"
] | permissive | lpython2006e/student-practices | 77542696e1c69dee74dfef00336ef5fee45c75fb | 84343eae57d86708a7984aa02f77183a4688a508 | refs/heads/master | 2022-12-12T05:16:01.041154 | 2020-09-06T13:21:10 | 2020-09-06T13:21:10 | 278,646,313 | 0 | 8 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | # Write a function that tests whether a string is a palindrome.
def check_palindrome(value):
for i in range(int(len(value)/2)):
# print(value[i], value[len(value) - 1 - int(i)])
if value[i] != value[len(value) - 1 - int(i)]:
return False
else:
return True
print(check_palindrome('1212'))
| [
"noreply@github.com"
] | lpython2006e.noreply@github.com |
5e0fcb59414b74f59452c2e24a232fa23fa2fb66 | 1050e301288df5e9cbbcf6ce20afb3fad4ff9164 | /EMI.py | 0cbce1930546d9c4fece2680dfc3e48e37d757e4 | [] | no_license | KevinGuo-bjut/Visualization-of-EMI-calculation | c86b131cffd07534fc4beb0faa7ec0ba7dbd730b | d5fdea195ddd31c060c038719de4b502d1039518 | refs/heads/main | 2023-04-19T10:48:15.327045 | 2021-05-11T04:06:28 | 2021-05-11T04:06:28 | 366,249,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 1 17:08:24 2020
@author: guokai
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from matplotlib.pyplot import MultipleLocator
import xlwt
# Matplotlib text style shared by the axis labels.
font = {'family' : 'Arial',
        'weight' : 'normal',
        'size' : 25}
# Per-frequency material parameters from EMI.csv: permittivity (e1/e2)
# and permeability (u1/u2) real/imaginary parts, plus 'f' and '2pi_c'.
df_EMI=pd.read_csv(r'EMI.csv')
RL=[]
THK=[]
RL_min=[]
e1=df_EMI['e1']
e2=df_EMI['e2']
u1=df_EMI['u1']
u2=df_EMI['u2']
# Complex sqrt(er*ur) and sqrt(ur/er) used by the input-impedance model.
df_EMI['sqrt_erur']=np.sqrt((e1-e2*1j)*(u1-u2*1j))
df_EMI['sqrt_ur_er']=np.sqrt((u1-u2*1j)/(e1-e2*1j))
plt.figure(figsize=(14,10))
# Sweep absorber thickness over 201 values in [0, 5] (mm, per the
# commented 3D-plot axis labels below) and plot reflection loss vs
# frequency for each thickness.
for i in np.linspace(0,5,201):
    thk=i
    df_EMI['2pifd_c']=df_EMI['2pi_c']*df_EMI['f']*thk
    df_EMI['tanh_x']=np.tanh(df_EMI['2pifd_c']*df_EMI['sqrt_erur']*1j)
    df_EMI['Z_in']=df_EMI['sqrt_ur_er']*df_EMI['tanh_x']
    # Reflection loss in dB from the normalized input impedance.
    df_EMI['RL']=20*np.log10(np.abs((df_EMI['Z_in']-1)/(df_EMI['Z_in']+1)))
    RL.append(df_EMI['RL'].tolist())
    #THK.append(thk)
    RL_min.append(min(df_EMI['RL']))
#    df_EMI['f'][np.argmin(df_EMI['RL'])]
    plt.plot(df_EMI['f']/1E9,df_EMI['RL'],label=str(i))
#plt.legend(['5','4.22','4.215','4.210','4.205','4.20','4.195','4.19','4.185','4.18','4'])
#plt.legend()
plt.xlabel('Frequency (GHz)',font)
plt.ylabel('Reflection loss (dB)',font)
plt.tick_params(labelsize=20)
# Report the global minimum RL and the thickness at which it occurs
# (console messages are in Chinese: "minimum RL" / "thickness at min").
print('RL最小值为', min(RL_min))
thk_list=np.linspace(0,5,201).tolist()
print('RL最小值对应的厚度为', thk_list[np.argmin(RL_min)])
#ax=plt.axes(projection='3d')
#x=THK
#y=df_EMI['f']/1E9
#z=RL
#ax=plt.gca()
#y_major_locator=MultipleLocator(1)
#x_major_locator=MultipleLocator(1)
#ax.yaxis.set_major_locator(y_major_locator)
#ax.xaxis.set_major_locator(x_major_locator)
#plt.tick_params(labelsize=20)
#ax.contour3D(x,y,z,500,cmap='viridis',rstride=1,cstride=1,edgecolor='none')
#ax.set_xlabel('Thickness (mm)',font)
#ax.set_ylabel('Frequency (GHz)',font)
#ax.set_zlabel('Reflection loss (dB)',font)
#ax.view_init(elev=0, azim=0)
| [
"noreply@github.com"
] | KevinGuo-bjut.noreply@github.com |
23b46451e35b9db885aa86a48b58c9bd8667e354 | 014f7936e55ed70d53b48a4b3dac38c65733eeb0 | /Ameya/News_Comments_Scraping/fb_comment_scraping.py | fd25909feddd53c24cc983a9ea6a09136893ea49 | [
"MIT"
] | permissive | ameya03dot/News-Scrapping-and-Naiive-Bayes | deb34cb524c7d2aa12dcf83f760373e0bf4d36a1 | 14474458ce6d6b80523bce50f62001e21daa0ce3 | refs/heads/main | 2023-02-25T08:57:52.489594 | 2021-01-29T09:15:45 | 2021-01-29T09:15:45 | 334,092,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,513 | py | # -*- coding: utf-8 -*-
"""Fb_comment_scraping.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1DvMunIXV8hrN4SmuK3GEcJ5nnVl3Q6-b
"""
import json
import datetime
import csv
import time
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
# Graph API credentials; an app access token is "<app_id>|<app_secret>".
app_id = "<enter here>"
app_secret ="<enter here>" # DO NOT SHARE WITH ANYONE!
# ID used to name/read the statuses CSV and the output comments CSV.
file_id = "434174436675167"
access_token = app_id + "|" + app_secret
def request_until_succeed(url):
    """Fetch *url*, retrying every 5 seconds until an HTTP 200 arrives.

    Returns the raw response body (bytes).  Blocks indefinitely if the
    endpoint never answers successfully.
    """
    req = Request(url)
    while True:
        try:
            response = urlopen(req)
            if response.getcode() == 200:
                return response.read()
        except Exception as e:
            print(e)
            time.sleep(5)
            print("Error for URL {}: {}".format(url, datetime.datetime.now()))
            print("Retrying.")
# Needed to write tricky unicode correctly to csv
def unicode_decode(text):
    """Round-trip *text* through UTF-8 so it writes safely to the CSV.

    Falls back to returning the raw UTF-8 bytes when the round trip
    cannot be decoded again.
    """
    encoded = text.encode('utf-8')
    try:
        return encoded.decode()
    except UnicodeDecodeError:
        return encoded
def getFacebookCommentFeedUrl(base_url):
    """Append the field selection for a comment-feed request to *base_url*."""
    field_selector = ("&fields=id,message,reactions.limit(0).summary(true)"
                      ",created_time,comments,from,attachment")
    return base_url + field_selector
def getReactionsForComments(base_url):
    """Fetch per-type reaction totals for every comment under *base_url*.

    Returns ``{comment_id: (like, love, wow, haha, sad, angry)}`` -- one
    count appended per reaction type, in the order listed below.
    """
    reaction_types = ['like', 'love', 'wow', 'haha', 'sad', 'angry']
    reactions_dict = {}  # comment id -> growing tuple of counts
    for reaction_type in reaction_types:
        fields = "&fields=reactions.type({}).limit(0).summary(total_count)".format(
            reaction_type.upper())
        data = json.loads(request_until_succeed(base_url + fields))['data']
        # A set drops the rare duplicate entries the API returns.
        deduped = {(status['id'], status['reactions']['summary']['total_count'])
                   for status in data}
        for comment_id, count in deduped:
            reactions_dict[comment_id] = reactions_dict.get(comment_id, ()) + (count,)
    return reactions_dict
def processFacebookComment(comment, status_id, parent_id=''):
    """Flatten one Graph API comment dict into a CSV-ready tuple.

    Returns ``(comment_id, status_id, parent_id, message, author,
    published, num_reactions)``.  Missing keys are tolerated: an absent
    message becomes '' (or an ``[[ATTACHMENT]]`` tag), absent reactions
    count as 0.  The timestamp is shifted from UTC to EST (UTC-5) and
    reformatted for spreadsheet programs.
    """
    comment_id = comment['id']
    # BUGFIX: compare strings with ``==`` instead of identity (``is``),
    # which only worked by accident via CPython string interning and is a
    # SyntaxWarning on modern Python.
    comment_message = '' if 'message' not in comment or comment['message'] \
        == '' else unicode_decode(comment['message'])
    comment_author = unicode_decode(comment['from']['name'])
    num_reactions = 0 if 'reactions' not in comment else \
        comment['reactions']['summary']['total_count']
    if 'attachment' in comment:
        attachment_type = comment['attachment']['type']
        attachment_type = 'gif' if attachment_type == 'animated_image_share' \
            else attachment_type
        attach_tag = "[[{}]]".format(attachment_type.upper())
        # Tag the attachment inside the message column.
        comment_message = attach_tag if comment_message == '' else \
            comment_message + " " + attach_tag
    # Times arrive in UTC; shift to EST and use a spreadsheet-friendly format.
    comment_published = datetime.datetime.strptime(
        comment['created_time'], '%Y-%m-%dT%H:%M:%S+0000')
    comment_published = comment_published + datetime.timedelta(hours=-5)  # EST
    comment_published = comment_published.strftime(
        '%Y-%m-%d %H:%M:%S')  # best time format for spreadsheet programs
    return (comment_id, status_id, parent_id, comment_message, comment_author,
            comment_published, num_reactions)
def scrapeFacebookPageFeedComments(page_id, access_token):
    """Scrape every comment (and nested reply) for the statuses listed in
    ``<file_id>_facebook_statuses.csv`` and write them, together with
    per-type reaction counts, to ``<file_id>_facebook_comments.csv``.

    NOTE(review): the function reads the module-level ``file_id`` rather
    than its ``page_id`` parameter, and the pagination cursor ``after`` is
    never reset between statuses -- confirm before relying on multi-status
    pagination.
    """
    with open('{}_facebook_comments.csv'.format(file_id), 'w') as file:
        w = csv.writer(file)
        w.writerow(["comment_id", "status_id", "parent_id", "comment_message",
                    "comment_author", "comment_published", "num_reactions",
                    "num_likes", "num_loves", "num_wows", "num_hahas",
                    "num_sads", "num_angrys", "num_special"])
        num_processed = 0
        scrape_starttime = datetime.datetime.now()
        after = ''
        base = "https://graph.facebook.com/v2.9"
        parameters = "/?limit={}&access_token={}".format(
            100, access_token)
        print("Scraping {} Comments From Posts: {}\n".format(
            file_id, scrape_starttime))
        with open('{}_facebook_statuses.csv'.format(file_id), 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            # Uncomment below line to scrape comments for a specific status_id
            # reader = [dict(status_id='5550296508_10154352768246509')]
            for status in reader:
                has_next_page = True
                while has_next_page:
                    node = "/{}/comments".format(status['status_id'])
                    # BUGFIX: string comparison with ``==`` (was ``is``).
                    after = '' if after == '' else "&after={}".format(after)
                    base_url = base + node + parameters + after
                    url = getFacebookCommentFeedUrl(base_url)
                    # print(url)
                    comments = json.loads(request_until_succeed(url))
                    reactions = getReactionsForComments(base_url)
                    for comment in comments['data']:
                        comment_data = processFacebookComment(
                            comment, status['status_id'])
                        reactions_data = reactions[comment_data[0]]
                        # calculate thankful/pride through algebra
                        num_special = comment_data[6] - sum(reactions_data)
                        w.writerow(comment_data + reactions_data +
                                   (num_special, ))
                        if 'comments' in comment:
                            # The comment has replies: walk its own
                            # paginated sub-feed the same way.
                            has_next_subpage = True
                            sub_after = ''
                            while has_next_subpage:
                                sub_node = "/{}/comments".format(comment['id'])
                                # BUGFIX: ``==`` instead of ``is`` here too.
                                sub_after = '' if sub_after == '' else "&after={}".format(
                                    sub_after)
                                sub_base_url = base + sub_node + parameters + sub_after
                                sub_url = getFacebookCommentFeedUrl(
                                    sub_base_url)
                                sub_comments = json.loads(
                                    request_until_succeed(sub_url))
                                sub_reactions = getReactionsForComments(
                                    sub_base_url)
                                for sub_comment in sub_comments['data']:
                                    sub_comment_data = processFacebookComment(
                                        sub_comment, status['status_id'], comment['id'])
                                    sub_reactions_data = sub_reactions[
                                        sub_comment_data[0]]
                                    num_sub_special = sub_comment_data[
                                        6] - sum(sub_reactions_data)
                                    w.writerow(sub_comment_data +
                                               sub_reactions_data + (num_sub_special,))
                                    num_processed += 1
                                    if num_processed % 100 == 0:
                                        print("{} Comments Processed: {}".format(
                                            num_processed,
                                            datetime.datetime.now()))
                                    if 'paging' in sub_comments:
                                        if 'next' in sub_comments['paging']:
                                            sub_after = sub_comments[
                                                'paging']['cursors']['after']
                                        else:
                                            has_next_subpage = False
                                    else:
                                        has_next_subpage = False
                                # output progress occasionally to make sure code is not
                                # stalling
                                num_processed += 1
                                if num_processed % 100 == 0:
                                    print("{} Comments Processed: {}".format(
                                        num_processed, datetime.datetime.now()))
                    if 'paging' in comments:
                        if 'next' in comments['paging']:
                            after = comments['paging']['cursors']['after']
                        else:
                            has_next_page = False
                    else:
                        has_next_page = False
    print("\nDone!\n{} Comments Processed in {}".format(
        num_processed, datetime.datetime.now() - scrape_starttime))
if __name__ == '__main__':
scrapeFacebookPageFeedComments(file_id, access_token) | [
"noreply@github.com"
] | ameya03dot.noreply@github.com |
2e2a5abee434464fbc25603297c483f0204e4a90 | 055ee67a0ab720811e029c1b99885cb6401feb19 | /test/deleteSnippets.py | b0e48f0167273999579096caf6c2d803d283bdf3 | [] | no_license | Karkina/dealServiceSite | 2b7b9e1439091c173a2e564b241bde67c516e16d | 3a7fa3a478f3d4f1c52ce0d814f3ab299af5b84b | refs/heads/main | 2023-09-03T08:52:18.207751 | 2021-10-29T13:27:38 | 2021-10-29T13:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | import requests
url = 'http://127.0.0.1:8000/snippets/3'
x = requests.delete(url)
print(x) | [
"nico.soumare@gmail.com"
] | nico.soumare@gmail.com |
054848eb10e426f57678abd36a0affc5f58c603f | 7d7c175a03a739f13cb1d35fd1ccb5eb6b521a7f | /Tree.py | 4f0f82f5d89226f7f32b9dee7b3fa23beb0d9443 | [] | no_license | Mayurdeo/Speed-Dating-Decision-Prediction | 53f282ec4f0d959dd5db9b988e6ab6f21e5b35e9 | 4342ed5554332f51974f1a6eb7da494299a07faa | refs/heads/main | 2023-02-26T00:27:52.219706 | 2021-01-25T01:11:13 | 2021-01-25T01:11:13 | 332,578,433 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,191 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from collections import Counter
import copy
import random
pd.options.mode.chained_assignment = None
# In[2]:
# Current version doesn't support these cleanly; silence DeprecationWarning below.
import warnings
warnings.simplefilter(action='ignore', category=DeprecationWarning)
# In[3]:
# #Load the data set
# testSet=pd.read_csv("testSet.csv",delimiter=',')
# trainingSet=pd.read_csv("trainingSet.csv",delimiter=',')
# In[4]:
# #reading file
# Prompt interactively for the training/test CSV file names and load them.
trainingSet=pd.read_csv("{}".format(input("Enter the Training Dataset with csv extension : eg.trainingSet.csv ")),delimiter=',')
testSet=pd.read_csv("{}".format(input("Enter the Test Dataset with csv extension :eg.testSet.csv ")),delimiter=',')
# In[5]:
class DecisionTree():
    """CART-style decision tree classifier using the Gini impurity.

    Works on pandas DataFrames whose LAST column holds the class label
    (named by ``decisionVar``).  Fitting operates on the raw NumPy array
    (``DataFrame.values``), so feature columns are addressed by position.

    A fitted tree is a nested dict ``{"feature = value": [left, right]}``
    where ``left`` is the subtree for rows equal to ``value`` and
    ``right`` covers all other rows; leaves are plain class labels.
    """
    def __init__(self, trainingSet, testSet, maxDepth=8, exampleLimit=50,
                 decisionVar='decision', vectorised=False):
        self.trainingSet = trainingSet    # DataFrame used for fitting
        self.testSet = testSet            # DataFrame used for evaluation
        self.maxDepth = maxDepth          # maximum recursion depth of tree()
        self.exampleLimit = exampleLimit  # stop splitting below this many rows
        self.decisionVar = decisionVar    # name of the label column
        self.vectorised = vectorised      # kept for API compatibility (unused here)
    def labelPurity(self, data):
        """Return True when every row of *data* carries the same class label."""
        decisionColumn = data[:, -1]
        return len(np.unique(decisionColumn)) == 1
    def majorityClass(self, data):
        """Return the most frequent class label in *data*."""
        decisionColumn = data[:, -1]
        uniqueClass, counts = np.unique(decisionColumn, return_counts=True)
        return uniqueClass[counts.argmax()]
    def attributeSplit(self, data):
        """Map every feature column index to its array of unique values."""
        splits = {}
        _, col = data.shape
        for ind in range(col - 1):  # last column is the label, skip it
            splits[ind] = np.unique(data[:, ind])
        return splits
    def dataSplit(self, data, splitCol, splitValue):
        """Partition rows into (rows equal to splitValue, all other rows)."""
        splitColValues = data[:, splitCol]
        dataLeft = data[splitColValues == splitValue]
        dataRight = data[splitColValues != splitValue]
        return dataLeft, dataRight
    def giniIndex(self, data):
        """Gini impurity of the label column: 1 - sum(p_i^2)."""
        _, count = np.unique(data[:, -1], return_counts=True)
        prob = count / count.sum()
        return 1 - np.sum(prob ** 2)
    def giniGain(self, dataLeft, dataRight):
        """Size-weighted Gini impurity of a candidate split (lower is better)."""
        n = len(dataLeft) + len(dataRight)
        return (len(dataLeft) / n) * self.giniIndex(dataLeft) \
            + (len(dataRight) / n) * self.giniIndex(dataRight)
    def bestSplit(self, data, attrSplits):
        """Scan every candidate (column, value) split and return the one
        with the lowest weighted Gini impurity.  Ties go to the last
        candidate scanned (``<=``), matching the original search order."""
        beforeGini = np.inf
        for column_index in attrSplits:
            for value in attrSplits[column_index]:
                dataLeft, dataRight = self.dataSplit(data, splitCol=column_index, splitValue=value)
                afterGini = self.giniGain(dataLeft, dataRight)
                if afterGini <= beforeGini:
                    beforeGini = afterGini
                    bestSplitColumn = column_index
                    bestSplitValue = value
        return bestSplitColumn, bestSplitValue
    def tree(self, examples, counter=0):
        """Recursively grow the decision tree.

        The top-level call (counter == 0) receives a DataFrame; recursive
        calls receive the raw NumPy array.  Returns a leaf label or a
        nested dict (see class docstring).
        """
        if counter == 0:
            # Column names go in a module-level global so recursive calls
            # (which only see the raw array) can label their splits.
            global featureNames
            featureNames = examples.columns
            data = examples.values
        else:
            data = examples
        # Stop: pure node, too few examples, or maximum depth reached.
        if (self.labelPurity(data)) or (len(data) < self.exampleLimit) or (counter == self.maxDepth):
            return self.majorityClass(data)
        counter += 1
        attrSplits = self.attributeSplit(data)
        split_column, split_value = self.bestSplit(data, attrSplits)
        dataLeft, dataRight = self.dataSplit(data, split_column, split_value)
        if len(dataLeft) == 0 or len(dataRight) == 0:
            # Degenerate split (e.g. a constant column): make a leaf.
            # BUGFIX: was ``majorityClass(data)`` (NameError at runtime);
            # the method must be reached through ``self``.
            return self.majorityClass(data)
        name = featureNames[split_column]
        query = "{} = {}".format(name, split_value)
        sub_tree = {query: []}
        false = self.tree(dataLeft, counter)   # rows matching the split value
        true = self.tree(dataRight, counter)   # all remaining rows
        if false == true:
            # Both children predict the same label: collapse to a leaf.
            sub_tree = false
        else:
            sub_tree[query].append(false)
            sub_tree[query].append(true)
        return sub_tree
    def predict(self, example, tree):
        """Classify one example (a row/Series) by walking a fitted tree."""
        query = list(tree.keys())[0]
        # Query strings look like "<feature> = <value>"; note this parse
        # breaks if a feature name itself contains a space.
        name, cp, value = query.split(" ")
        if str(example[name]) == value:
            pred = tree[query][0]
        else:
            pred = tree[query][1]
        if not isinstance(pred, dict):
            return pred
        return self.predict(example, pred)
    def accuracy(self, example, root, key):
        """Percent accuracy of *root* on DataFrame *example*; also prints it."""
        prediction = example.apply(self.predict, args=(root,), axis=1)
        # Generalized: compare against the configured label column
        # (default 'decision', so existing callers are unchanged).
        boolean = prediction == example[self.decisionVar]
        acc = np.round(boolean.mean() * 100, decimals=2)
        if key == "train":
            print("Training Accuracy DT : {}".format(acc))
        elif key == "test":
            print("Testing Accuracy DT : {}".format(acc))
        return acc
    def bootstrapPredict(self, example, root):
        """Per-row predictions of *root* on *example* as a NumPy array."""
        prediction = example.apply(self.predict, args=(root,), axis=1)
        return np.array(prediction)
    def bootstrapAccuracy(self, example, prediction, key, model=None):
        """Accuracy of externally supplied predictions (used by ensembles)."""
        boolean = prediction == example[self.decisionVar]
        acc = np.round(boolean.mean() * 100, decimals=2)
        if model == "bagging":
            if key == "train":
                print("Training Accuracy BT : {}".format(acc))
            elif key == "test":
                print("Testing Accuracy BT : {}".format(acc))
        if model == "forests":
            if key == "train":
                print("Training Accuracy RF : {}".format(acc))
            elif key == "test":
                print("Testing Accuracy RF : {}".format(acc))
        return acc
# In[6]:
class RandomForests(DecisionTree):
    """Random-forest variant of DecisionTree: each node considers only a
    random subspace of ``int(sqrt(total_columns))`` features when
    searching for a split.  Splitting, Gini scoring and prediction are
    inherited from DecisionTree.
    """
    def __init__(self, trainingSet, testSet, maxDepth=8, exampleLimit=50,
                 decisionVar='decision', vectorised=False):
        # Same attributes as the parent -- delegate instead of duplicating.
        super().__init__(trainingSet, testSet, maxDepth=maxDepth,
                         exampleLimit=exampleLimit, decisionVar=decisionVar,
                         vectorised=vectorised)
    def randomSplitSplits(self, data):
        """Like attributeSplit, but restricted to a random feature subspace.

        Uses the global ``random`` state; seed externally for
        reproducibility.
        """
        randomSplits = {}
        _, nCol = data.shape
        columnInd = list(range(nCol - 1))  # feature columns only
        rnSubspace = int(np.sqrt(nCol))
        if rnSubspace and rnSubspace <= len(columnInd):
            columnInd = random.sample(population=columnInd, k=rnSubspace)
        for column_index in columnInd:
            randomSplits[column_index] = np.unique(data[:, column_index])
        return randomSplits
    def forests(self, examples, counter=0):
        """Grow one randomized tree; same contract as DecisionTree.tree()."""
        if counter == 0:
            global featureNames
            featureNames = examples.columns
            data = examples.values
        else:
            data = examples
        if (self.labelPurity(data)) or (len(data) < self.exampleLimit) or (counter == self.maxDepth):
            return self.majorityClass(data)
        counter += 1
        attrSplits = self.randomSplitSplits(data)
        split_column, split_value = self.bestSplit(data, attrSplits)
        dataLeft, dataRight = self.dataSplit(data, split_column, split_value)
        if len(dataLeft) == 0 or len(dataRight) == 0:
            return self.majorityClass(data)
        name = featureNames[split_column]
        query = "{} = {}".format(name, split_value)
        sub_tree = {query: []}
        false = self.forests(dataLeft, counter)
        true = self.forests(dataRight, counter)
        if false == true:
            sub_tree = false
        else:
            sub_tree[query].append(false)
            sub_tree[query].append(true)
        return sub_tree
# In[7]:
#To find mode value in a dataframe
def modeValue(x):
    """Return a two-element Series: (most frequent value in *x*, its count)."""
    most_common_value, occurrences = Counter(x).most_common(1)[0]
    return pd.Series([most_common_value, occurrences])
# In[8]:
def decisionTree(trainingSet,testSet):
    """Fit one depth-8 decision tree and print train/test accuracy."""
    classifier = DecisionTree(trainingSet, testSet, 8, 50, "decision", vectorised=True)
    fitted_tree = classifier.tree(trainingSet)
    for dataset, split in ((trainingSet, "train"), (testSet, "test")):
        classifier.accuracy(dataset, root=fitted_tree, key=split)
# In[9]:
def bagging(trainingSet,testSet):
    """Bagging ensemble: fit 30 trees on bootstrap resamples, predict by
    row-wise majority vote (via modeValue), and print train/test accuracy.

    NOTE(review): ``Bagging`` is reused after the loop, so predictions run
    through the instance from the *last* bootstrap iteration (harmless here
    because predict() only reads the passed-in tree, but worth confirming).
    """
    trees=[]
    predictTrain=pd.DataFrame()
    predictTest=pd.DataFrame()
    stopCriteria=30
    for index in range(stopCriteria):
        # Bootstrap sample: same size as the training set, with replacement.
        examples=trainingSet.sample(frac=1,replace=True)
        Bagging=DecisionTree(examples,testSet,8,50,"decision",vectorised=True)
        tree=Bagging.tree(examples)
        trees.append(tree)
    # One prediction column per tree, for both train and test sets.
    for count,tree in enumerate(trees):
        predTrain=Bagging.bootstrapPredict(trainingSet,tree)
        predTest=Bagging.bootstrapPredict(testSet,tree)
        predictTrain[str(count)]=predTrain
        predictTest[str(count)]=predTest
    # Majority vote across the 30 per-tree prediction columns.
    predictTrain[['frequent','freq_count']] = predictTrain.apply(modeValue, axis=1)
    predictTest[['frequent','freq_count']] = predictTest.apply(modeValue, axis=1)
    Bagging.bootstrapAccuracy(trainingSet,(predictTrain["frequent"]).values,key="train",model="bagging")
    Bagging.bootstrapAccuracy(testSet,(predictTest["frequent"]).values,key="test",model="bagging")
# In[10]:
def randomForests(trainingSet,testSet):
    """Random forest: fit 30 randomized trees on bootstrap resamples,
    predict by row-wise majority vote, and print train/test accuracy.

    NOTE(review): like bagging(), the post-loop calls reuse ``RF`` from the
    last iteration -- confirm that is intentional.
    """
    forests=[]
    rf_predictTrain=pd.DataFrame()
    rf_predictTest=pd.DataFrame()
    stopCriteria=30
    for index in range(stopCriteria):
        # Bootstrap sample drawn with replacement.
        examples=trainingSet.sample(frac=1,replace=True)
        RF=RandomForests(examples,testSet,8,50,"decision",vectorised=True)
        rf_tree=RF.forests(examples)
        forests.append(rf_tree)
    # One prediction column per tree (note: loop variable is spelled 'trea').
    for count,trea in enumerate(forests):
        rf_predTrain=RF.bootstrapPredict(trainingSet,trea)
        rf_predTest=RF.bootstrapPredict(testSet,trea)
        rf_predictTrain[str(count)]=rf_predTrain
        rf_predictTest[str(count)]=rf_predTest
    # Majority vote across the 30 per-tree prediction columns.
    rf_predictTrain[['frequent','freq_count']] = rf_predictTrain.apply(modeValue, axis=1)
    rf_predictTest[['frequent','freq_count']] = rf_predictTest.apply(modeValue, axis=1)
    RF.bootstrapAccuracy(trainingSet,(rf_predictTrain["frequent"]).values,key="train",model="forests")
    RF.bootstrapAccuracy(testSet,(rf_predictTest["frequent"]).values,key="test",model="forests")
# In[11]:
# Simple CLI: ask three times which model to run (1=decision tree,
# 2=bagging, 3=random forest); anything else falls through to the
# "Invalid Input" branch below.
for i in range(3):
    modelIdx=int(input("Enter model index : eg.1 or 2 or 3"))
    if modelIdx==1:
        decisionTree(trainingSet,testSet)
    elif modelIdx==2:
        bagging(trainingSet,testSet)
    elif modelIdx==3:
        randomForests(trainingSet,testSet)
else:
print("Invalid Input") | [
"noreply@github.com"
] | Mayurdeo.noreply@github.com |
def partition(CustomList, low, high):
    """Lomuto partition of CustomList[low..high] around the last element.

    Rearranges the slice in place so that everything <= the pivot sits
    before it, and returns the pivot's final index.
    """
    pivot = CustomList[high]
    boundary = low - 1  # last index known to hold a value <= pivot
    for cursor in range(low, high):
        if CustomList[cursor] <= pivot:
            boundary += 1
            CustomList[boundary], CustomList[cursor] = CustomList[cursor], CustomList[boundary]
    # Drop the pivot into its final slot, just past the <= region.
    CustomList[boundary + 1], CustomList[high] = CustomList[high], CustomList[boundary + 1]
    return boundary + 1
def Quicksort(CustomList, low, high):
    """Sort CustomList[low..high] in place via recursive quicksort."""
    if low >= high:
        return
    pivot_index = partition(CustomList, low, high)
    Quicksort(CustomList, low, pivot_index - 1)
    Quicksort(CustomList, pivot_index + 1, high)
# Demo: sort a small list in place and show it before/after.
BasicList = [2, 6, 4, 8, 1, 3]
print(BasicList)
Quicksort(BasicList, 0, 5)
print(BasicList) | [
"joris97jansen@gmail.com"
] | joris97jansen@gmail.com |
f155e40ab83b0e0703d0bfe760ae2c41de4fcdb7 | e5e9ee9e4db2e400e7f87647501ee412c13d76e5 | /python/python-base/turtle/fun.py | 5cab82c491cf1fee60b9d184422b03d78cfa699e | [] | no_license | beingveera/whole-python | 524441eec44379c36cb1cfeccdbc65bf1c15d2f6 | 3f2b3cb7528afb9605ab6f9d4d2efc856a247af5 | refs/heads/main | 2023-05-15T06:28:03.058105 | 2021-06-05T09:37:47 | 2021-06-05T09:37:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | import turtle as t
# Turtle-graphics doodle: move off-screen, reposition, clear, then draw
# circles with a thick blue pen (tracer(1,3) throttles screen updates).
t.fd(1000)
t.setposition(200,-490)
t.clear()
t.speed(0.1)
t.circle(-50)
t.tracer(1,3)
t.color('blue')
t.pensize(10)
t.circle(20) | [
"sharma.lokesh.222001@gmail.com"
] | sharma.lokesh.222001@gmail.com |
af9e249d069e6b3c73492d4f6283f5317d5bd7c3 | f4ac290cc3eb58cc286db3740ed805d9453e2b53 | /run.py | aff65d59d82bc7bf9c562394d51b75341f5cf01a | [] | no_license | mrazizi/Paper-Implementation-Text-Summarization-using-GAN | 7f4c49dec72f3b2cb61a66f67f1540a05f3c6e04 | b8c1244fcdcfb37a096cc76737e4f387208cf036 | refs/heads/main | 2023-08-31T06:35:30.938602 | 2021-10-19T05:06:33 | 2021-10-19T05:06:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,059 | py | import os
import time
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
import tensorflow as tf
from models.pointer_model.model import PointerModel
from models import Generator, Discriminator, Reconstructor
from data_util import config
from data_util.batcher import Batcher
from data_util.data import Vocab
from models.WGAN import WGAN
# Checkpoint callback: save only the best weights (by validation loss) to
# ./weights/model.<epoch>-<val_loss>.hdf5.
cp = ModelCheckpoint("./weights/model.{epoch:02d}-{val_loss:.2f}.hdf5",
                     monitor='val_loss',
                     verbose=0,
                     save_best_only=True,
                     save_weights_only=True,
                     mode='auto')
# create a directory if it doesn't already exist
if not os.path.exists('./weights'):
    os.makedirs('./weights/')
class Train(object):
    """Training harness wiring the vocabulary and data batchers to the
    Generator / Discriminator / WGAN summarization models."""
    def __init__(self):
        """Load the vocabulary and build train/validation batchers from the
        paths configured in ``data_util.config``."""
        self.vocab = Vocab(config.vocab_path, config.vocab_size)
        self.train_batcher = Batcher(config.train_data_path,
                                     self.vocab,
                                     hps=config.hps,
                                     single_pass=False)
        self.val_batcher = Batcher(config.eval_data_path,
                                   self.vocab,
                                   hps=config.hps,
                                   single_pass=False)
    def setup_train_generator(self, model_file_path=None):
        """Build, compile and fit the Generator alone.

        NOTE(review): ``model_file_path`` is currently unused, and the
        ``fit_generator`` keywords (samples_per_epoch / nb_epoch / ...) are
        Keras 1.x style -- confirm against the installed Keras version.
        """
        generator = Generator(num_embeddings=config.vocab_size,  # 4999
                              embedding_dim=config.emb_dim,  # 128
                              n_labels=config.vocab_size,  # 4999
                              pad_length=config.padding,  # 20
                              encoder_units=config.hidden_dim,  # 256
                              decoder_units=config.hidden_dim,  # 256
                              )
        model = generator.model()
        model.summary()
        model.compile(optimizer='adagrad',
                      lr=config.lr,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        print('Generator Compiled.')
        # Ctrl-C stops training early but still reports completion.
        try:
            model.fit_generator(generator=self.train_batcher.next_batch(),
                                samples_per_epoch=5,
                                validation_data=self.val_batcher.next_batch(),
                                callbacks=[cp],
                                verbose=1,
                                nb_val_samples=1,
                                nb_epoch=config.max_iterations)
        except KeyboardInterrupt as e:
            print('Generator training stopped early.')
        print('Generator training complete.')
    def setup_train_discriminator(self):
        """Build, compile and fit the Discriminator alone."""
        model = Discriminator().model()
        model.summary()
        model.compile(optimizer=Adam(lr=0.0001, beta_1=0.5, beta_2=0.9),
                      lr=config.lr,
                      loss='binary_crossentropy',
                      )
        print('Discriminator Compiled.')
        try:
            model.fit_generator(generator=self.train_batcher.next_batch_discriminator(),
                                samples_per_epoch=5,
                                validation_data=self.val_batcher.next_batch_discriminator(),
                                callbacks=[cp],
                                verbose=1,
                                nb_val_samples=1,
                                nb_epoch=config.max_iterations)
        except KeyboardInterrupt as e:
            print('Discriminator training stopped early.')
        print('Discriminator training complete.')
    def setup_train_wgan_model(self):
        """Assemble generator + reconstructor + discriminator into a WGAN
        and run its training loop on the training batcher."""
        generator = Generator(num_embeddings=config.vocab_size,  # 4999
                              embedding_dim=config.emb_dim,  # 128
                              n_labels=config.vocab_size,  # 4999
                              pad_length=config.padding,  # 20
                              encoder_units=config.hidden_dim,  # 256
                              decoder_units=config.hidden_dim,  # 256
                              ).model()
        reconstructor = Reconstructor(num_embeddings=config.vocab_size,  # 4999
                                      embedding_dim=config.emb_dim,  # 128
                                      n_labels=config.vocab_size,  # 4999
                                      pad_length=config.padding,  # 20
                                      encoder_units=config.hidden_dim,  # 256
                                      decoder_units=config.hidden_dim,  # 256
                                      ).model()
        discriminator = Discriminator().model()
        wgan = WGAN(generator=generator,
                    reconstructor=reconstructor,
                    discriminator=discriminator,
                    )
        try:
            wgan.train(self.train_batcher.next_batch())
        except KeyboardInterrupt as e:
            print('WGAN training stopped early.')
        print('WGAN training complete.')
if __name__ == '__main__':
    # Entry point: building Train loads the vocab and batchers up front.
    train_model = Train()
train_model.setup_train_wgan_model() | [
"denys.lazarenko@tum.de"
] | denys.lazarenko@tum.de |
dcec9dfe44d580ff70c968b38dcb5e9e06fac39d | eb57e632fb351db1975ad0e15bd480759bbc153b | /sysinf/urls.py | 3c44a5f757052a14c4ca9c32626da21663101d8a | [
"MIT"
] | permissive | raikel/dnfas | 163ebc59fc6d4a12c044de33136cdce7ed7ddf0e | 567bcc6656c75ee5167bd248045ec24e37de07b8 | refs/heads/master | 2021-06-27T02:22:30.508109 | 2020-03-25T20:11:04 | 2020-03-25T20:11:04 | 224,517,088 | 0 | 0 | MIT | 2021-03-19T22:52:15 | 2019-11-27T21:07:07 | Python | UTF-8 | Python | false | false | 167 | py | from django.urls import path
from .views import SystemStatsView
# URL namespace for this app (reverse as "sysinf:system").
app_name = 'sysinf'
urlpatterns = [
    # GET /system/ -> system statistics view.
    path('system/', SystemStatsView.as_view(), name='system')
]
| [
"raikelbl@gmail.com"
] | raikelbl@gmail.com |
1cd4200579144cde0ecc8f38d383d2e16543319b | dc452fb74a667a9e136855cc5683c614b4c9c560 | /mars/urls.py | 96ef9d05776c31b85ba789f43ec1555bbd401b9b | [] | no_license | srinivasthedeveloper/marsAmbulance | f4dc5b4e75ccc4cf9a29cc3d64445d866fbcb78b | cdadea8d4c43a7f64069a0bda201304abb857530 | refs/heads/master | 2022-12-26T18:23:44.950677 | 2020-09-10T02:39:10 | 2020-09-10T02:39:10 | 285,506,566 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | """mars URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('marsapp.urls')),
#path('accounts/',include('accounts.urls')),
]
| [
"srinivasthedeveloper@gmail.com"
] | srinivasthedeveloper@gmail.com |
200d0b3ac6f82822c175ae206d1e3f864d70588f | 0498ee5e261df67d6f7dbd1e610eb03b226b582f | /drf_swagger/asgi.py | ada4fc6f4196e55c01fc6aba37d5fbb09dd0ae15 | [] | no_license | DongDong-123/drf_swagger | daf051eca4a4f37d932176dc967b42dfe66d7f72 | 764354a2328702762cd69ba75051eb6b65fd5365 | refs/heads/master | 2021-07-09T08:03:10.446869 | 2020-01-04T10:18:19 | 2020-01-04T10:18:19 | 231,735,271 | 0 | 0 | null | 2021-03-19T22:57:54 | 2020-01-04T08:57:46 | Python | UTF-8 | Python | false | false | 399 | py | """
ASGI config for drf_swagger project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf_swagger.settings')
application = get_asgi_application()
| [
"zisehaiyang04@sina.com"
] | zisehaiyang04@sina.com |
f1289a5fe54f455972fe557978ae48437c78dcc5 | 2ced5d65b460bafa61afc477bf7b3127d13eaee0 | /crypto/helpers.py | ee8bf1848203d3519cefa74532a6899bd3a39cdf | [] | no_license | SirBeanie/lc101 | fb9e5c67ae9123173a8dd028233022ae3cc3c2b7 | 6d3bff07f01c9b68fa83146513a40394e3179026 | refs/heads/master | 2021-01-23T13:30:51.930588 | 2017-09-25T22:41:30 | 2017-09-25T22:41:30 | 102,667,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 616 | py | alphabet = "abcdefghijklmnopqrstuvwxyz"
def alphabet_position(letter):
return alphabet.index(letter.lower())
def rotate_character(char, rot):
if char.isalpha():
new_letter = alphabet_position(char) + rot
if new_letter < 26:
if char == char.lower():
return alphabet[new_letter]
else:
return alphabet[new_letter].upper()
else:
if char == char.lower():
return alphabet[new_letter % 26]
else:
return alphabet [new_letter % 26].upper()
else:
return char
| [
"bendermuhle@my.stlcc.edu"
] | bendermuhle@my.stlcc.edu |
2cae682d74fffef4b2e9ebc4558e3dcfa1dda838 | cc27aad37b7c50b1f89a98a28d7c1604792535ab | /tariff/templatetags/tariff_tags.py | 8ee95bc91a640a62b7e64c3cee0279aaac0af75f | [] | no_license | and-nothing-else/marketplace | 589a1c0b06571454edf38007b52e5fd0c53050f8 | ea398b8cb815c3268ce183489a9c278285c409cd | refs/heads/master | 2016-08-12T05:33:05.803053 | 2016-04-22T21:04:54 | 2016-04-22T21:04:54 | 49,036,460 | 8 | 6 | null | null | null | null | UTF-8 | Python | false | false | 238 | py | from django import template
register = template.Library()
@register.inclusion_tag('tariff/my_tariff_card.html', takes_context=True)
def my_tariff_card(context):
return {
'tariff': context['request'].user.get_tariff()
}
| [
"and.nothing.else@ya.ru"
] | and.nothing.else@ya.ru |
c118e731b64128b330ce49fc1a3dffd313cd5606 | 51eb4a004cfc9f823755082e907fb03aa533626e | /learnlearn/views/cleantha.py | 8d6e81ce155ff96c3208be4b8ea9391ccb90e220 | [] | no_license | zjhsdtc/django_rest_learn | 004d31983bb4937dc55224747470e83c9f5e52ab | ab7cb1f94666a6c3ff1b47b58d7c95f136683f8e | refs/heads/master | 2021-01-22T22:53:04.452919 | 2013-08-30T09:02:00 | 2013-08-30T09:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | # __author__ = 'cleantha'
from django.http import HttpResponse
def cleantha(request):
return HttpResponse('hello cleantha') | [
"cleantha@ubuntu.(none)"
] | cleantha@ubuntu.(none) |
8eb6e85672e81112831fde7bad7919bf1cccb3f8 | 448ede205c9beda13307f6b7c4f19cb9efe89264 | /vehicle/migrations/0014_auto_20191221_2021.py | 32ca061d245403541f41b83b750c28e71c027e79 | [] | no_license | sultugamer7/VehicleRentalSystem | 8c68860c37a62dea0c7fee4c3b443ee4f981a28c | 8901bbf7ba55b0f8dd4c0ae17518a5ded9fa4ce6 | refs/heads/master | 2022-04-02T17:33:24.837303 | 2020-01-13T13:19:56 | 2020-01-13T13:19:56 | 233,599,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | # Generated by Django 2.2.6 on 2019-12-21 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('vehicle', '0013_auto_20191221_1446'),
]
operations = [
migrations.AlterField(
model_name='booking',
name='from_date',
field=models.DateField(),
),
migrations.AlterField(
model_name='booking',
name='from_time',
field=models.TimeField(),
),
migrations.AlterField(
model_name='booking',
name='to_date',
field=models.DateField(),
),
migrations.AlterField(
model_name='booking',
name='to_time',
field=models.TimeField(),
),
migrations.AlterUniqueTogether(
name='booking',
unique_together=set(),
),
]
| [
"sultugamer7@gmail.com"
] | sultugamer7@gmail.com |
8348e16c6785697fe7de5e82d5b2cccf17d8a39d | 56231e5b77a8b743e84e43d28691da36b89a0cca | /platform-tools/systrace/catapult/telemetry/telemetry/testing/run_tests_unittest.py | 8728813fb8ee52fb77629c0039869526582c60cf | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | cricketclubucd/davisdragons | ee3aa6ad72197c2218660843e03d58c562b965aa | 99d5877377b80d1b20c78cc3c4c6f26795f29b14 | refs/heads/master | 2023-01-30T05:37:45.923195 | 2021-01-27T06:30:25 | 2021-01-27T06:30:25 | 96,661,120 | 2 | 2 | MIT | 2023-01-23T18:42:26 | 2017-07-09T04:32:10 | HTML | UTF-8 | Python | false | false | 3,762 | py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.core import util
from telemetry.testing import run_tests
class MockArgs(object):
def __init__(self):
self.positional_args = []
self.exact_test_filter = True
self.run_disabled_tests = False
self.skip = []
class MockPossibleBrowser(object):
def __init__(self, browser_type, os_name, os_version_name,
supports_tab_control):
self.browser_type = browser_type
self.platform = MockPlatform(os_name, os_version_name)
self.supports_tab_control = supports_tab_control
class MockPlatform(object):
def __init__(self, os_name, os_version_name):
self.os_name = os_name
self.os_version_name = os_version_name
def GetOSName(self):
return self.os_name
def GetOSVersionName(self):
return self.os_version_name
class RunTestsUnitTest(unittest.TestCase):
def _GetEnabledTests(self, browser_type, os_name, os_version_name,
supports_tab_control, args=None):
if not args:
args = MockArgs()
runner = run_tests.typ.Runner()
host = runner.host
runner.top_level_dir = util.GetTelemetryDir()
runner.args.tests = [host.join(util.GetTelemetryDir(),
'telemetry', 'testing', 'disabled_cases.py')]
possible_browser = MockPossibleBrowser(
browser_type, os_name, os_version_name, supports_tab_control)
runner.classifier = run_tests.GetClassifier(args, possible_browser)
_, test_set = runner.find_tests(runner.args)
return set(test.name.split('.')[-1] for test in test_set.parallel_tests)
def testSystemMacMavericks(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testMavericksOnly',
'testNoChromeOS',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'mavericks', True))
def testSystemMacLion(self):
self.assertEquals(
set(['testAllEnabled',
'testMacOnly',
'testNoChromeOS',
'testNoMavericks',
'testNoWinLinux',
'testSystemOnly',
'testHasTabs']),
self._GetEnabledTests('system', 'mac', 'lion', True))
def testCrosGuestChromeOS(self):
self.assertEquals(
set(['testAllEnabled',
'testChromeOSOnly',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testNoWinLinux',
'testHasTabs']),
self._GetEnabledTests('cros-guest', 'chromeos', '', True))
def testCanaryWindowsWin7(self):
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True))
def testDoesntHaveTabs(self):
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testNoMac',
'testNoMavericks',
'testNoSystem',
'testWinOrLinuxOnly']),
self._GetEnabledTests('canary', 'win', 'win7', False))
def testSkip(self):
args = MockArgs()
args.skip = ['telemetry.*testNoMac', '*NoMavericks',
'telemetry.testing.disabled_cases.DisabledCases.testNoSystem']
self.assertEquals(
set(['testAllEnabled',
'testNoChromeOS',
'testWinOrLinuxOnly',
'testHasTabs']),
self._GetEnabledTests('canary', 'win', 'win7', True, args))
| [
"jena.suraj.k@gmail.com"
] | jena.suraj.k@gmail.com |
bb1db72e1417f503a51c53cab45015887b5df63a | 8ba041911be24ba453d6df60ddf47e7d2aedfde5 | /model.py | ff354766fccfa1efd6fe85425ef183d0be6f6c83 | [] | no_license | dmcdekker/testing-1 | 9c0beda3fbdb9d37a812e903800f4c976cd0bbae | ee6cbab6aec40adde9971005d9c79862fb3bfe7a | refs/heads/master | 2020-03-15T14:07:38.046358 | 2018-05-04T19:58:29 | 2018-05-04T19:58:29 | 132,183,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from flask.ext.sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class Game(db.Model):
    """Board game."""

    __tablename__ = "games"

    # Auto-incrementing surrogate key.
    game_id = db.Column(db.Integer, primary_key=True, autoincrement=True)
    # Game title; unique so seed/test data cannot collide.
    name = db.Column(db.String(20), nullable=False, unique=True)
    # Optional short blurb describing the game.
    description = db.Column(db.String(100))
def connect_to_db(app, db_uri="postgresql:///games"):
    """Bind the module-level SQLAlchemy `db` object to the given Flask app.

    `db_uri` defaults to the local `games` Postgres database; tests pass a
    different URI so they never touch real data.
    """
    app.config['SQLALCHEMY_DATABASE_URI'] = db_uri
    db.app = app
    db.init_app(app)
def example_data():
    """Seed the test database with a couple of sample games."""
    sample_games = [
        Game(name="My Little Pony", description="A pony game"),
        Game(name="Good or Evil", description="Are you good or evil?!"),
    ]
    db.session.add_all(sample_games)
    db.session.commit()
if __name__ == '__main__':
    # Running this module directly binds the default database so the models
    # can be poked at interactively (e.g. `python -i model.py`).
    from server import app
    connect_to_db(app)
    print "Connected to DB."
| [
"no-reply@hackbrightacademy.com"
] | no-reply@hackbrightacademy.com |
d44f01bcd4e7d2b34ab46450cdb1c6ab87d512a1 | f3daf8a0bf10c38e8a96b518aa08195241adf7cb | /HW1b/search.py | 5bcde67610641f501f48b5b43d19114b792f6787 | [] | no_license | trademark152/Artificial_Intelligence_USC | c9dc8e70a6bc2228ccfaeb911e497de82b4f7b9a | 5e3464c9af84786d540fe74a275f835395d6836a | refs/heads/master | 2020-09-26T09:31:06.840819 | 2019-12-06T02:16:56 | 2019-12-06T02:16:56 | 226,227,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,545 | py | """Search (Chapters 3-4)
The way to use this code is to subclass Problem to create a class of problems,
then create problem instances and solve them with calls to the various search
functions."""
from __future__ import generators
from utils import *
import agents
import math, random, sys, time, bisect, string
# ______________________________________________________________________________
class Problem:
    """The abstract class for a formal problem. You should subclass this and
    implement the method successor, and possibly __init__, goal_test, and
    path_cost. Then you will create instances of your subclass and solve them
    with the various search functions."""

    def __init__(self, initial, goal=None):
        """The constructor specifies the initial state, and possibly a goal
        state, if there is a unique goal. Your subclass's constructor can add
        other arguments."""
        self.initial = initial ; self.goal = goal

    def successor(self, state):
        """Given a state, return a sequence of (action, state) pairs reachable
        from this state. If there are many successors, consider an iterator
        that yields the successors one at a time, rather than building them
        all at once. Iterators will work fine within the framework."""
        # `abstract` comes from utils and marks this method as unimplemented;
        # subclasses must override.  (presumably it raises -- confirm in utils)
        abstract

    def goal_test(self, state):
        """Return True if the state is a goal. The default method compares the
        state to self.goal, as specified in the constructor. Implement this
        method if checking against a single self.goal is not enough."""
        return state == self.goal

    def path_cost(self, c, state1, action, state2):
        """Return the cost of a solution path that arrives at state2 from
        state1 via action, assuming cost c to get up to state1. If the problem
        is such that the path doesn't matter, this function will only look at
        state2. If the path does matter, it will consider c and maybe state1
        and action. The default method costs 1 for every step in the path."""
        return c + 1

    def value(self):
        """For optimization problems, each state has a value. Hill-climbing
        and related algorithms try to maximize this value."""
        # Unimplemented marker, as in successor() above.
        abstract
# ______________________________________________________________________________
class Node:
    """A node in a search tree. Contains a pointer to the parent (the node
    that this is a successor of) and to the actual state for this node. Note
    that if a state is arrived at by two paths, then there are two nodes with
    the same state. Also includes the action that got us to this state, and
    the total path_cost (also known as g) to reach the node. Other functions
    may add an f and h value; see best_first_graph_search and astar_search for
    an explanation of how the f and h values are handled. You will not need to
    subclass this class."""

    def __init__(self, state, parent=None, action=None, path_cost=0):
        "Create a search tree Node, derived from a parent by an action."
        # utils.update assigns each keyword argument onto self in one call.
        update(self, state=state, parent=parent, action=action,
               path_cost=path_cost, depth=0)
        if parent:
            self.depth = parent.depth + 1

    def __repr__(self):
        return "<Node %s>" % (self.state,)

    def path(self):
        # NB: the returned list runs from this node BACK to the root
        # (node first, root last), not root-first.
        "Create a list of nodes from this node back to the root."
        x, result = self, [self]
        while x.parent:
            result.append(x.parent)
            x = x.parent
        return result

    def expand(self, problem):
        "Return a list of nodes reachable from this node. [Fig. 3.8]"
        return [Node(next, self, act,
                     problem.path_cost(self.path_cost, self.state, act, next))
                for (act, next) in problem.successor(self.state)]
# ______________________________________________________________________________
class SimpleProblemSolvingAgent(agents.Agent):
    """Abstract framework for problem-solving agent. [Fig. 3.1]

    Subclasses must provide update_state, formulate_goal, formulate_problem
    and search.  The agent only replans (searches) when its current action
    sequence is exhausted.
    """

    def __init__(self):
        # was `Agent.__init__`: the module is imported as `agents`, so the
        # bare name Agent raised NameError.
        agents.Agent.__init__(self)
        # Keep state/seq on the instance: the original closure variables
        # were rebound inside program(), making them unbound locals.
        self.state = []
        self.seq = []

        def program(percept):
            self.state = self.update_state(self.state, percept)
            if not self.seq:
                goal = self.formulate_goal(self.state)
                problem = self.formulate_problem(self.state, goal)
                self.seq = self.search(problem)
            # Pop and return the next queued action.
            return self.seq.pop(0)

        self.program = program
# ______________________________________________________________________________
## Uninformed Search algorithms
def tree_search(problem, fringe):
    """Generic tree search [Fig. 3.8].

    Expands nodes in the order dictated by `fringe` (an initially-empty
    queue).  Repeated states are NOT detected, so this can loop forever on
    state spaces with cycles.  Returns the goal Node, or None if the fringe
    empties first.
    """
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        fringe.extend(current.expand(problem))
    return None
def breadth_first_tree_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # FIFO fringe == expand nodes in order of increasing depth.
    return tree_search(problem, FIFOQueue())
def depth_first_tree_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # LIFO fringe (Stack) == always expand the most recently generated node.
    return tree_search(problem, Stack())
def graph_search(problem, fringe):
    """Graph search [Fig. 3.18]: like tree_search, but remember every state
    already expanded so each state is expanded at most once.  States must be
    hashable.  Returns the goal Node or None."""
    closed = set()
    fringe.append(Node(problem.initial))
    while fringe:
        current = fringe.pop()
        if problem.goal_test(current.state):
            return current
        if current.state not in closed:
            closed.add(current.state)
            fringe.extend(current.expand(problem))
    return None
def breadth_first_graph_search(problem):
    "Search the shallowest nodes in the search tree first. [p 74]"
    # Graph-search variant: repeated states are pruned.
    return graph_search(problem, FIFOQueue())
def depth_first_graph_search(problem):
    "Search the deepest nodes in the search tree first. [p 74]"
    # Graph-search variant: repeated states are pruned.
    return graph_search(problem, Stack())
def depth_limited_search(problem, limit=50):
    "[Fig. 3.12]"
    # Returns a goal Node, the sentinel string 'cutoff' if the depth limit
    # was reached somewhere, or None if the tree was exhausted below the
    # limit without finding a goal.
    def recursive_dls(node, problem, limit):
        cutoff_occurred = False
        if problem.goal_test(node.state):
            return node
        elif node.depth == limit:
            return 'cutoff'
        else:
            for successor in node.expand(problem):
                result = recursive_dls(successor, problem, limit)
                if result == 'cutoff':
                    cutoff_occurred = True
                elif result != None:
                    return result
            # Distinguish "limit hit somewhere" from "fully exhausted".
            if cutoff_occurred:
                return 'cutoff'
            else:
                return None
    # Body of depth_limited_search:
    return recursive_dls(Node(problem.initial), problem, limit)
def iterative_deepening_search(problem):
    """Iterative deepening search [Fig. 3.13].

    Runs depth_limited_search with limits 0, 1, 2, ... until it returns
    something other than the 'cutoff' sentinel: either a goal Node or None
    (tree exhausted below the limit, i.e. no solution exists).
    """
    # was `for depth in xrange(sys.maxint)`: both names are Python 2 only.
    depth = 0
    while True:
        result = depth_limited_search(problem, depth)
        # was `result is not 'cutoff'`: comparing strings by identity relies
        # on interning, an implementation detail; compare by value instead.
        if result != 'cutoff':
            return result
        depth += 1
# ______________________________________________________________________________
# Informed (Heuristic) Search
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have depth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # memoize (from utils) stores each computed value on the node as .f.
    f = memoize(f, 'f')
    return graph_search(problem, PriorityQueue(min, f))
# Greedy best-first search is just best-first graph search where the caller
# supplies f(n) = h(n); no separate implementation is needed.
greedy_best_first_graph_search = best_first_graph_search
def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search.
    Uses the pathmax trick: f(n) = max(f(n), g(n)+h(n))."""
    h = h or problem.h
    def f(n):
        # pathmax: never let f decrease along a path (getattr reads the
        # parent-cached f left by best_first_graph_search's memoize).
        return max(getattr(n, 'f', -infinity), n.path_cost + h(n))
    return best_first_graph_search(problem, f)
# ______________________________________________________________________________
## Other search algorithms
def recursive_best_first_search(problem):
    "[Fig. 4.5]"
    # NOTE(review): this implementation appears unfinished -- see the inline
    # notes below before using it.
    def RBFS(problem, node, flimit):
        if problem.goal_test(node.state):
            return node
        # NOTE(review): no module-level `expand` exists; this was probably
        # meant to be node.expand(problem).
        successors = expand(node, problem)
        if len(successors) == 0:
            return None, infinity
        for s in successors:
            s.f = max(s.path_cost + s.h, node.f)
        while True:
            # NOTE(review): cmp-style sort argument is Python 2 only.
            successors.sort(lambda x, y: x.f - y.f) # Order by lowest f value
            best = successors[0]
            if best.f > flimit:
                return None, best.f
            # NOTE(review): IndexError when there is exactly one successor.
            alternative = successors[1]
            # NOTE(review): per AIMA Fig 4.5 the new limit should be the
            # number alternative.f, not the Node object itself -- confirm.
            result, best.f = RBFS(problem, best, min(flimit, alternative))
            if result is not None:
                return result
    # NOTE(review): RBFS takes three arguments; `problem` is missing here.
    return RBFS(Node(problem.initial), infinity)
def hill_climbing(problem):
    """From the initial node, keep choosing the neighbor with highest value,
    stopping when no neighbor is better. [Fig. 4.11]

    Returns the (locally) best state found.
    """
    current = Node(problem.initial)
    while True:
        # was `expand(node, problem)`: neither `expand` nor `node` exists in
        # this scope; the successors of the current node were intended.
        # NOTE(review): Node does not define value() in this file; problems
        # used with hill_climbing must attach one -- confirm.
        neighbor = argmax(current.expand(problem), Node.value)
        if neighbor.value() <= current.value():
            return current.state
        current = neighbor
def exp_schedule(k=20, lam=0.005, limit=100):
    """Return an exponential-decay temperature schedule for simulated
    annealing: T(t) = k * exp(-lam * t) while t < limit, and 0 afterwards."""
    # Plain conditional expression instead of the utils.if_ helper; both
    # operands are side-effect-free, so the result is identical.
    return lambda t: k * math.exp(-lam * t) if t < limit else 0
def simulated_annealing(problem, schedule=exp_schedule()):
    "[Fig. 4.5]"
    # NOTE(review): appears unfinished -- see inline notes before using.
    current = Node(problem.initial)
    # NOTE(review): xrange and sys.maxint are Python 2 only.
    for t in xrange(sys.maxint):
        T = schedule(t)
        if T == 0:
            # NOTE(review): returns the Node, while hill_climbing returns a
            # state -- confirm which the callers expect.
            return current
        # NOTE(review): `node` is undefined and `expand` is not a module
        # function; probably meant random.choice(current.expand(problem)).
        next = random.choice(expand(node.problem))
        delta_e = next.path_cost - current.path_cost
        if delta_e > 0 or probability(math.exp(delta_e / T)):
            current = next
def online_dfs_agent(a):
    "[Fig. 4.12]"
    # Unimplemented placeholder from the AIMA codebase.
    pass #### more
def lrta_star_agent(a):
    "[Fig. 4.12]"
    # Unimplemented placeholder from the AIMA codebase.
    pass #### more
# ______________________________________________________________________________
# Genetic Algorithm
def genetic_search(problem, fitness_fn, ngen=1000, pmut=0.0, n=20):
    """Call genetic_algorithm on the appropriate parts of a problem.
    This requires that the problem has a successor function that generates
    reasonable states, and that it has a path_cost function that scores states.
    We use the negative of the path_cost function, because costs are to be
    minimized, while genetic-algorithm expects a fitness_fn to be maximized."""
    # was problem.initial_state: Problem stores the start state as .initial.
    states = [s for (a, s) in problem.successor(problem.initial)[:n]]
    random.shuffle(states)
    # Deliberately overrides the fitness_fn argument, per the docstring.
    fitness_fn = lambda s: - problem.path_cost(0, s, None, s)
    return genetic_algorithm(states, fitness_fn, ngen, pmut)
def genetic_algorithm(population, fitness_fn, ngen=1000, pmut=0.0):
    """[Fig. 4.7] Evolve `population` for ngen generations and return the
    fittest individual.  pmut is the per-child mutation probability."""

    def reproduce(p1, p2):
        # Single-point crossover at a random cut.
        c = random.randrange(len(p1))
        return p1[:c] + p2[c:]

    for i in range(ngen):
        new_population = []
        # was `for i in len(population)`: TypeError (int is not iterable).
        for _ in range(len(population)):
            # was random_weighted_selections (undefined); the helper below is
            # random_weighted_selection and returns a list of picks.
            p1, p2 = random_weighted_selection(population, 2, fitness_fn)
            child = reproduce(p1, p2)
            # was `> pmut`: with pmut as a mutation *probability* (default
            # 0.0) the original mutated nearly every child.
            if random.uniform(0, 1) < pmut:
                # NOTE(review): individuals must provide a mutate() method;
                # plain list states (as built by genetic_search) do not.
                child.mutate()
            new_population.append(child)
        population = new_population
    return argmax(population, fitness_fn)
def random_weighted_selection(seq, n, weight_fn):
    """Pick n elements of seq, weighted according to weight_fn.

    Applies weight_fn to each element of seq and builds a cumulative total;
    each draw then chooses element e with probability weight[e]/total.
    Sampling is with replacement.  Returns a list of exactly n elements.
    """
    totals = []
    running_total = 0
    for item in seq:
        running_total += weight_fn(item)
        totals.append(running_total)
    selections = []
    for _ in range(n):
        r = random.uniform(0, totals[-1])
        # bisect_right finds the first cumulative total strictly greater
        # than r -- the same element the original linear scan picked, in
        # O(log n) per draw instead of O(n).  The min() guard fixes the
        # original's silent skip (a short result list) when no total
        # exceeded r, e.g. when all weights are zero.
        idx = min(bisect.bisect_right(totals, r), len(totals) - 1)
        selections.append(seq[idx])
    return selections
# _____________________________________________________________________________
# The remainder of this file implements examples for the search algorithms.
# ______________________________________________________________________________
# Graphs and Graph Problems
class Graph:
    """A graph connects nodes (verticies) by edges (links). Each edge can also
    have a length associated with it. The constructor call is something like:
        g = Graph({'A': {'B': 1, 'C': 2})
    this makes a graph with 3 nodes, A, B, and C, with an edge of length 1 from
    A to B, and an edge of length 2 from A to C. You can also do:
        g = Graph({'A': {'B': 1, 'C': 2}, directed=False)
    This makes an undirected graph, so inverse links are also added. The graph
    stays undirected; if you add more links with g.connect('B', 'C', 3), then
    inverse link is also added. You can use g.nodes() to get a list of nodes,
    g.get('A') to get a dict of links out of A, and g.get('A', 'B') to get the
    length of the link from A to B. 'Lengths' can actually be any object at
    all, and nodes can be any hashable object."""

    # NB: the parameter name `dict` shadows the builtin, but it is part of
    # the keyword interface (callers pass dict=...), so it cannot be renamed.
    def __init__(self, dict=None, directed=True):
        self.dict = dict or {}
        self.directed = directed
        if not directed: self.make_undirected()

    def make_undirected(self):
        "Make a digraph into an undirected graph by adding symmetric edges."
        # NOTE(review): connect1 can insert new keys into self.dict while
        # this loop iterates .keys(); safe on Python 2 (list copy), but a
        # RuntimeError risk on Python 3 -- confirm before porting.
        for a in self.dict.keys():
            for (b, distance) in self.dict[a].items():
                self.connect1(b, a, distance)

    def connect(self, A, B, distance=1):
        """Add a link from A and B of given distance, and also add the inverse
        link if the graph is undirected."""
        self.connect1(A, B, distance)
        if not self.directed: self.connect1(B, A, distance)

    def connect1(self, A, B, distance):
        "Add a link from A to B of given distance, in one direction only."
        self.dict.setdefault(A, {})[B] = distance

    def get(self, a, b=None):
        """Return a link distance or a dict of {node: distance} entries.
        .get(a,b) returns the distance or None;
        .get(a) returns a dict of {node: distance} entries, possibly {}."""
        # setdefault also registers `a` as a node on first lookup.
        links = self.dict.setdefault(a, {})
        if b is None:
            return links
        else:
            return links.get(b)

    def nodes(self):
        "Return a list of nodes in the graph."
        return self.dict.keys()
def UndirectedGraph(dict=None):
    "Build a Graph where every edge (including future ones) goes both ways."
    # Delegates to Graph(directed=False), which mirrors all existing links.
    return Graph(dict=dict, directed=False)
def RandomGraph(nodes=range(10), min_links=2, width=400, height=300,
                curvature=lambda: random.uniform(1.1, 1.5)):
    """Construct a random graph, with the specified nodes, and random links.
    The nodes are laid out randomly on a (width x height) rectangle.
    Then each node is connected to the min_links nearest neighbors.
    Because inverse links are added, some nodes will have more connections.
    The distance between nodes is the hypotenuse times curvature(),
    where curvature() defaults to a random number between 1.1 and 1.5."""
    g = UndirectedGraph()
    g.locations = {}
    ## Build the cities
    for node in nodes:
        g.locations[node] = (random.randrange(width), random.randrange(height))
    ## Build roads from each city to at least min_links nearest neighbors.
    for i in range(min_links):
        for node in nodes:
            if len(g.get(node)) < min_links:
                here = g.locations[node]
                # `distance`, `argmin` and `infinity` come from utils.
                # Infinity excludes the node itself and already-linked nodes.
                def distance_to_node(n):
                    if n is node or g.get(node, n): return infinity
                    return distance(g.locations[n], here)
                neighbor = argmin(nodes, distance_to_node)
                d = distance(g.locations[neighbor], here) * curvature()
                g.connect(node, neighbor, int(d))
    return g
# Romania road map (AIMA Fig. 3.2), cities keyed by their initial letter.
# Dict is the utils helper that builds a dict from keyword arguments.
romania = UndirectedGraph(Dict(
    A=Dict(Z=75, S=140, T=118),
    B=Dict(U=85, P=101, G=90, F=211),
    C=Dict(D=120, R=146, P=138),
    D=Dict(M=75),
    E=Dict(H=86),
    F=Dict(S=99),
    H=Dict(U=98),
    I=Dict(V=92, N=87),
    L=Dict(T=111, M=70),
    O=Dict(Z=71, S=151),
    P=Dict(R=97),
    R=Dict(S=80),
    U=Dict(V=142)))
# (x, y) coordinates per city, used for the straight-line-distance heuristic.
romania.locations = Dict(
    A=(91, 492), B=(400, 327), C=(253, 288), D=(165, 299),
    E=(562, 293), F=(305, 449), G=(375, 270), H=(534, 350),
    I=(473, 506), L=(165, 379), M=(168, 339), N=(406, 537),
    O=(131, 571), P=(320, 368), R=(233, 410), S=(207, 457),
    T=(94, 410), U=(456, 350), V=(509, 444), Z=(108, 531))
# Australia state adjacency map (map-coloring example); unit edge lengths.
australia = UndirectedGraph(Dict(
    T=Dict(),
    SA=Dict(WA=1, NT=1, Q=1, NSW=1, V=1),
    NT=Dict(WA=1, Q=1),
    NSW=Dict(Q=1, V=1)))
australia.locations = Dict(WA=(120, 24), NT=(135, 20), SA=(135, 30),
                           Q=(145, 20), NSW=(145, 32), T=(145, 42), V=(145, 37))
class GraphProblem(Problem):
    "The problem of searching a graph from one node to another."

    def __init__(self, initial, goal, graph):
        Problem.__init__(self, initial, goal)
        self.graph = graph

    def successor(self, A):
        "Return a list of (action, result) pairs."
        # Moving to neighbor B *is* the action, so both elements are B.
        return [(B, B) for B in self.graph.get(A).keys()]

    def path_cost(self, cost_so_far, A, action, B):
        # Missing links cost infinity, so searches never take them.
        return cost_so_far + (self.graph.get(A, B) or infinity)

    def h(self, node):
        "h function is straight-line distance from a node's state to goal."
        # Falls back to infinity when the graph has no coordinate layout.
        locs = getattr(self.graph, 'locations', None)
        if locs:
            return int(distance(locs[node.state], locs[self.goal]))
        else:
            return infinity
# ______________________________________________________________________________
#### NOTE: NQueensProblem not working properly yet.
class NQueensProblem(Problem):
    """The problem of placing N queens on an NxN board with none attacking
    each other. A state is represented as an N-element array, where the
    a value of r in the c-th entry means there is a queen at column c,
    row r, and a value of None means that the c-th column has not been
    filled in left. We fill in columns left to right."""

    def __init__(self, N):
        self.N = N
        self.initial = [None] * N

    def successor(self, state):
        "In the leftmost empty column, try all non-conflicting rows."
        if state[-1] is not None:
            return [] ## All columns filled; no successors
        else:
            def place(col, row):
                # Copy of state with a queen placed at (row, col).
                new = state[:]
                new[col] = row
                return new
            col = state.index(None)
            return [(row, place(col, row)) for row in range(self.N)
                    if not self.conflicted(state, row, col)]

    def conflicted(self, state, row, col):
        "Would placing a queen at (row, col) conflict with anything?"
        # was range(col - 1): that skipped the immediately preceding column,
        # letting adjacent-column attacks through (the noted "not working"
        # bug).  Check every filled column to the left.
        for c in range(col):
            if self.conflict(row, col, state[c], c):
                return True
        return False

    def conflict(self, row1, col1, row2, col2):
        "Would putting two queens in (row1, col1) and (row2, col2) conflict?"
        return (row1 == row2 ## same row
                or col1 == col2 ## same column
                or row1 - col1 == row2 - col2  ## same \ diagonal
                or row1 + col1 == row2 + col2) ## same / diagonal

    def goal_test(self, state):
        "Check if all columns filled, no conflicts."
        if state[-1] is None:
            return False
        for c in range(len(state)):
            if self.conflicted(state, state[c], c):
                return False
        return True
# ______________________________________________________________________________
## Inverse Boggle: Search for a high-scoring Boggle board. A good domain for
## iterative-repair and related search tehniques, as suggested by Justin Boyan.
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'

# The sixteen letter cubes of the standard 4x4 Boggle set, one face per
# character ('Q' stands for the 'Qu' face).
cubes16 = ['FORIXB', 'MOQABJ', 'GURILW', 'SETUPL',
           'CMPDAE', 'ACITAO', 'SLCRAE', 'ROMASH',
           'NODESW', 'HEFIYE', 'ONUDTK', 'TEVIGN',
           'ANEDVZ', 'PINESH', 'ABILYT', 'GKYLEU']
def random_boggle(n=4):
    """Return a random Boggle board of size n x n.
    We represent a board as a linear list of letters."""
    cubes = [cubes16[i % 16] for i in range(n * n)]
    random.shuffle(cubes)
    # List comprehension rather than map(): on Python 3 map() returns a lazy
    # iterator, breaking the documented "linear list" contract.
    return [random.choice(cube) for cube in cubes]
## The best 5x5 board found by Boyan, with our word list this board scores
## 2274 words, for a score of 9837
boyan_best = list('RSTCSDEIAEGNLRPEATESMSSID')
def print_boggle(board):
"Print the board in a 2-d array."
n2 = len(board);
n = exact_sqrt(n2)
for i in range(n2):
if i % n == 0: print
if board[i] == 'Q': print
'Qu',
else: print
str(board[i]) + ' ',
print
def boggle_neighbors(n2, cache={}):
    """Return a list of lists, where the i-th element is the list of indexes
    for the neighbors of square i."""
    # (The original docstring opened with four quotes, leaving a stray '"'
    # in the text.)  `cache` is a deliberately-shared mutable default used
    # to memoize results per board size.
    if cache.get(n2):
        return cache.get(n2)
    n = exact_sqrt(n2)
    neighbors = [None] * n2
    for i in range(n2):
        neighbors[i] = []
        on_top = i < n
        on_bottom = i >= n2 - n
        on_left = i % n == 0
        on_right = (i + 1) % n == 0
        if not on_top:
            # Row above: straight up, then the two upper diagonals.
            neighbors[i].append(i - n)
            if not on_left:
                neighbors[i].append(i - n - 1)
            if not on_right:
                neighbors[i].append(i - n + 1)
        if not on_bottom:
            # Row below: straight down, then the two lower diagonals.
            neighbors[i].append(i + n)
            if not on_left:
                neighbors[i].append(i + n - 1)
            if not on_right:
                neighbors[i].append(i + n + 1)
        # Same row: left and right.
        if not on_left:
            neighbors[i].append(i - 1)
        if not on_right:
            neighbors[i].append(i + 1)
    cache[n2] = neighbors
    return neighbors
def exact_sqrt(n2):
    "If n2 is a perfect square, return its square root, else raise error."
    root = int(math.sqrt(n2))
    assert root * root == n2
    return root
##_____________________________________________________________________________
class Wordlist:
    """This class holds a list of words. You can use (word in wordlist)
    to check if a word is in the list, or wordlist.lookup(prefix)
    to see if prefix starts any of the words in the list."""

    def __init__(self, filename, min_len=3):
        # Load, upper-case and sort the dictionary; words shorter than
        # min_len score nothing in Boggle and are dropped.
        lines = open(filename).read().upper().split()
        self.words = [word for word in lines if len(word) >= min_len]
        self.words.sort()
        self.bounds = {}
        for c in ALPHABET:
            c2 = chr(ord(c) + 1)
            # (lo, hi) index range of the words starting with letter c.
            self.bounds[c] = (bisect.bisect(self.words, c),
                              bisect.bisect(self.words, c2))

    def lookup(self, prefix, lo=0, hi=None):
        """See if prefix is in dictionary, as a full word or as a prefix.
        Return two values: the first is the lowest i such that
        words[i].startswith(prefix), or is None; the second is
        True iff prefix itself is in the Wordlist."""
        words = self.words
        i = bisect.bisect_left(words, prefix, lo, hi)
        if i < len(words) and words[i].startswith(prefix):
            return i, (words[i] == prefix)
        else:
            return None, False

    def __contains__(self, word):
        # NOTE(review): raises IndexError when `word` sorts after every
        # entry in the list -- confirm whether callers can hit this.
        return self.words[bisect.bisect_left(self.words, word)] == word

    def __len__(self):
        return len(self.words)
##_____________________________________________________________________________
class BoggleFinder:
    """A class that allows you to find all the words in a Boggle board. """

    wordlist = None ## A class variable, holding a wordlist

    def __init__(self, board=None):
        # Load the shared dictionary lazily, on first construction.
        # NOTE(review): the path is relative to the working directory.
        if BoggleFinder.wordlist is None:
            BoggleFinder.wordlist = Wordlist("../data/wordlist")
        self.found = {}
        if board:
            self.set_board(board)

    def set_board(self, board=None):
        "Set the board, and find all the words in it."
        if board is None:
            board = random_boggle()
        self.board = board
        self.neighbors = boggle_neighbors(len(board))
        self.found = {}
        for i in range(len(board)):
            # Restrict the dictionary range to words starting with board[i].
            lo, hi = self.wordlist.bounds[board[i]]
            self.find(lo, hi, i, [], '')
        return self

    def find(self, lo, hi, i, visited, prefix):
        """Looking in square i, find the words that continue the prefix,
        considering the entries in self.wordlist.words[lo:hi], and not
        revisiting the squares in visited."""
        if i in visited:
            return
        # `prefix` holds the letters of the squares visited so far; this
        # narrows the word range before square i's letter is appended.
        wordpos, is_word = self.wordlist.lookup(prefix, lo, hi)
        if wordpos is not None:
            if is_word:
                self.found[prefix] = True
            visited.append(i)
            c = self.board[i]
            if c == 'Q': c = 'QU'
            prefix += c
            for j in self.neighbors[i]:
                self.find(wordpos, hi, j, visited, prefix)
            # Backtrack: un-visit square i for other paths.
            visited.pop()

    def words(self):
        "The words found."
        return self.found.keys()

    # Boggle scoring table indexed by word length (0-3 letters score 0).
    scores = [0, 0, 0, 0, 1, 2, 3, 5] + [11] * 100

    def score(self):
        "The total score for the words found, according to the rules."
        return sum([self.scores[len(w)] for w in self.words()])

    def __len__(self):
        "The number of words found."
        return len(self.found)
##_____________________________________________________________________________
def boggle_hill_climbing(board=None, ntimes=100, print_it=True):
"""Solve inverse Boggle by hill-climbing: find a high-scoring board by
starting with a random one and changing it."""
finder = BoggleFinder()
if board is None:
board = random_boggle()
best = len(finder.set_board(board))
for _ in range(ntimes):
i, oldc = mutate_boggle(board)
new = len(finder.set_board(board))
if new > best:
best = new
print
best, _, board
else:
board[i] = oldc ## Change back
if print_it:
print_boggle(board)
return board, best
def mutate_boggle(board):
    """Replace one random board square with a random cube face, in place.

    Returns (index, previous_letter) so the caller can undo the change.
    """
    idx = random.randrange(len(board))
    previous = board[idx]
    board[idx] = random.choice(random.choice(cubes16)) ##random.choice(boyan_best)
    return idx, previous
# ______________________________________________________________________________
## Code to compare searchers on various problems.
class InstrumentedProblem(Problem):
    """Delegates to a problem, and keeps statistics."""

    def __init__(self, problem):
        self.problem = problem
        # Counters: successor() calls, goal tests, and states generated.
        self.succs = self.goal_tests = self.states = 0
        # The goal state found, if any.
        self.found = None

    def successor(self, state):
        "Return a list of (action, state) pairs reachable from this state."
        result = self.problem.successor(state)
        self.succs += 1;
        self.states += len(result)
        return result

    def goal_test(self, state):
        "Return true if the state is a goal."
        self.goal_tests += 1
        result = self.problem.goal_test(state)
        if result:
            self.found = state
        return result

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails; everything else
        # (initial, goal, h, ...) is forwarded to the wrapped problem.
        if attr in ('succs', 'goal_tests', 'states'):
            return self.__dict__[attr]
        else:
            return getattr(self.problem, attr)

    def __repr__(self):
        # Compact stats: <successor-calls/goal-tests/states/found-state>.
        return '<%4d/%4d/%4d/%s>' % (self.succs, self.goal_tests,
                                     self.states, str(self.found)[0:4])
def compare_searchers(problems, header, searchers=[breadth_first_tree_search,
                                                   breadth_first_graph_search, depth_first_graph_search,
                                                   iterative_deepening_search, depth_limited_search,
                                                   astar_search]):
    """Run each searcher on each problem and print a stats table.

    NB: the mutable default `searchers` list is shared across calls but
    never mutated here.
    """
    def do(searcher, problem):
        # Wrap the problem so successor/goal-test counts are recorded.
        p = InstrumentedProblem(problem)
        searcher(p)
        return p
    # `name` and `print_table` come from utils.
    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
def compare_graph_searchers():
    """Demo: compare the standard searchers on three map-search problems."""
    compare_searchers(problems=[GraphProblem('A', 'B', romania),
                                GraphProblem('O', 'N', romania),
                                GraphProblem('Q', 'WA', australia)],
                      header=['Searcher', 'Romania(A,B)', 'Romania(O, N)', 'Australia'])
| [
"trademark152@gmail.com"
] | trademark152@gmail.com |
a267eeadcc4741e654779332cfcd1081871b1d08 | 752cdeaec83216bb68b2cad4880c7543696edddf | /old-procedures/HistogramWithSliders.py | c40dfdae8ac887b08d492751907e8553e54925a0 | [] | no_license | lfl-main/measurement-code | 98ddc6d07319d57ce4975fe7be55f23fa6fc42e9 | 8cb77286a0e7b27040fad435e942a84eb6e27028 | refs/heads/master | 2020-06-28T03:36:32.001652 | 2019-08-02T00:51:09 | 2019-08-02T00:51:09 | 200,133,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,618 | py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
# parameters
nSkipTime = 1.5  # microseconds skipped at the start of each trace
nAvgTime = 14.5  # microseconds averaged per trace
nSampleRate = 500  # MegaSample/second
nBins = 80  # number of bins for histogram
nSamplesPerTrace = 28672  # number of samples for a single trace

# convert times to sample indices
iSkip = int(nSampleRate * nSkipTime)
iAvg = int(nSampleRate * nAvgTime)

# get data and reshape it into an array of individual traces; truncate any
# trailing partial trace so reshape cannot fail on a non-exact multiple
data = np.fromfile(r'E:\rawData\NIST_Q4_LongerTraces.bin')
nTraces = int(len(data) / nSamplesPerTrace)
data = data[:nTraces * nSamplesPerTrace].reshape((nTraces, nSamplesPerTrace))

# Calculate the averaged data.  Vectorized: one mean along axis 1 replaces
# the original Python-level loop over traces (same values, much faster).
fAvg = data[:, iSkip:iSkip + iAvg].mean(axis=1)

fig, axs = plt.subplots(2)
plt.subplots_adjust(bottom=0.25)
fig.suptitle('Single Shot Averaging Time: {} microseconds'.format(nAvgTime))
l1, = axs[0].plot(fAvg[0:200], '+')
axs[1].hist(fAvg, bins=nBins)

# make sliders
axcolor = 'lightgoldenrodyellow'
axSkip = plt.axes([0.25, 0.1, 0.65, 0.03], facecolor=axcolor)
axAvg = plt.axes([0.25, 0.15, 0.65, 0.03], facecolor=axcolor)
sSkipTime = Slider(axSkip, 'Skip (us)', 0.3, 5, valinit=nSkipTime, valstep=0.1)
sAvgTime = Slider(axAvg, 'Average (us)', 2, 50, valinit=nAvgTime, valstep=0.1)


def update(val):
    """Slider callback: recompute the per-trace averages and redraw."""
    skip = int(nSampleRate * sSkipTime.val)
    avg = int(nSampleRate * sAvgTime.val)
    vAvg = data[:, skip:skip + avg].mean(axis=1)
    l1.set_ydata(vAvg[0:200])
    axs[1].cla()
    axs[1].hist(vAvg, bins=nBins)
    fig.canvas.draw_idle()


sSkipTime.on_changed(update)
sAvgTime.on_changed(update)
plt.show()
| [
"jtfarmer@usc.edu"
] | jtfarmer@usc.edu |
9f100ff9a8239b02426973558c1a1cb5541215b8 | 8fd027decde498a0d84e0ea4e5a3cf885a150820 | /regex.py | 43454ea66dca538c5bf7b66e262810be5b89b5fa | [] | no_license | akstechies/basic-python | 23d84e8c1c05253142dd6d163c40a4d614ea059d | 4bedaf26a7bd7fe935f2c16af92c62f678ccfa8e | refs/heads/master | 2022-12-21T07:25:34.128786 | 2020-09-23T06:37:24 | 2020-09-23T06:37:24 | 297,876,128 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py | #regex
import re
# Demo of the main `re` operations on a fixed sentence.
# NOTE(review): patterns like '\s' should ideally be raw strings (r'\s').
txt = 'My name is Ayush'
# Anchored search: whole string must start with 'My' and end with 'Ayush'.
x = re.search('^My.*Ayush$', txt)
print(x)
if (x):
    print('found')
else:
    print('absent')
# All occurrences of the literal character 'y'.
y = re.findall('y', txt)
print(y)
# First whitespace match; .start() is its index in txt.
z = re.search('\s', txt) #returns white spaces
print(z)
print(z.start())
# Split the sentence on whitespace.
x = re.split("\s", txt)
print(x)
#x = re.sub("\s", "9", txt) #without control
# Replace at most 2 whitespace occurrences (the count argument).
x = re.sub("\s", "10", txt, 2) #with control
print(x)
"ayushasri95@gmail.com"
] | ayushasri95@gmail.com |
ef72f90b4618f71775f04d5f3fddba59d2aabd8c | 1d7d2fd06a1c1f6ac6fd1f1f01578791bd175bc0 | /greengrass/mainpage/migrations/0001_initial.py | dc6efe5addcb9953715fd3a643220523984abe23 | [] | no_license | nedialkom/greengrass | 27f40164452f44f73d038a3ec33619cbbba7660a | ebe29370947f32f16b094b9d7671598b26e875ff | refs/heads/main | 2023-02-07T01:08:17.297642 | 2020-12-27T14:32:51 | 2020-12-27T14:32:51 | 322,667,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # Generated by Django 2.1.5 on 2020-12-20 19:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Status table that records each
    moisture reading and the irrigation relay state."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Status',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Reading time as an integer (presumably Unix epoch seconds
                # -- confirm against the writer).
                ('timestamp', models.IntegerField()),
                ('current_moisture', models.IntegerField()),
                ('target_moisture', models.IntegerField()),
                # e.g. manual/auto selector; 6 chars max.
                ('mode', models.CharField(max_length=6)),
                # Relay desired vs. observed state, 3 chars (e.g. 'on'/'off').
                ('relay_target_status', models.CharField(max_length=3)),
                ('relay_current_state', models.CharField(max_length=3)),
            ],
        ),
    ]
| [
"nedialkom@gmail.com"
] | nedialkom@gmail.com |
4ba2bc1874c374866162200de9d8e56984492a87 | 4a512701620b812e72f0bdf2d70a06d47b810cb3 | /Lib/site-packages/PIL/FontFile.py | 1ccfaa3c32889778e117521a1da0edac50f48b5d | [
"LicenseRef-scancode-secret-labs-2011",
"BSD-2-Clause",
"MIT"
] | permissive | kmacho16/Portafolio-Django | 7f734abda1592f5f4fae9c245b59d57ff8b5684e | 2a4409dbf5c904ad146cd00734b8e7cf4993ec2a | refs/heads/master | 2022-10-22T18:51:27.693530 | 2017-04-25T21:03:13 | 2017-04-25T21:03:13 | 89,301,018 | 0 | 2 | MIT | 2022-10-01T03:36:02 | 2017-04-25T00:54:47 | Python | UTF-8 | Python | false | false | 2,778 | py | #
# The Python Imaging Library
# $Id$
#
# base class for raster font file parsers
#
# history:
# 1997-06-05 fl created
# 1997-08-19 fl restrict image width
#
# Copyright (c) 1997-1998 by Secret Labs AB
# Copyright (c) 1997-1998 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import os
from PIL import Image, _binary
WIDTH = 800
def puti16(fp, values):
# write network order (big-endian) 16-bit sequence
for v in values:
if v < 0:
v += 65536
fp.write(_binary.o16be(v))
##
# Base class for raster font file handlers.
class FontFile(object):
bitmap = None
def __init__(self):
self.info = {}
self.glyph = [None] * 256
def __getitem__(self, ix):
return self.glyph[ix]
def compile(self):
"Create metrics and bitmap"
if self.bitmap:
return
# create bitmap large enough to hold all data
h = w = maxwidth = 0
lines = 1
for glyph in self:
if glyph:
d, dst, src, im = glyph
h = max(h, src[3] - src[1])
w = w + (src[2] - src[0])
if w > WIDTH:
lines += 1
w = (src[2] - src[0])
maxwidth = max(maxwidth, w)
xsize = maxwidth
ysize = lines * h
if xsize == 0 and ysize == 0:
return ""
self.ysize = h
# paste glyphs into bitmap
self.bitmap = Image.new("1", (xsize, ysize))
self.metrics = [None] * 256
x = y = 0
for i in range(256):
glyph = self[i]
if glyph:
d, dst, src, im = glyph
xx = src[2] - src[0]
# yy = src[3] - src[1]
x0, y0 = x, y
x = x + xx
if x > WIDTH:
x, y = 0, y + h
x0, y0 = x, y
x = xx
s = src[0] + x0, src[1] + y0, src[2] + x0, src[3] + y0
self.bitmap.paste(im.crop(src), s)
# print chr(i), dst, s
self.metrics[i] = d, dst, s
def save(self, filename):
"Save font"
self.compile()
# font data
self.bitmap.save(os.path.splitext(filename)[0] + ".pbm", "PNG")
# font metrics
fp = open(os.path.splitext(filename)[0] + ".pil", "wb")
fp.write(b"PILfont\n")
fp.write((";;;;;;%d;\n" % self.ysize).encode('ascii')) # HACK!!!
fp.write(b"DATA\n")
for id in range(256):
m = self.metrics[id]
if not m:
puti16(fp, [0] * 10)
else:
puti16(fp, m[0] + m[1] + m[2])
fp.close()
| [
"Videos"
] | Videos |
76cb674d77d91c26c5a4d838238afa84b3ffd864 | 9d6b9dde730c254e2e15658e62c8a322a5a17084 | /Python/trcontrol/framework/filter/discrete.py | 3c5e9b2d5dfc92b64ccb9fdf1a43c5c231491efb | [] | no_license | irom-lab/task-relevant-control | 94ce072485f394d6439db6451f74d078a64021f3 | d77fe7e5fc3168ecce8099bb46fc7b59ad79e0b7 | refs/heads/master | 2020-03-26T09:53:30.230106 | 2019-06-19T18:52:16 | 2019-06-19T18:52:16 | 144,770,544 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,240 | py | import numpy as np
import trcontrol.framework.prob.dists as dists
import trcontrol.framework.prob.channels as channels
from .bayes import BayesFilter
from ..control import DSCProblem, Policy
class DiscreteFilter(BayesFilter):
def __init__(self, problem: DSCProblem, policy: Policy, init_meas: int) -> None:
super().__init__(problem, policy, init_meas)
self._dynamics = problem.dynamics_tensor
self._sensor = problem.sensor_tensor
def mle(self) -> int:
return np.argmax(self._belief.pmf())
def process_update(self, belief: dists.FiniteDist, t: int) -> dists.FiniteDist:
(n, _, m) = self._dynamics.shape
belief_pmf = belief.pmf()
next_belief_given_input = np.zeros(n, m)
for i in range(m):
next_belief_given_input[:, i] = self._dynamics.shape[:, :, i] @ belief_pmf
input_dist = self._policy.input_channel(t).marginal(dists.FiniteDist(belief_pmf))
return dists.FiniteDist(next_belief_given_input @ input_dist.pmf())
def measurement_update(self, proc_belief: dists.FiniteDist, meas: int, t: int) -> dists.FiniteDist:
channel = channels.DiscreteChannel(self._sensor)
return channel.posterior(proc_belief, meas)
| [
"vpacelli@princeton.edu"
] | vpacelli@princeton.edu |
0b1cde1c5f80af4837b8282ef80c77174dc1c5e7 | 12f18662719d04d2404396b9059b60525528f557 | /findsportsordermanagement-master/purchaseorder/migrations/0018_purchaseorder_internal_notes.py | 45c5291a9d14c87910376425849d88f1c857c904 | [] | no_license | ujjalgoswami/ordermanagementcustomdashboard | 0bf4a5770d1913b257a43858d778e630e671a342 | acd18510b0934601d30bd717ea4b3fbb61ecfb5c | refs/heads/master | 2021-02-04T10:04:27.380674 | 2020-02-28T01:37:35 | 2020-02-28T01:37:35 | 243,653,613 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | # Generated by Django 2.2.4 on 2019-12-10 15:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('purchaseorder', '0017_orderline_reorder'),
]
operations = [
migrations.AddField(
model_name='purchaseorder',
name='internal_notes',
field=models.TextField(null=True),
),
]
| [
"ujjalgoswami92@gmail.com"
] | ujjalgoswami92@gmail.com |
aa0db1f58ce91da19a7d41999876cac604b17579 | b086de93e9769490ec30c86da137b500f511234f | /accounts/views.py | 7122b888b28f984c581db957d7d4a7b1c4c10c7a | [] | no_license | tomoyasakamoto32/anipho-django | c0d256418e2d2d8b8d2508f7dd06e9b5d0137288 | c270c95399d879decb6ed225e7adb86df56776be | refs/heads/master | 2023-06-07T19:42:51.755080 | 2021-06-30T13:56:56 | 2021-06-30T13:56:56 | 381,719,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | from django.shortcuts import render
import os
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView
from django.contrib.auth.views import LoginView, LogoutView
from .models import User
from .forms import UserCreationForm, UserLoginForm
class UserCreateView(CreateView):
model = User
template_name = os.path.join('accounts', 'user_create.html')
form_class = UserCreationForm
success_url = reverse_lazy('accounts:sample_user')
def sample(request):
return render(request, 'sample.html')
def sample2(request):
return render(request, 'sample2.html')
class UserLoginView(LoginView):
template_name = os.path.join('accounts', 'user_login.html')
authentication_form = UserLoginForm
class UserLogoutView(LogoutView):
pass
| [
"oregairu0302@gmail.com"
] | oregairu0302@gmail.com |
eaa8cdca8a91f7702597565436248b64ebc579c9 | de967af1619d397c2e1ef52cf8d65cdfbfd0c80c | /p4ye_tupes.py | e277dd79cc9017343557f7e1e89e9733aad0f502 | [] | no_license | Madhav-Jindal/Python_Github | edc364d9bf094435c354cb0d01129fa253099eab | c5c3fff76aec64964ec00b31fde7499b13eb5088 | refs/heads/master | 2022-12-31T13:20:30.296249 | 2020-10-01T05:10:55 | 2020-10-01T05:10:55 | 259,837,345 | 0 | 1 | null | 2020-10-01T05:10:56 | 2020-04-29T05:55:51 | Jupyter Notebook | UTF-8 | Python | false | false | 404 | py | hndl=open("mbox-short.txt")
word=[]
count=dict()
fword=[]
inpos=0
for i in hndl:
word=i.split()
if i=='' or i=='\n':
continue
if word[0]=="From":
inpos=i.find(':')
fword.append((i[inpos-2:inpos]))
for j in fword:
count[j]=count.get(j,0)+1
temp=list()
for k,v in count.items():
temp.append((k,v))
temp=sorted(temp)
for k,v in temp:
print(k,v)
| [
"jindalmadhav04@gmail.com"
] | jindalmadhav04@gmail.com |
9e7a593d82e82e240312fba791b546a1e69135a6 | 805b01faebe435c97ca74ff0806ed549a1e50baa | /inventory/ec2.py | 8e7538cde06521d966b1575879a677be65efb989 | [
"ISC"
] | permissive | rwittrick/kubernetes-the-hard-way-with-aws-and-ansible | 9b87d6cd2b0d6c67ef0b4f70c80b26a7df81a2b1 | b6a6b4419eb156601a41b14d9825bab629c9aa71 | refs/heads/master | 2020-03-19T08:43:09.062451 | 2016-10-10T19:00:24 | 2016-10-10T19:00:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,496 | py | #!/usr/bin/env python
from boto import ec2
import json
import sys
import os
inventory = {}
inventory['_meta'] = { 'hostvars': {} }
inventory['all'] = []
#
# Make boto connection
#
try:
aws_region = os.environ['AWS_REGION']
except:
print "ERROR: The AWS_REGION environment variable must be set"
sys.exit(1)
# Make the connection to AWS API
try:
ec2conn = ec2.connect_to_region(aws_region)
except:
print "ERROR: Unable to connect to AWS"
sys.exit(1)
# Run through all the instances
reservations = ec2conn.get_all_instances()
instances = [i for r in reservations for i in r.instances]
for i in instances:
# Check if the host has a name, if not we don't care about it anyways
try:
host_name = i.tags['Name']
except:
host_name = False
if i.state == "running" and host_name:
# Check for a public IP, if non use the private IP
if i.ip_address:
ip = i.ip_address
else:
ip = i.private_ip_address
# kubernetes role...
try:
krole = "tag_krole_" + i.tags['krole']
except:
krole = None
if krole != None:
try:
inventory[krole].append(host_name)
except:
inventory[krole] = []
inventory[krole].append(host_name)
# Only want hosts with a krole, ignore all others
inventory['all'].append(host_name)
inventory['_meta']['hostvars'][host_name] = {}
inventory['_meta']['hostvars'][host_name]['ansible_ssh_host'] = ip
print(json.dumps(inventory, indent=4))
| [
"curtis@serverascode.com"
] | curtis@serverascode.com |
3af880c13fca8332dce08ffcc292f394404d29ac | 515c889e3655496a86f62f4342a3bd19dbf3c6bf | /Day 3/Program 2.py | d3a27702a10b8974efa0448799b4606693eeb2ef | [] | no_license | shivank8/tathastu_week_of_code | 92c5429555f1aa2183e0733e446d4615f70bb1a4 | 638494330ccbcdeb72f96d31a459be40ae1d2c3a | refs/heads/master | 2022-10-02T05:53:10.929324 | 2020-06-08T13:39:20 | 2020-06-08T13:39:20 | 268,218,272 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 116 | py | str=input("Enter a string:")
for i in range(len(str)+1):
for j in range(i+1,len(str)+1):
print(str[i:j]) | [
"shivank8@users.noreply.github.com"
] | shivank8@users.noreply.github.com |
667a27f91a5feffa45b0df3b9f4c79d54a94be94 | af93b3909f86ab2d310a8fa81c9357d87fdd8a64 | /begginer/5. cas/zadatak5.py | f27bd47d532b65caf07d06877073feb078f9bbcb | [] | no_license | BiljanaPavlovic/pajton-kurs | 8cf15d443c9cca38f627e44d764106ef0cc5cd98 | 93092e6e945b33116ca65796570462edccfcbcb0 | refs/heads/master | 2021-05-24T14:09:57.536994 | 2020-08-02T15:00:12 | 2020-08-02T15:00:12 | 253,597,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | unos=input("Unesite stranice a i b:")
stranice=unos.split(" ")
obim=2*float(stranice[0])+2*float(stranice[1])
povrsina=float(stranice[0])*float(stranice[1])
print("O= ",obim)
print("P=",povrsina)
| [
"zabiljanupavlovic@gmail.com"
] | zabiljanupavlovic@gmail.com |
5531a245c2d02d47be36e799adedf7c425d2834d | 7c312f638b4282cb2c2166e6d8ecb028bd28755e | /SUP/TD1/Ex14.py | 65f1449b501e1840c73b6f38c82719b8f532be8b | [] | no_license | 3wnbr1/TD-Informatique-ECAM | 48aceb0d31d7d7bc3fffbe6b36dde8f9d15527b6 | 3f0417c1a9985780b0c38d89f9d371ec488f66fd | refs/heads/master | 2021-03-16T10:12:08.791056 | 2018-10-23T10:03:08 | 2018-10-23T10:03:08 | 121,541,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 26 14:56:45 2016
@author: ewen.brun
"""
s = 0
a = int(input("Entrez un nb entier"))
b = int(input("Entrez un autre nb entier"))
for i in range(1,a+1):
s+=b
print("a * b = ",s) | [
"ewen.brun@icloud.com"
] | ewen.brun@icloud.com |
aa1b903a714e6de8094e8910eb0d077b101caf1f | a4ce49d64e8d9cfc9b0e451d3741f62fc604819e | /hello/hello/settings.py | 30b639d15107ae3385f13a4eae0e110508f1410d | [] | no_license | Dimasik133/my-first-blog | 359e988261960b050be0aa407eef839e46a67250 | aa29103d80b2a6ca8cbeecb23a2fc29671621866 | refs/heads/master | 2021-04-15T04:29:10.188336 | 2018-03-25T16:58:22 | 2018-03-25T16:58:22 | 126,703,664 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,311 | py | """
Django settings for hello project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5&mw$k5-bbh^*cz__5x=&f*fmqfnw7pqksfti^!%a_)t+_^jeq'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'dima.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"noreply@github.com"
] | Dimasik133.noreply@github.com |
99e4e4ca7bb40a4fc37e65d4d6c65b0a7d078685 | b9d75e3e37d08262321b0dc726639fc25f152caa | /utils.py | cc49eb7b6a45ee027cefe48ead6e43e9a20dab51 | [] | no_license | G-Wang/pytorch_FFTNet | a2712763ae7ee2fff9d002c931593987d6e25060 | b96486f6823e762e71c2e299739b925081e5bacf | refs/heads/master | 2020-04-08T22:14:18.563719 | 2018-08-31T07:38:21 | 2018-08-31T07:38:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,463 | py | import numpy as np
import torch
from torch.nn import functional as F
from scipy.special import expn
from torchaudio.transforms import MuLawEncoding, MuLawExpanding
def encoder(quantization_channels):
return MuLawEncoding(quantization_channels)
def decoder(quantization_channels):
return MuLawExpanding(quantization_channels)
def np_mulaw(x, quantization_channels):
mu = quantization_channels - 1
x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)
return x_mu
def np_inv_mulaw(x, quantization_channels):
mu = quantization_channels - 1
x = np.sign(x) * (np.exp(np.abs(x) * np.log1p(mu)) - 1.) / mu
return x
def float2class(x, classes):
mu = classes - 1
return np.rint((x + 1) / 2 * mu).astype(int)
def class2float(x, classes):
mu = classes - 1
return x.astype(float) / mu * 2 - 1.
def zero_padding(x, maxlen, dim=0):
diff = maxlen - x.shape[dim]
if diff <= 0:
return x
else:
pad_shape = ()
for i in range(len(x.shape)):
if i != dim:
pad_shape += ((0, 0),)
else:
pad_shape += ((0, diff),)
return np.pad(x, pad_shape, 'constant')
def repeat_last_padding(x, maxlen):
diff = maxlen - x.shape[-1]
if diff <= 0:
return x
else:
pad_value = np.tile(x[..., [-1]], diff)
return np.concatenate((x, pad_value), axis=-1)
# this function is copied from https://github.com/braindead/logmmse/blob/master/logmmse.py
# change numpy to tensor
def logmmse(x, sr, noise_std=1 / 256):
window_size = int(0.02 * sr)
if window_size % 2 == 1:
window_size += 1
# noverlap = len1; hop_size = len2; window_size = len
noverlap = int(window_size * 0.75)
hop_size = window_size - noverlap
win = torch.hann_window(window_size)
win *= hop_size / win.sum()
nfft = 2 ** (window_size - 1).bit_length()
pad_pos = (nfft - window_size) // 2
noise = torch.randn(6, window_size) * noise_std
noise_fft = torch.rfft(F.pad(win * noise, (pad_pos, pad_pos)), 1)
noise_mean = noise_fft.pow(2).sum(-1).sqrt()
noise_mu = noise_mean.mean(0)
noise_mu2 = noise_mu.pow(2)
spec = torch.stft(x, nfft, hop_length=hop_size, win_length=window_size, window=win, center=False)
spec_copy = spec.clone()
sig2 = spec.pow(2).sum(-1)
vad_curve = vad(x, S=spec).float()
aa = 0.98
ksi_min = 10 ** (-25 / 10)
gammak = torch.min(sig2 / noise_mu2.unsqueeze(-1), torch.Tensor([40]))
for n in range(spec.size(1)):
gammak_n = gammak[:, n]
if n == 0:
ksi = aa + (1 - aa) * F.relu(gammak_n - 1)
else:
ksi = aa * spec_copy[:, n - 1].pow(2).sum(-1) / noise_mu2 + (1 - aa) * F.relu(gammak_n - 1)
ksi = torch.max(ksi, torch.Tensor([ksi_min]))
A = ksi / (1 + ksi)
vk = A * gammak_n
ei_vk = 0.5 * expint(vk)
hw = A * ei_vk.exp()
spec_copy[:, n] *= hw.unsqueeze(-1)
xi_w = torch.irfft(spec_copy.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
origin = torch.irfft(spec.transpose(0, 1), 1, signal_sizes=torch.Size([nfft]))[:, pad_pos:-pad_pos]
xi_w_mask = vad_curve / 2 + 0.5
orign_mask = (1 - vad_curve) / 2
final_framed = xi_w * xi_w_mask.unsqueeze(-1) + origin * orign_mask.unsqueeze(-1)
xfinal = torch.zeros(final_framed.size(0) * hop_size + noverlap)
k = 0
for n in range(final_framed.size(0)):
xfinal[k:k + window_size] += final_framed[n]
k += hop_size
return xfinal
def expint(x):
x = x.detach().cpu().numpy()
x = expn(1, x)
return torch.from_numpy(x).float()
def vad(x, hop_size=256, S=None, k=5, med_num=9):
if S is None:
S = torch.stft(x, hop_size * 4, hop_length=hop_size)
energy = S.pow(2).sum(-1).mean(0).sqrt()
energy /= energy.max()
sorted_E, _ = energy.sort()
sorted_E_d = sorted_E[2:] - sorted_E[:-2]
smoothed = F.pad(sorted_E_d, (7, 7)).unfold(0, 15, 1).mean(-1)
sorted_E_d_peak = F.relu(smoothed[1:-1] - smoothed[:-2]) * F.relu(smoothed[1:-1] - smoothed[2:])
first, *dummy = torch.nonzero(sorted_E_d_peak) + 2
E_th = sorted_E[:first].mean() * k
decision = torch.gt(energy, E_th)
pad = (med_num // 2, med_num // 2)
decision = F.pad(decision, pad)
decision = decision.unfold(0, med_num, 1)
decision, _ = decision.median(dim=-1)
return decision
| [
"ya70201@gmail.com"
] | ya70201@gmail.com |
6269c118b785c226dfabf5a8ce4a5b88e4cb06f7 | 9007e11ff17d8306ad7fa2d01120b849a6416145 | /firstMissingPos.py | f53139f9b8de197091a11d21d31e56f4456ff46c | [] | no_license | ag300g/leecode | 48da8aeb57cdf0af260c66651515676828ed4bfb | 099432775d604d67b021ea0b1b1e5407fe95c3a0 | refs/heads/master | 2020-03-29T13:08:58.432511 | 2018-10-22T06:17:47 | 2018-10-22T06:17:47 | 149,942,855 | 0 | 0 | null | 2018-09-23T02:56:03 | 2018-09-23T02:56:03 | null | UTF-8 | Python | false | false | 771 | py | # Given an unsorted integer array, find the first missing positive integer.
# For example,
# Given [1,2,0] return 3,
# and [3,4,-1,1] return 2.
# Your algorithm should run in O(n) time and uses constant space.
class Solution:
# @param A, a list of integers
# @return an integer
def firstMissingPositive(self, A):
l = len(A)
for i in range(l):
while A[i]-1 != i and -1 < A[i]-1 < l:
loc = A[i]-1
if A[loc] == A[i]: break
A[loc], A[i] = A[i], A[loc]
count = 0
while count < l and A[count]-1 == count:
count += 1
return count+1
if __name__ == '__main__':
A = [3,4,-1,1]
test = Solution()
out = test.firstMissingPositive(A)
print out
| [
"anqicheng@reshut2-90-80-dhcp.int.colorado.edu"
] | anqicheng@reshut2-90-80-dhcp.int.colorado.edu |
669f0b6c3c8618d79002c152b61eed0cf7a17f6c | 01bdaed4a2765b90aa2466cc423070075800c445 | /main.py | cbb6919d34cf82421e4df3c6e586266b691fdf95 | [] | no_license | sayho93/dataAnalysis | c97cc6b7345dd322296d6bee79aec147d6b76b66 | f79454c31c38bd7d1a31c34d4f9af0fae3ef44a2 | refs/heads/master | 2023-01-24T08:54:46.448174 | 2020-12-01T16:40:15 | 2020-12-01T16:40:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,280 | py | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from statsmodels.formula.api import ols
sns.set_palette('muted')
sns.set_style('whitegrid')
pd.set_option('display.max_rows', 3200)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
pd.set_option('display.max_colwidth', None)
plt.rc('font', family='Malgun Gothic')
plt.rc('axes', unicode_minus=False)
# -------흡연율
smokingData = pd.read_excel('./smokingRate.xlsx')
smokingByAge = smokingData.iloc[3:11]
smokingByAge = smokingByAge.drop(['성별(1)', '응답자특성별(1)'], axis=1)
smokingByAge = smokingByAge[
['응답자특성별(2)', '1998.1', '2001.1', '2005.1', '2007.1', '2008.1', '2009.1', '2010.1', '2011.1', '2012.1', '2013.1',
'2014.1', '2015.1', '2016.1', '2017.1', '2018.1']]
index = smokingByAge.iloc[:, 0].values
smokingByAge.index = index
smokingByAge = smokingByAge.drop(['응답자특성별(2)'], axis=1)
smokingByAge.columns = ['1998', '2001', '2005', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015',
'2016',
'2017', '2018']
# print(smokingByAge)
# for idx, row in smokingByAge.iterrows():
# plt.plot(smokingByAge.columns, smokingByAge.loc[idx].to_numpy(), marker='o')
#
# plt.title("흡연률", fontsize=20)
# plt.xlabel('Year', fontsize=14)
# plt.ylabel('%', fontsize=14)
# plt.legend(smokingByAge.index)
# # plt.show()
# plt.savefig("res/smokingRate.png", bbox_inches='tight')
# plt.close()
# -------음주율
drinkingData = pd.read_excel("./drinkingRate.xlsx")
drinkingByAge = drinkingData.iloc[3:11]
drinkingByAge = drinkingByAge.drop(['성별(1)', '응답자특성별(1)'], axis=1)
drinkingByAge = drinkingByAge[
['응답자특성별(2)', '2005.1', '2007.1', '2008.1', '2009.1', '2010.1', '2011.1', '2012.1', '2013.1', '2014.1', '2015.1',
'2016.1', '2017.1', '2018.1']]
index = drinkingByAge.iloc[:, 0].values
drinkingByAge.index = index
# print(drinkingByAge)
drinkingByAge = drinkingByAge.drop(['응답자특성별(2)'], axis=1)
drinkingByAge.columns = ['2005', '2007', '2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017',
'2018']
# print(drinkingByAge)
# for idx, row in drinkingByAge.iterrows():
# plt.plot(drinkingByAge.columns, drinkingByAge.loc[idx].to_numpy(), marker='o')
#
# plt.title("음주율", fontsize=20)
# plt.xlabel('Year', fontsize=14)
# plt.ylabel('%', fontsize=14)
# plt.legend(drinkingByAge.index)
# # plt.show()
# plt.savefig("res/drinkingRate.png", bbox_inches='tight')
# plt.close()
# -------Cancer
cancerData = pd.read_excel('./cancer.xlsx')
mask = (cancerData.성별 == "남자") & (cancerData.항목 == "연령군발생률")
mask2 = (cancerData.성별 == "여자") & (cancerData.항목 == "연령군발생률")
cancerDataMale = cancerData.loc[mask, :]
cancerDataFemale = cancerData.loc[mask2, :]
cancerDataMale = cancerDataMale.fillna(0)
cancerDataFemale = cancerDataFemale.fillna(0)
cancerDataMale = cancerDataMale.replace('-', 0)
cancerDataFemale = cancerDataFemale.replace('-', 0)
mask = cancerDataMale.연령군 == "계"
mask2 = cancerDataFemale.연령군 == "계"
cancerDataMale = cancerDataMale.loc[mask, :]
cancerDataFemale = cancerDataFemale.loc[mask2, :]
masked = cancerDataMale
masked = masked.drop(['성별', '연령군', '항목', '단위'], axis=1)
masked = masked.reset_index(drop=True)
maskedFemale = cancerDataFemale
maskedFemale = maskedFemale.drop(['성별', '연령군', '항목', '단위'], axis=1)
maskedFemale = maskedFemale.reset_index(drop=True)
def plotByYear(data, i, gender):
txt = gender == 1 and "남자" or "여자"
tmp = data.iloc[[i]]
title = tmp.iloc[0, 0] + " - " + txt
tmp = tmp.transpose()
tmp = tmp.drop(['24개 암종'])
print(title)
ax = sns.barplot(
data=tmp,
x=tmp.index,
y=tmp.iloc[:, 0]
)
ax.set_title(title)
plt.savefig("res/type/type{}-{}.png".format(i, txt), bbox_inches='tight')
plt.close()
def plotByDisease(data, index, gender):
txt = gender == 1 and "남자" or "여자"
title = data.index.values[index] + " - " + txt
print(title)
print(data.iloc[index][1:])
ax = sns.barplot(
data=data,
x=data.iloc[index][1:],
y=data.iloc[0][1:]
)
ax.set_title(title)
plt.savefig("res/year/year{}-{}.png".format(index, txt), bbox_inches='tight')
plt.close()
# plt.rcParams["figure.figsize"] = (14, 4)
# plt.rcParams['lines.linewidth'] = 2
# plt.rcParams['axes.grid'] = True
#
# for i in range(0, 24):
# plotByYear(masked, i, 1)
# plotByYear(maskedFemale, i, 0)
#
#
# plt.rcParams["figure.figsize"] = (10, 10)
# plt.rcParams['lines.linewidth'] = 0.1
# plt.rcParams['axes.grid'] = True
#
# tmp = masked.transpose()
# tmp2 = maskedFemale.transpose()
#
# for i in range(1, 20):
# plotByDisease(tmp, i, 1)
# plotByDisease(tmp2, i, 0)
data = pd.read_csv('./data.csv', error_bad_lines=False, encoding="CP949")
data = data.drop('Unnamed: 0', axis=1)
"""
DI1_dg - 고혈압 의사진단 여부 0: 없음 1: 있음 8: 비해당(청소년, 소아) 9: 모름, 무응답
DI1_ag - 고혈압 진단시기 0~79: 0~79세 80: 80세이상 888:비해당(청소년, 소아, 의사진단 받지 않음) 999: 모름, 무응답
DI1_pr - 고혈압 현재 유병 여부 0: 없음 1: 있음 8: 비해당 9: 모름, 무응답
DI1_pt - 고혈압 치료 여부 0: 없음 1: 있음 8: 비해당 9: 모름, 무응답
DI1_2 - 혈압조절제 복용 1: 매일 목용 2: 한달에 20일 이상 3: 한달에 15일 이상 4: 한달에 15일 미만 5: 미복용 8: 비해당 9: 모름, 무응답
BD1 - (만12세이상)평생음주경험
BD2 - (만12세이상)음주 시작 연령
BD1_11 - (만12세이상)1년간 음주빈도
BD2_1 - (만12세이상)한번에 마시는 음주량
BD2_14 - (만12세이상)한번에 마시는 음주량_잔
BD2_31 - (만12세이상) 폭음 빈도
BD2_32 - (성인) 여자 폭음 빈도
BS1_1 - (성인) 평생흡연 여부
BS2_1 - (성인) 흡연 시작연령
BS2_2 - (성인)매일 흡연 시작연령
BS3_1 - (성인) 현재흡연 여부
BS3_2 - (성인) 하루평균 흡연량
BS3_3 - (성인) 가끔흡연자 최근 1달간 흡연일수
BS6_2 - (성인) 과거흡연자 흡연기간(월 환산)
BS6_2_1 - (성인) 과거흡연자 흡연기간(연)
BS6_2_2 - (성인) 과거흡연자 흡연기간(월)
BS6_3 - (성인) 과거흡연자 하루평균 흡연량
BS6_4 - (성인) 과거흡연자 금연기간(월 환산)
BS6_4_1 - (성인) 과거흡연자 금연기간(연)
BS6_4_2 - (성인) 과거흡연자 금연기간(월)
"""
params = ["age", "DI1_dg", "DI1_ag", "DI1_pr", "DI1_pt", "DI1_2", "BD1", "BD2", "BD1_11", "BD2_1", "BD2_14", "BD2_31",
"BD2_32"]
highBP = data[params]
# mask = highBP.DI1_dg == 1
# highBP = highBP.loc[mask, :]
# print(highBP)
# print(highBP.count())
# mask = (highBP.BD1_11 >= 4) & (highBP.BD1_11 != 8) & (highBP.BD1_11 != 9)
mask = (highBP.DI1_ag != 999) & (highBP.DI1_ag != 888)
highBP = highBP.loc[mask, :]
# print(highBP)
# print(highBP.count())
plt.scatter(highBP["BD2_31"].values, highBP["DI1_ag"].values, c="steelblue", edgecolor="white", s=20)
plt.title('폭음 빈도 vs 고혈압 진단시기')
plt.xlabel('BD2_31')
plt.ylabel('DI1_ag')
plt.grid()
# plt.show()
plt.savefig("res/폭음빈도 vs 고혈압 진단시기.png", bbox_inches='tight')
plt.close()
params = ["age", "DI1_dg", "DI1_ag", "DI1_pr", "DI1_pt", "DI1_2", "BS1_1", "BS2_1", "BS2_2", "BS3_1", "BS3_2", "BS3_3",
"BS6_2", "BS6_2_1", "BS6_2_2", "BS6_3", "BS6_4", "BS6_4_1", "BS6_4_2"]
highBP = data[params]
mask = (highBP.BS2_2 != 999) & (highBP.BS2_2 != 888) & (highBP.DI1_ag != 999) & (highBP.DI1_ag != 888) & \
(highBP.DI1_dg == 1)
highBP = highBP.loc[mask, :]
# print(highBP)
title = "고혈압 진단시기 vs 매입흡연 시작연령"
plt.scatter(highBP["BS2_2"].values, highBP["DI1_ag"].values, c="steelblue", edgecolor="white", s=20)
plt.title(title)
plt.xlabel('BS2_2')
plt.ylabel('DI1_ag')
plt.grid()
# plt.show()
plt.savefig("res/고혈압 진단시기 vs 매일흡연 시작연령.png", bbox_inches='tight')
plt.close()
X = highBP[["BS2_2"]]
Y = highBP[["DI1_ag"]]
lr = LinearRegression()
lr.fit(X, Y)
prediction = lr.predict(X)
print("w = ", lr.coef_)
print("b = ", lr.intercept_)
plt.plot(X, lr.predict(X), color='red', lw=2)
plt.scatter(X.values, Y.values, c="steelblue", edgecolor="white", s=30)
plt.title(title)
plt.xlabel("BS2_2")
plt.ylabel("DI1_ag")
plt.grid()
# plt.show()
plt.savefig("res/고혈압 진단시기 vs 매일흡연 시작연령_regression.png", bbox_inches='tight')
plt.close()
print('Mean_Squared_Error = ', mean_squared_error(prediction, Y))
print('RMSE = ', mean_squared_error(prediction, Y) ** 0.5)
res = ols('DI1_ag~BS2_2', data=highBP).fit()
print(title)
print(res.summary())
# ------------------------------------ Age - highBP
params = ["age", "DI1_dg", "DI1_ag", "DI1_pr", "DI1_pt", "DI1_2", "BS1_1", "BS2_1", "BS2_2", "BS3_1", "BS3_2", "BS3_3",
"BS6_2", "BS6_2_1", "BS6_2_2", "BS6_3", "BS6_4", "BS6_4_1", "BS6_4_2"]
highBP = data[params]
mask = (highBP.DI1_ag != 999) & (highBP.DI1_ag != 888) & (highBP.DI1_dg == 1)
highBP = highBP.loc[mask, :]
plt.scatter(highBP["age"].values, highBP["DI1_ag"].values, c="steelblue", edgecolor="white", s=20)
plt.title('고혈압 진단시기 vs 연령')
plt.xlabel('age')
plt.ylabel('DI1_ag')
plt.grid()
# plt.show()
plt.savefig("res/고혈압 진단시기 vs 연령.png", bbox_inches='tight')
plt.close()
X = highBP[["age"]]
Y = highBP[["DI1_ag"]]
lr = LinearRegression()
lr.fit(X, Y)
prediction = lr.predict(X)
print("w = ", lr.coef_)
print("b = ", lr.intercept_)
plt.plot(X, lr.predict(X), color='red', lw=2)
plt.scatter(X.values, Y.values, c="steelblue", edgecolor="white", s=30)
plt.title("고혈압 진단시기 vs 연령")
plt.xlabel("연령")
plt.ylabel("고혈압 진단시기")
plt.grid()
plt.savefig("res/고혈압 진단시기 vs 연령_regression.png", bbox_inches='tight')
# plt.show()
plt.close()
print('Mean_Squared_Error = ', mean_squared_error(prediction, Y))
print('RMSE = ', mean_squared_error(prediction, Y) ** 0.5)
res = ols('DI1_ag~age', data=highBP).fit()
print(res.summary())
# --------------------------------- gastric cancer
params = ["age", "DC1_dg", "DC1_ag", "DC1_pr", "DC1_pt", "DI1_2", "BS1_1", "BS2_1", "BS2_2", "BS3_1", "BS3_2", "BS3_3",
"BS6_2", "BS6_2_1", "BS6_2_2", "BS6_3", "BS6_4", "BS6_4_1", "BS6_4_2", "BD1", "BD2", "BD1_11", "BD2_1",
"BD2_14", "BD2_31", "BD2_32"]
gastricCancer = data[params]
mask = (gastricCancer.BD2 != 999) & (gastricCancer.BD2 != 888) & (gastricCancer.DC1_dg == 1)
gastricCancer = gastricCancer.loc[mask, :]
print(gastricCancer)
plt.scatter(gastricCancer["DC1_ag"].values, gastricCancer["BD2"].values, c="steelblue", edgecolor="white", s=20)
plt.title('음주 시작 연령 vs 위암 진단시기')
plt.xlabel('DC1_ag')
plt.ylabel('BD2')
plt.grid()
# plt.show()
plt.savefig("res/음주 시작 연령 vs 위암 진단시기.png", bbox_inches='tight')
plt.close()
# --------------------------------- highBP income
params = ["age", "ainc", "DI1_dg", "DI1_ag", "DI1_pr", "DI1_pt", "DI1_2", "BS1_1", "BS2_1", "BS2_2", "BS3_1", "BS3_2",
"BS3_3", "BS6_2", "BS6_2_1", "BS6_2_2", "BS6_3", "BS6_4", "BS6_4_1", "BS6_4_2"]
highBP = data[params]
highBP = highBP[highBP['ainc'].notna()]
mask = (highBP.DI1_ag != 999) & (highBP.DI1_ag != 888) & (highBP.DI1_dg == 1)
highBP = highBP.loc[mask, :]
title = "고혈압 진단시기 vs 월평균 가구총소득"
plt.scatter(highBP["ainc"].values, highBP["DI1_ag"].values, c="steelblue", edgecolor="white", s=20)
plt.title(title)
plt.xlabel('ainc')
plt.ylabel('DI1_ag')
plt.grid()
# plt.show()
plt.savefig("res/고혈압 진단시기 vs 월평균 가구총소득.png", bbox_inches='tight')
plt.close()
X = highBP[["ainc"]]
Y = highBP[["DI1_ag"]]
lr = LinearRegression()
lr.fit(X, Y)
prediction = lr.predict(X)
print("w = ", lr.coef_)
print("b = ", lr.intercept_)
plt.plot(X, lr.predict(X), color='red', lw=2)
plt.scatter(X.values, Y.values, c="steelblue", edgecolor="white", s=30)
plt.title(title)
plt.xlabel("BS2_2")
plt.ylabel("DI1_ag")
plt.grid()
# plt.show()
plt.savefig("res/고혈압 진단시기 vs 월평균 가구총소득_regression.png", bbox_inches='tight')
plt.close()
print('Mean_Squared_Error = ', mean_squared_error(prediction, Y))
print('RMSE = ', mean_squared_error(prediction, Y) ** 0.5)
res = ols('DI1_ag~ainc', data=highBP).fit()
print(title)
print(res.summary())
# ---------------------------------  stroke / drinking, stroke / smoking,
# stroke / cholesterol, stroke / triglycerides, lung cancer / smoking and
# liver cancer / binge drinking.
#
# The six analyses below all follow the same recipe, so they are driven by a
# configuration table instead of six copy-pasted blocks.  For each
# (disease, factor) pair we: select the relevant survey columns, drop
# sentinel / missing values, save a scatter plot, fit a one-variable linear
# regression, save the fitted plot, and print fit diagnostics plus an OLS
# summary — exactly as the original per-section code did.

# Smoking / drinking behaviour columns shared by the lifestyle analyses.
LIFESTYLE_COLS = ["BS1_1", "BS2_1", "BS2_2", "BS3_1", "BS3_2",
                  "BS3_3", "BS6_2", "BS6_2_1", "BS6_2_2", "BS6_3", "BS6_4",
                  "BS6_4_1", "BS6_4_2", "BD1", "BD2", "BD1_11",
                  "BD2_1", "BD2_14", "BD2_31", "BD2_32"]

# (disease prefix, x column, sentinel x values (None -> drop NaN instead),
#  x axis label, y axis label)
ANALYSES = [
    ("DI3", "BD2", (888, 999), '음주 시작 연령', '뇌졸중 진단시기'),
    ("DI3", "BS2_2", (888, 999), '흡연 시작 연령', '뇌졸중 진단시기'),
    ("DI3", "HE_chol", None, '혈중 콜레스테롤', '뇌졸중 진단시기'),
    ("DI3", "HE_TG", None, '중성지방', '뇌졸중 진단시기'),
    ("DC6", "BS2_2", (888, 999), '매일흠연 시작연령', '폐암 진단시기'),
    ("DC2", "BD2_31", (8, 9), '폭음 빈도', '간암 진단시기'),
]

for prefix, xcol, sentinels, xlabel, ylabel in ANALYSES:
    dg_col = prefix + "_dg"
    ag_col = prefix + "_ag"
    cols = ["age", "ainc", dg_col, ag_col, prefix + "_pr", prefix + "_pt"]
    if sentinels is None:
        cols += ["HE_chol", "HE_TG"]  # lab-measurement analyses
    else:
        cols += LIFESTYLE_COLS        # lifestyle analyses
    subset = data[cols]
    # Diagnosed subjects only, with a valid diagnosis age
    # (999 = unknown, 888 = not applicable).
    keep = (subset[ag_col] != 999) & (subset[ag_col] != 888) & (subset[dg_col] == 1)
    if sentinels is not None:
        for bad in sentinels:
            keep = keep & (subset[xcol] != bad)
    subset = subset.loc[keep, :]
    if sentinels is None:
        subset = subset[subset[xcol].notna()]
    title = "{} vs {}".format(ylabel, xlabel)
    # Raw scatter of the factor against the diagnosis age.
    plt.scatter(subset[xcol].values, subset[ag_col].values, c="steelblue", edgecolor="white", s=20)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid()
    plt.savefig("res/{}.png".format(title), bbox_inches='tight')
    plt.close()
    # One-variable linear regression: factor -> diagnosis age.
    X = subset[[xcol]]
    Y = subset[[ag_col]]
    lr = LinearRegression()
    lr.fit(X, Y)
    prediction = lr.predict(X)
    print("w = ", lr.coef_)
    print("b = ", lr.intercept_)
    plt.plot(X, lr.predict(X), color='red', lw=2)
    plt.scatter(X.values, Y.values, c="steelblue", edgecolor="white", s=30)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.grid()
    plt.savefig("res/{}_regression.png".format(title), bbox_inches='tight')
    plt.close()
    print('Mean_Squared_Error = ', mean_squared_error(prediction, Y))
    print('RMSE = ', mean_squared_error(prediction, Y) ** 0.5)
    res = ols('{}~{}'.format(ag_col, xcol), data=subset).fit()
    print(title)
    print(res.summary())
# ---------------------------------  stroke / highBP
params = ["age", "ainc", "DI3_dg", "DI3_ag", "DI3_pr", "DI3_pt", "DI1_dg", "DI1_ag", "DI1_pr", "DI1_pt", "DI1_2"]
stroke = data[params]
# Keep stroke-diagnosed subjects whose stroke AND hypertension diagnosis
# ages are both valid.  Fixed: the original repeated `DI1_ag != 999` twice;
# the second test must exclude the 888 ("not applicable") sentinel, as every
# other section in this script does.
mask = (stroke.DI3_ag != 999) & (stroke.DI3_ag != 888) & (stroke.DI3_dg == 1) & (stroke.DI1_ag != 999) & (stroke.DI1_ag != 888)
stroke = stroke.loc[mask, :]
title = '뇌졸중 진단시기 vs 고혈압 진단시기'
# Raw scatter: hypertension diagnosis age (x) vs stroke diagnosis age (y).
plt.scatter(stroke["DI1_ag"].values, stroke["DI3_ag"].values, c="steelblue", edgecolor="white", s=20)
plt.title(title)
plt.xlabel('고혈압 진단시기')
plt.ylabel('뇌졸중 진단시기')
plt.grid()
plt.savefig("res/{}.png".format(title), bbox_inches='tight')
plt.close()
# One-variable linear regression between the two diagnosis ages.
X = stroke[["DI1_ag"]]
Y = stroke[["DI3_ag"]]
lr = LinearRegression()
lr.fit(X, Y)
prediction = lr.predict(X)
print("w = ", lr.coef_)
print("b = ", lr.intercept_)
plt.plot(X, lr.predict(X), color='red', lw=2)
plt.scatter(X.values, Y.values, c="steelblue", edgecolor="white", s=30)
plt.title(title)
plt.xlabel("고혈압 진단시기")
plt.ylabel("뇌졸중 진단시기")
plt.grid()
plt.savefig("res/{}_regression.png".format(title), bbox_inches='tight')
plt.close()
print('Mean_Squared_Error = ', mean_squared_error(prediction, Y))
print('RMSE = ', mean_squared_error(prediction, Y) ** 0.5)
res = ols('DI3_ag~DI1_ag', data=stroke).fit()
print(title)
print(res.summary())
| [
"fishcreek@naver.com"
] | fishcreek@naver.com |
356bbdf32bca6e5c3dbf086a01f0173b0fb5e0db | 2af63abb31b8e07328703ad8c7f461cb16f7b629 | /python/graph/graph.py | 8f5cce073c4e34e87d7fbd4683e206a559739b5f | [] | no_license | venkatsgithub1/DataStructures | 2acb4cce2c12325fa19367238d20ef784487ad6c | 7a795256c28484e96cf1f631df4d88564ab79815 | refs/heads/master | 2021-01-21T06:25:19.412757 | 2018-11-07T12:18:33 | 2018-11-07T12:18:33 | 83,232,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,039 | py | class Vertex:
def __init__(self, key):
self.id = key
self.connected_to = {}
def add_neighbour(self, nbr, weight=0):
self.connected_to[nbr] = weight
def __str__(self):
return str(self.id) + 'connected to: ' + str([x.id for x in self.connected_to])
def get_connections(self):
return self.connected_to.keys()
def get_id(self):
return self.id
def get_weight(self, nbr):
return self.connected_to[nbr]
class Graph:
    """Directed, weighted graph backed by a dict mapping key -> Vertex."""

    def __init__(self):
        self.vertices_dict = {}
        self.number_of_vertices = 0

    def add_vertex(self, key):
        """Create and register a Vertex for `key`."""
        self.number_of_vertices += 1
        self.vertices_dict[key] = Vertex(key)

    def get_vertex(self, n):
        """Return the Vertex stored under `n`, or None when absent."""
        return self.vertices_dict.get(n)

    def __contains__(self, n):
        return n in self.vertices_dict

    def add_edge(self, from_vertex, to_vertex, cost=0):
        """Add a weighted edge, creating missing endpoint vertices on demand.

        NOTE(review): vertices created here do not bump
        `number_of_vertices` — kept identical to the original behaviour.
        """
        for key in (from_vertex, to_vertex):
            if key not in self.vertices_dict:
                self.vertices_dict[key] = Vertex(key)
        self.vertices_dict[from_vertex].add_neighbour(
            self.vertices_dict[to_vertex], cost)

    def get_vertices(self):
        """Return the keys of all registered vertices."""
        return self.vertices_dict.keys()

    def __iter__(self):
        return iter(self.vertices_dict.values())
if __name__ == "__main__":
    # Build a small demo graph and dump its adjacency information.
    graph = Graph()
    for key in range(6):
        graph.add_vertex(key)
    print(graph.vertices_dict)
    demo_edges = [
        (0, 1, 5), (0, 5, 2), (1, 2, 4), (2, 3, 9), (3, 4, 7),
        (3, 5, 3), (4, 0, 1), (5, 4, 8), (5, 2, 1),
    ]
    for tail, head, weight in demo_edges:
        graph.add_edge(tail, head, weight)
    for vertex in graph:
        for neighbour in vertex.get_connections():
            print("vertex={} neighbour={} weight={}".format(
                vertex.get_id(), neighbour.get_id(), vertex.get_weight(neighbour)))
| [
"venkatsgithub@gmail.com"
] | venkatsgithub@gmail.com |
32ad8a6dc7a1a941b5e1d351bfda37a2c138068b | 470b33c72fedcb00f8d4024014a98f800fabe950 | /Myproject/DataBaseClasses/Contests.py | 4ac5075d62b6569260018594316bc7a567ff25c9 | [] | no_license | thenoobestever/view | c2e71ab543dee5a9477ac53c5a70499a24ea085e | b4ba5bd3dd9284d975882da9a5a817c8984e4618 | refs/heads/master | 2022-11-15T12:57:32.893553 | 2020-07-15T20:06:41 | 2020-07-15T20:06:41 | 279,966,323 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,083 | py | import datetime
class Contest:
    """Plain data holder for a programming-contest record.

    All attributes are stored as name-mangled privates and exposed
    through read/write properties.
    """

    def __init__(self, id: str, name: str, description: str, filePath: str, handle: str, contestTime: float, startDate: datetime):
        self.__id = id
        self.__name = name
        self.__description = description
        self.__filePath = filePath
        self.__handle = handle
        self.__contestTime = contestTime
        self.__startDate = startDate

    def tuple(self):
        # NOTE(review): filePath is deliberately left out of the tuple
        # (matches e.g. a DB row without the file column) — confirm intent.
        return self.id, self.name, self.description, self.handle, self.contestTime, self.startDate

    def __str__(self):
        return "Contest : " + self.id + ' ' + self.name + ' ' + self.description + ' ' + self.handle + ' ' + str(
            self.contestTime) + ' ' + str(self.startDate)

    def __repr__(self):
        return "Contest : " + self.id + ' ' + self.name + ' ' + self.description + ' ' + self.handle + ' ' + str(
            self.contestTime) + ' ' + str(self.startDate)

    @property
    def id(self):
        return self.__id

    @id.setter
    def id(self, id):
        # Fixed: the original assigned to `self.___id` (three underscores),
        # which silently created a new attribute instead of updating the id.
        self.__id = id

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, name):
        self.__name = name

    @property
    def description(self):
        return self.__description

    @description.setter
    def description(self, description):
        self.__description = description

    @property
    def handle(self):
        return self.__handle

    @handle.setter
    def handle(self, handle):
        self.__handle = handle

    @property
    def contestTime(self):
        return self.__contestTime

    @contestTime.setter
    def contestTime(self, contestTime):
        self.__contestTime = contestTime

    @property
    def startDate(self):
        return self.__startDate

    @startDate.setter
    def startDate(self, startDate):
        self.__startDate = startDate

    @property
    def filePath(self):
        return self.__filePath

    @filePath.setter
    def filePath(self, filePath):
        self.__filePath = filePath
"noreply@github.com"
] | thenoobestever.noreply@github.com |
485a1cfc467fb1950d7286d8267618bc40edeec6 | e22ee47e4b0641e7319a03895e2f69d8ac886bfc | /3_wprowadzenie/zadanie_8.py | 3493b2ba3ebfcd6548f2a82bed013433edbbcfd1 | [] | no_license | MihailRascalov/wd | cc53d1706246a20bc08bfae605a55ba9c5463f6c | c0f263bdd0338bab94017a38a8e3d3da1a401802 | refs/heads/master | 2021-02-28T14:11:12.211077 | 2020-05-25T09:57:59 | 2020-05-25T09:57:59 | 245,703,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | # Zdefiniuj funkcję, która zwraca sumę dowolnego ciągu arytmertcznego.
# Task (translated from Polish): define a function returning the sum of an
# arbitrary arithmetic sequence.  Parameters: a1 (initial value), r (the
# common difference between consecutive elements) and the number of elements
# to sum; defaults are a1=1, r=1, count=10.
def sum_of_the_arithmetic_sequence(a1=1, r=1, how_many_elements=10):
    """Return the sum of the first `how_many_elements` terms of the
    arithmetic sequence a1, a1 + r, a1 + 2*r, ...

    a1 -- initial value (first term)
    r  -- amount by which consecutive elements grow
    how_many_elements -- how many terms to add together
    """
    if how_many_elements == 0:
        return 0.0  # quirk kept from the original: zero terms yields a float
    if how_many_elements == 1:
        return a1
    term = a1
    total = a1
    for _ in range(1, how_many_elements):
        term += r
        total += term
    return total
# Smoke-test prints: 1+2+...+10 = 55 and 1+3+5+7+9 = 25.
print(sum_of_the_arithmetic_sequence())
print(sum_of_the_arithmetic_sequence(1, 2, 5))
"MihailRascalov@protonmail.com"
] | MihailRascalov@protonmail.com |
b02ae033e5fa1d5350f30742962ee875d03d9e66 | 33377c4d113cf9ea1338077af6bd645f3c5796b3 | /extensions/ringlibuv/libuv/uv.gyp | 19008dfa8727452612431c6c3ffa2fdb27ccd9f7 | [
"CC-BY-4.0",
"BSD-3-Clause",
"MIT",
"ISC",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-free-unknown"
] | permissive | ring-lang/ring | 56f55111f751b0274d4f588871f806a8c2734dea | 2081d2efb41c9dacd49b7a6944b9ca332638ba64 | refs/heads/master | 2023-09-04T16:54:44.309452 | 2023-09-03T10:28:57 | 2023-09-03T10:28:57 | 54,633,811 | 1,360 | 555 | MIT | 2023-08-03T01:48:24 | 2016-03-24T10:29:27 | C | UTF-8 | Python | false | false | 18,788 | gyp | {
'target_defaults': {
'conditions': [
['OS != "win"', {
'defines': [
'_LARGEFILE_SOURCE',
'_FILE_OFFSET_BITS=64',
],
'conditions': [
['OS=="solaris"', {
'cflags': [ '-pthreads' ],
}],
['OS not in "solaris android zos"', {
'cflags': [ '-pthread' ],
}],
['OS in "zos"', {
'defines': [
'_UNIX03_THREADS',
'_UNIX03_SOURCE',
'_UNIX03_WITHDRAWN',
'_OPEN_SYS_IF_EXT',
'_OPEN_SYS_SOCK_IPV6',
'_OPEN_MSGQ_EXT',
'_XOPEN_SOURCE_EXTENDED',
'_ALL_SOURCE',
'_LARGE_TIME_API',
'_OPEN_SYS_FILE_EXT',
'_AE_BIMODAL',
'PATH_MAX=255'
],
'cflags': [ '-qxplink' ],
'ldflags': [ '-qxplink' ],
}]
],
}],
],
'xcode_settings': {
'GCC_SYMBOLS_PRIVATE_EXTERN': 'YES', # -fvisibility=hidden
'WARNING_CFLAGS': [ '-Wall', '-Wextra', '-Wno-unused-parameter', '-Wstrict-prototypes' ],
'OTHER_CFLAGS': [ '-g', '--std=gnu89', '-pedantic' ],
}
},
'targets': [
{
'target_name': 'libuv',
'type': '<(uv_library)',
'include_dirs': [
'include',
'src/',
],
'direct_dependent_settings': {
'include_dirs': [ 'include' ],
'conditions': [
['OS != "win"', {
'defines': [
'_LARGEFILE_SOURCE',
'_FILE_OFFSET_BITS=64',
],
}],
['OS in "mac ios"', {
'defines': [ '_DARWIN_USE_64_BIT_INODE=1' ],
}],
['OS == "linux"', {
'defines': [ '_POSIX_C_SOURCE=200112' ],
}],
],
},
'sources': [
'common.gypi',
'include/uv.h',
'include/tree.h',
'include/uv-errno.h',
'include/uv-threadpool.h',
'include/uv-version.h',
'src/fs-poll.c',
'src/heap-inl.h',
'src/inet.c',
'src/queue.h',
'src/threadpool.c',
'src/uv-data-getter-setters.c',
'src/uv-common.c',
'src/uv-common.h',
'src/version.c'
],
'conditions': [
[ 'OS=="win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'_GNU_SOURCE',
],
'sources': [
'include/uv-win.h',
'src/win/async.c',
'src/win/atomicops-inl.h',
'src/win/core.c',
'src/win/detect-wakeup.c',
'src/win/dl.c',
'src/win/error.c',
'src/win/fs.c',
'src/win/fs-event.c',
'src/win/getaddrinfo.c',
'src/win/getnameinfo.c',
'src/win/handle.c',
'src/win/handle-inl.h',
'src/win/internal.h',
'src/win/loop-watcher.c',
'src/win/pipe.c',
'src/win/thread.c',
'src/win/poll.c',
'src/win/process.c',
'src/win/process-stdio.c',
'src/win/req.c',
'src/win/req-inl.h',
'src/win/signal.c',
'src/win/snprintf.c',
'src/win/stream.c',
'src/win/stream-inl.h',
'src/win/tcp.c',
'src/win/tty.c',
'src/win/timer.c',
'src/win/udp.c',
'src/win/util.c',
'src/win/winapi.c',
'src/win/winapi.h',
'src/win/winsock.c',
'src/win/winsock.h',
],
'link_settings': {
'libraries': [
'-ladvapi32',
'-liphlpapi',
'-lpsapi',
'-lshell32',
'-luser32',
'-luserenv',
'-lws2_32'
],
},
}, { # Not Windows i.e. POSIX
'sources': [
'include/uv-unix.h',
'include/uv-linux.h',
'include/uv-sunos.h',
'include/uv-darwin.h',
'include/uv-bsd.h',
'include/uv-aix.h',
'src/unix/async.c',
'src/unix/atomic-ops.h',
'src/unix/core.c',
'src/unix/dl.c',
'src/unix/fs.c',
'src/unix/getaddrinfo.c',
'src/unix/getnameinfo.c',
'src/unix/internal.h',
'src/unix/loop.c',
'src/unix/loop-watcher.c',
'src/unix/pipe.c',
'src/unix/poll.c',
'src/unix/process.c',
'src/unix/signal.c',
'src/unix/spinlock.h',
'src/unix/stream.c',
'src/unix/tcp.c',
'src/unix/thread.c',
'src/unix/timer.c',
'src/unix/tty.c',
'src/unix/udp.c',
],
'link_settings': {
'libraries': [ '-lm' ],
'conditions': [
['OS=="solaris"', {
'ldflags': [ '-pthreads' ],
}],
[ 'OS=="zos" and uv_library=="shared_library"', {
'ldflags': [ '-Wl,DLL' ],
}],
['OS != "solaris" and OS != "android" and OS != "zos"', {
'ldflags': [ '-pthread' ],
}],
],
},
'conditions': [
['uv_library=="shared_library"', {
'conditions': [
['OS=="zos"', {
'cflags': [ '-qexportall' ],
}, {
'cflags': [ '-fPIC' ],
}],
],
}],
['uv_library=="shared_library" and OS!="mac" and OS!="zos"', {
# This will cause gyp to set soname
# Must correspond with UV_VERSION_MAJOR
# in include/uv-version.h
'product_extension': 'so.1',
}],
],
}],
[ 'OS in "linux mac ios android zos"', {
'sources': [ 'src/unix/proctitle.c' ],
}],
[ 'OS != "zos"', {
'cflags': [
'-fvisibility=hidden',
'-g',
'--std=gnu89',
'-pedantic',
'-Wall',
'-Wextra',
'-Wno-unused-parameter',
'-Wstrict-prototypes',
],
}],
[ 'OS in "mac ios"', {
'sources': [
'src/unix/darwin.c',
'src/unix/fsevents.c',
'src/unix/darwin-proctitle.c'
],
'defines': [
'_DARWIN_USE_64_BIT_INODE=1',
'_DARWIN_UNLIMITED_SELECT=1',
]
}],
[ 'OS=="linux"', {
'defines': [ '_GNU_SOURCE' ],
'sources': [
'src/unix/linux-core.c',
'src/unix/linux-inotify.c',
'src/unix/linux-syscalls.c',
'src/unix/linux-syscalls.h',
'src/unix/procfs-exepath.c',
'src/unix/sysinfo-loadavg.c',
'src/unix/sysinfo-memory.c',
],
'link_settings': {
'libraries': [ '-ldl', '-lrt' ],
},
}],
[ 'OS=="android"', {
'sources': [
'src/unix/linux-core.c',
'src/unix/linux-inotify.c',
'src/unix/linux-syscalls.c',
'src/unix/linux-syscalls.h',
'src/unix/pthread-fixes.c',
'src/unix/android-ifaddrs.c',
'src/unix/procfs-exepath.c',
'src/unix/sysinfo-loadavg.c',
'src/unix/sysinfo-memory.c',
],
'link_settings': {
'libraries': [ '-ldl' ],
},
}],
[ 'OS=="solaris"', {
'sources': [
'src/unix/no-proctitle.c',
'src/unix/sunos.c',
],
'defines': [
'__EXTENSIONS__',
'_XOPEN_SOURCE=500',
],
'link_settings': {
'libraries': [
'-lkstat',
'-lnsl',
'-lsendfile',
'-lsocket',
],
},
}],
[ 'OS=="aix"', {
'variables': {
'os_name': '<!(uname -s)',
},
'sources': [
'src/unix/aix-common.c',
],
'defines': [
'_ALL_SOURCE',
'_XOPEN_SOURCE=500',
'_LINUX_SOURCE_COMPAT',
'_THREAD_SAFE',
],
'conditions': [
[ '"<(os_name)"=="OS400"', {
'sources': [
'src/unix/ibmi.c',
'src/unix/posix-poll.c',
'src/unix/no-fsevents.c',
'src/unix/no-proctitle.c',
],
'defines': [
'_PASE=1'
],
}, {
'sources': [
'src/unix/aix.c'
],
'defines': [
'HAVE_SYS_AHAFS_EVPRODS_H'
],
'link_settings': {
'libraries': [
'-lperfstat',
],
},
}],
]
}],
[ 'OS=="freebsd" or OS=="dragonflybsd"', {
'sources': [ 'src/unix/freebsd.c' ],
}],
[ 'OS=="openbsd"', {
'sources': [ 'src/unix/openbsd.c' ],
}],
[ 'OS=="netbsd"', {
'link_settings': {
'libraries': [ '-lkvm' ],
},
'sources': [ 'src/unix/netbsd.c' ],
}],
[ 'OS in "freebsd dragonflybsd openbsd netbsd".split()', {
'sources': [ 'src/unix/posix-hrtime.c' ],
}],
[ 'OS in "ios mac freebsd dragonflybsd openbsd netbsd".split()', {
'sources': [
'src/unix/bsd-ifaddrs.c',
'src/unix/kqueue.c',
],
}],
['uv_library=="shared_library"', {
'defines': [ 'BUILDING_UV_SHARED=1' ]
}],
['OS=="zos"', {
'sources': [
'src/unix/pthread-fixes.c',
'src/unix/os390.c',
'src/unix/os390-syscalls.c'
]
}],
]
},
{
'target_name': 'run-tests',
'type': 'executable',
'dependencies': [ 'libuv' ],
'sources': [
'test/blackhole-server.c',
'test/echo-server.c',
'test/run-tests.c',
'test/runner.c',
'test/runner.h',
'test/test-get-loadavg.c',
'test/task.h',
'test/test-active.c',
'test/test-async.c',
'test/test-async-null-cb.c',
'test/test-callback-stack.c',
'test/test-callback-order.c',
'test/test-close-fd.c',
'test/test-close-order.c',
'test/test-connect-unspecified.c',
'test/test-connection-fail.c',
'test/test-cwd-and-chdir.c',
'test/test-default-loop-close.c',
'test/test-delayed-accept.c',
'test/test-error.c',
'test/test-embed.c',
'test/test-emfile.c',
'test/test-env-vars.c',
'test/test-fail-always.c',
'test/test-fork.c',
'test/test-fs.c',
'test/test-fs-copyfile.c',
'test/test-fs-event.c',
'test/test-getters-setters.c',
'test/test-get-currentexe.c',
'test/test-get-memory.c',
'test/test-get-passwd.c',
'test/test-getaddrinfo.c',
'test/test-gethostname.c',
'test/test-getnameinfo.c',
'test/test-getsockname.c',
'test/test-handle-fileno.c',
'test/test-homedir.c',
'test/test-hrtime.c',
'test/test-idle.c',
'test/test-ip6-addr.c',
'test/test-ipc.c',
'test/test-ipc-send-recv.c',
'test/test-list.h',
'test/test-loop-handles.c',
'test/test-loop-alive.c',
'test/test-loop-close.c',
'test/test-loop-stop.c',
'test/test-loop-time.c',
'test/test-loop-configure.c',
'test/test-walk-handles.c',
'test/test-watcher-cross-stop.c',
'test/test-multiple-listen.c',
'test/test-osx-select.c',
'test/test-pass-always.c',
'test/test-ping-pong.c',
'test/test-pipe-bind-error.c',
'test/test-pipe-connect-error.c',
'test/test-pipe-connect-multiple.c',
'test/test-pipe-connect-prepare.c',
'test/test-pipe-getsockname.c',
'test/test-pipe-pending-instances.c',
'test/test-pipe-sendmsg.c',
'test/test-pipe-server-close.c',
'test/test-pipe-close-stdout-read-stdin.c',
'test/test-pipe-set-non-blocking.c',
'test/test-pipe-set-fchmod.c',
'test/test-platform-output.c',
'test/test-poll.c',
'test/test-poll-close.c',
'test/test-poll-close-doesnt-corrupt-stack.c',
'test/test-poll-closesocket.c',
'test/test-poll-oob.c',
'test/test-process-title.c',
'test/test-process-title-threadsafe.c',
'test/test-queue-foreach-delete.c',
'test/test-ref.c',
'test/test-run-nowait.c',
'test/test-run-once.c',
'test/test-semaphore.c',
'test/test-shutdown-close.c',
'test/test-shutdown-eof.c',
'test/test-shutdown-twice.c',
'test/test-signal.c',
'test/test-signal-multiple-loops.c',
'test/test-socket-buffer-size.c',
'test/test-spawn.c',
'test/test-fs-poll.c',
'test/test-stdio-over-pipes.c',
'test/test-tcp-alloc-cb-fail.c',
'test/test-tcp-bind-error.c',
'test/test-tcp-bind6-error.c',
'test/test-tcp-close.c',
'test/test-tcp-close-accept.c',
'test/test-tcp-close-while-connecting.c',
'test/test-tcp-create-socket-early.c',
'test/test-tcp-connect-error-after-write.c',
'test/test-tcp-shutdown-after-write.c',
'test/test-tcp-flags.c',
'test/test-tcp-connect-error.c',
'test/test-tcp-connect-timeout.c',
'test/test-tcp-connect6-error.c',
'test/test-tcp-open.c',
'test/test-tcp-write-to-half-open-connection.c',
'test/test-tcp-write-after-connect.c',
'test/test-tcp-writealot.c',
'test/test-tcp-write-fail.c',
'test/test-tcp-try-write.c',
'test/test-tcp-unexpected-read.c',
'test/test-tcp-oob.c',
'test/test-tcp-read-stop.c',
'test/test-tcp-write-queue-order.c',
'test/test-threadpool.c',
'test/test-threadpool-cancel.c',
'test/test-thread-equal.c',
'test/test-tmpdir.c',
'test/test-mutexes.c',
'test/test-thread.c',
'test/test-barrier.c',
'test/test-condvar.c',
'test/test-timer-again.c',
'test/test-timer-from-check.c',
'test/test-timer.c',
'test/test-tty.c',
'test/test-udp-alloc-cb-fail.c',
'test/test-udp-bind.c',
'test/test-udp-create-socket-early.c',
'test/test-udp-dgram-too-big.c',
'test/test-udp-ipv6.c',
'test/test-udp-open.c',
'test/test-udp-options.c',
'test/test-udp-send-and-recv.c',
'test/test-udp-send-hang-loop.c',
'test/test-udp-send-immediate.c',
'test/test-udp-send-unreachable.c',
'test/test-udp-multicast-join.c',
'test/test-udp-multicast-join6.c',
'test/test-dlerror.c',
'test/test-udp-multicast-ttl.c',
'test/test-ip4-addr.c',
'test/test-ip6-addr.c',
'test/test-udp-multicast-interface.c',
'test/test-udp-multicast-interface6.c',
'test/test-udp-try-send.c',
],
'conditions': [
[ 'OS=="win"', {
'sources': [
'test/runner-win.c',
'test/runner-win.h',
'src/win/snprintf.c',
],
'libraries': [ '-lws2_32' ]
}, { # POSIX
'sources': [
'test/runner-unix.c',
'test/runner-unix.h',
],
'conditions': [
[ 'OS != "zos"', {
'defines': [ '_GNU_SOURCE' ],
'cflags': [ '-Wno-long-long' ],
'xcode_settings': {
'WARNING_CFLAGS': [ '-Wno-long-long' ]
}
}],
]},
],
[ 'OS in "mac dragonflybsd freebsd linux netbsd openbsd".split()', {
'link_settings': {
'libraries': [ '-lutil' ],
},
}],
[ 'OS=="solaris"', { # make test-fs.c compile, needs _POSIX_C_SOURCE
'defines': [
'__EXTENSIONS__',
'_XOPEN_SOURCE=500',
],
}],
[ 'OS=="aix"', { # make test-fs.c compile, needs _POSIX_C_SOURCE
'defines': [
'_ALL_SOURCE',
'_XOPEN_SOURCE=500',
],
}],
['uv_library=="shared_library"', {
'defines': [ 'USING_UV_SHARED=1' ],
'conditions': [
[ 'OS == "zos"', {
'cflags': [ '-Wc,DLL' ],
}],
],
}],
],
'msvs-settings': {
'VCLinkerTool': {
'SubSystem': 1, # /subsystem:console
},
},
},
{
'target_name': 'run-benchmarks',
'type': 'executable',
'dependencies': [ 'libuv' ],
'sources': [
'test/benchmark-async.c',
'test/benchmark-async-pummel.c',
'test/benchmark-fs-stat.c',
'test/benchmark-getaddrinfo.c',
'test/benchmark-list.h',
'test/benchmark-loop-count.c',
'test/benchmark-million-async.c',
'test/benchmark-million-timers.c',
'test/benchmark-multi-accept.c',
'test/benchmark-ping-pongs.c',
'test/benchmark-pound.c',
'test/benchmark-pump.c',
'test/benchmark-sizes.c',
'test/benchmark-spawn.c',
'test/benchmark-thread.c',
'test/benchmark-tcp-write-batch.c',
'test/benchmark-udp-pummel.c',
'test/dns-server.c',
'test/echo-server.c',
'test/blackhole-server.c',
'test/run-benchmarks.c',
'test/runner.c',
'test/runner.h',
'test/task.h',
],
'conditions': [
[ 'OS=="win"', {
'sources': [
'test/runner-win.c',
'test/runner-win.h',
'src/win/snprintf.c',
],
'libraries': [ '-lws2_32' ]
}, { # POSIX
'defines': [ '_GNU_SOURCE' ],
'sources': [
'test/runner-unix.c',
'test/runner-unix.h',
]
}],
['uv_library=="shared_library"', {
'defines': [ 'USING_UV_SHARED=1' ],
'conditions': [
[ 'OS == "zos"', {
'cflags': [ '-Wc,DLL' ],
}],
],
}],
],
'msvs-settings': {
'VCLinkerTool': {
'SubSystem': 1, # /subsystem:console
},
},
},
]
}
| [
"msfclipper@yahoo.com"
] | msfclipper@yahoo.com |
928143745fcc2fc70c1fc5eb834b2487175baff5 | 4cc97a7c5c5be725e54b037cf99f87581adaa0e0 | /traffic_model/RL_models/__init__.py | 58d2a497e9e8344808cab048478de3451091f984 | [] | no_license | HiXixiLu/RL_ArtificialStupidity_traffic | 32dc7fc65d74310c3f2ef77a61af0950dd79ffc0 | e3d86afbb9edfe9ff67a0f1f7bd248b95cbb9f6e | refs/heads/master | 2021-03-10T17:43:24.865746 | 2020-05-26T08:03:08 | 2020-05-26T08:03:08 | 246,472,425 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | import sys,os
# Make the RL_models package importable by absolute module name.
# NOTE(review): assumes the process is started from the repository root
# (getcwd() + this relative suffix) — verify how callers launch scripts.
sys.path.append(os.getcwd() + '/traffic_model/RL_models')
"lu_sissi@126.com"
] | lu_sissi@126.com |
7943c82bfb5eef6a125f551f9bf92c8ed87f9028 | 7da0e8d03548ec83ec717a076add2199e543e3dd | /InvenTree/part/urls.py | 75d5041b9c89cb54a2d092a2a95eaf92b5418bb4 | [
"MIT"
] | permissive | Devarshi87/InvenTree | 7b90cbf14699861436ab127b9b7638cee81e30c4 | 2191b7f71972d4c3ba7322cc93936801a168ab3c | refs/heads/master | 2020-05-15T04:25:03.289794 | 2019-04-18T12:42:36 | 2019-04-18T12:42:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,522 | py | from django.conf.urls import url, include
from . import views
# Sub-URLs for a single SupplierPart (mounted under its primary key).
# Order matters: the catch-all detail pattern must stay last so that
# 'edit' and 'delete' are matched first.
supplier_part_detail_urls = [
    url(r'edit/?', views.SupplierPartEdit.as_view(), name='supplier-part-edit'),
    url(r'delete/?', views.SupplierPartDelete.as_view(), name='supplier-part-delete'),
    url('^.*$', views.SupplierPartDetail.as_view(), name='supplier-part-detail'),
]
# Top-level SupplierPart URLs: creation plus the per-pk sub-URLs above.
supplier_part_urls = [
    url(r'^new/?', views.SupplierPartCreate.as_view(), name='supplier-part-create'),
    url(r'^(?P<pk>\d+)/', include(supplier_part_detail_urls)),
]
# Sub-URLs for a single Part; most entries render a different tab
# (template_name) of the same PartDetail view.
part_detail_urls = [
    url(r'^edit/?', views.PartEdit.as_view(), name='part-edit'),
    url(r'^delete/?', views.PartDelete.as_view(), name='part-delete'),
    url(r'^track/?', views.PartDetail.as_view(template_name='part/track.html'), name='part-track'),
    url(r'^bom-export/?', views.BomDownload.as_view(), name='bom-export'),
    url(r'^bom/?', views.PartDetail.as_view(template_name='part/bom.html'), name='part-bom'),
    url(r'^build/?', views.PartDetail.as_view(template_name='part/build.html'), name='part-build'),
    url(r'^stock/?', views.PartDetail.as_view(template_name='part/stock.html'), name='part-stock'),
    url(r'^used/?', views.PartDetail.as_view(template_name='part/used_in.html'), name='part-used-in'),
    url(r'^allocation/?', views.PartDetail.as_view(template_name='part/allocation.html'), name='part-allocation'),
    url(r'^suppliers/?', views.PartDetail.as_view(template_name='part/supplier.html'), name='part-suppliers'),
    url(r'^thumbnail/?', views.PartImage.as_view(), name='part-image'),
    # Any other URLs go to the part detail page
    url(r'^.*$', views.PartDetail.as_view(), name='part-detail'),
]
# Sub-URLs for a single PartCategory (catch-all detail last).
part_category_urls = [
    url(r'^edit/?', views.CategoryEdit.as_view(), name='category-edit'),
    url(r'^delete/?', views.CategoryDelete.as_view(), name='category-delete'),
    url('^.*$', views.CategoryDetail.as_view(), name='category-detail'),
]
# Sub-URLs for a single BOM item (catch-all detail last).
part_bom_urls = [
    url(r'^edit/?', views.BomItemEdit.as_view(), name='bom-item-edit'),
    url('^delete/?', views.BomItemDelete.as_view(), name='bom-item-delete'),
    url(r'^.*$', views.BomItemDetail.as_view(), name='bom-item-detail'),
]
# URL list for part web interface
part_urls = [
    # Create a new category
    url(r'^category/new/?', views.CategoryCreate.as_view(), name='category-create'),
    # Create a new part
    url(r'^new/?', views.PartCreate.as_view(), name='part-create'),
    # Create a new BOM item
    url(r'^bom/new/?', views.BomItemCreate.as_view(), name='bom-item-create'),
    # Individual part
    url(r'^(?P<pk>\d+)/', include(part_detail_urls)),
    # Part category
    url(r'^category/(?P<pk>\d+)/', include(part_category_urls)),
    url(r'^bom/(?P<pk>\d+)/', include(part_bom_urls)),
    # Top level part list (display top level parts and categories)
    url(r'^.*$', views.PartIndex.as_view(), name='part-index'),
]
"""
part_param_urls = [
# Detail of a single part parameter
url(r'^(?P<pk>[0-9]+)/?$', views.PartParamDetail.as_view(), name='partparameter-detail'),
# Parameters associated with a particular part
url(r'^\?.*/?$', views.PartParamList.as_view()),
url(r'^$', views.PartParamList.as_view()),
]
part_param_template_urls = [
# Detail of a single part field template
url(r'^(?P<pk>[0-9]+)/?$', views.PartTemplateDetail.as_view(), name='partparametertemplate-detail'),
# List all part field templates
url(r'^\?.*/?$', views.PartTemplateList.as_view()),
url(r'^$', views.PartTemplateList.as_view())
]
"""
| [
"oliver.henry.walters@gmail.com"
] | oliver.henry.walters@gmail.com |
cc2beb913ec709cd72f32e48b0f742c884adf240 | b8c28c8230c0f0e6257376e8093242f6e25cc446 | /python/getdataandclean/caculate.py | edc6edbb3836dd024bc49f50ce873cc6fb2b13b5 | [] | no_license | songxiaoyuan/secondworkcode | 4e8b059564b45e97049beea7eeb25b4783923f46 | eabe4c7990e91a8a2454cece29a684b8af856920 | refs/heads/master | 2020-12-25T18:42:55.037686 | 2017-11-08T02:37:56 | 2017-11-08T02:37:56 | 93,979,086 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,343 | py | # coding: utf-8
import pandas as pd
import csv
import os
def calEma(path,emaperiod):
stock_data = pd.read_csv(path)
csvHeader = ["putLatPrice","putMiddPoint","putWeightActive","callLatPrice","callMiddPoint","callWeightActive"]
for volum in csvHeader:
stock_data['EMA_' + volum] = pd.ewma(stock_data[volum], span=emaperiod)
stock_data.to_csv(path, index=False)
# 此函数主要就是根据现在的文件,然后计算,最后进行返回给想要的数据。putAsk ,putBid,theo1,the2,the3
def writeTheOrderData(path):
csvFile = file("./put_call_data.csv","rb")
head = 0
csvWriteFile = file(path,"w")
writer = csv.writer(csvWriteFile)
index =1
for line in csv.reader(csvFile):
# import pdb
# pdb.set_trace()
if head==0:
head+=1
continue
for i in range(0,len(line)):
line[i] = float(line[i])
# putBid = line[0]
# putAsk=line[1]
putLatPrice = line[2]
# putLatPrice = line[7]*(line[10]/line[13])
putMiddPoint = line[8]*(line[11]/line[14])
putWeightActive = line[9]*(line[12]/line[15])
tmp = [index,putLatPrice,putMiddPoint,putWeightActive]
index +=1
for i in range(0,len(tmp)):
tmp[i] = round(tmp[i],2)
writer.writerow(tmp)
csvFile.close()
csvWriteFile.close()
def caculateTheLine(line):
# 根据传入的数据,进行计算,返回想要的bid,ask ,lastPrice,MiddPoint,WeightActive
lastPrice = float(line[4])
bid = float(line[22])
bidNum = float(line[23])
ask = float(line[24])
askNum = float(line[25])
MiddPoint = (bid+ask)/2
# 这个主要是用来保留2位小数。
WeightActive =(ask*bidNum+bid*askNum)/(askNum+bidNum)
tmp = [bid,ask,lastPrice,MiddPoint,WeightActive]
return tmp
def caculateTheLineDaoshu(line):
# 根据传入的数据,进行计算,返回想要的bid,ask ,lastPrice,MiddPoint,WeightActive
lastPrice = 1/float(line[4])
bid = float(line[22])
bidNum = float(line[23])
ask = float(line[24])
askNum = float(line[25])
MiddPoint = 2/(bid+ask)
# 这个主要是用来保留2位小数。
WeightActive = (askNum+bidNum)/(ask*bidNum+bid*askNum)
tmp = [bid,ask,lastPrice,MiddPoint,WeightActive]
return tmp
def getCsvFileData(putPath,callPath):
# 将call和put的两个数据读入到内存中,然后等待处理可以先写入csv中,然后在读。
csvHeader = ["putBid","putAsk","putLatPrice","putMiddPoint","putWeightActive","callBid","callAsk","callLatPrice","callMiddPoint","callWeightActive"]
csvFile = file("./put_call_data.csv","w")
writer = csv.writer(csvFile)
writer.writerow(csvHeader)
# 开始读取文件,然后将读取的数据插入到新的csv文件中。
insertData = []
# fileput = file('./cleanData_20170522_m1709P2750.csv','rb')
fileput = file(putPath,'rb')
readerput = csv.reader(fileput)
# filecall = file('./cleanData_20170522_m1709C2750.csv','rb')
filecall = file(callPath,'rb')
readercall = csv.reader(filecall)
for line in readerput:
tmp = caculateTheLine(line)
insertData.append(tmp)
i=0
for line in readercall:
tmp = caculateTheLineDaoshu(line)
put = insertData[i]
insertData[i] = put+tmp
i+=1
for line in insertData:
writer.writerow(line)
csvFile.close()
fileput.close()
filecall.close()
# 开始处理EMA,生产相应的MEA数据。
calEma("./put_call_data.csv",200)
def getLastPriceData(filepath):
fileput = file(filepath,'rb')
readerput = csv.reader(fileput)
data = []
for line in readerput:
lastPrice = line[4].strip()
time = line[20].strip()
tmp = time + ','+lastPrice
data.append(tmp)
writeArraytojs(data)
def writeArraytojs(data):
jsfile = open('data.js',"w")
jsfile.writelines("var data = [")
for l in data:
tmp ='"'+l+'"'+','+'\n'
jsfile.writelines(tmp)
jsfile.writelines("]")
if __name__ == '__main__':
# calEma()
# tmpSet = set()
# for root,dirs,files in os.walk("./clean"):
# for name in files:
# tmp= os.path.join(root,name)
# if 'P' in tmp:
# tmpSet.add(tmp)
# # cleanCsv(tmp)
# for item in tmpSet:
# putPath = item
# callPath = item.replace('P','C')
# print callPath
# getCsvFileData(putPath,callPath)
# getDataPath = "./getData/"+putPath.split('/')[2][5:]
# writeTheOrderData(getDataPath)
# callPath="clean_20170515_m1709C2700.csv"
# getDataPath = "./getData"+callPath[5:]
# print getDataPath
getLastPriceData("./20170612_pb1707.csv")
| [
"songxiaoyuannkcs@163.com"
] | songxiaoyuannkcs@163.com |
95ab9070fb13c67bfb2ff4c10dd2ab5b78bd8040 | b70d15de357245a6782e2ab72e7b0b0ce6e2a938 | /Chatbot/wsgi.py | eb20428d40763354065b04186629f8bee4108c49 | [] | no_license | gauravghid/ChatbotController | 240c56f8b082201e74f66e6160d943f20a549bc0 | c5b6dbceee91f2fe10dbfd2278511c5e7b7d88d6 | refs/heads/master | 2020-03-23T21:55:46.291189 | 2018-07-24T10:23:03 | 2018-07-24T10:23:03 | 142,141,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for Chatbot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Chatbot.settings")
application = get_wsgi_application()
| [
"singhgaurav91@gmail.com"
] | singhgaurav91@gmail.com |
c14fc91233342cf32a251e2476dc92f259aa3f00 | 4c88091f790354c33fec35bf6cc0986f2c660c43 | /accounts/migrations/0022_auto_20200613_0829.py | 19d2e664c377098174300dc75f7c053ddc5215ce | [] | no_license | Masher828/UNIS | 6d4bcc1b20d8497ae472be9e5e5e8c4c79565a2b | 90e812d7574e79d79ed030f80bb1ac2db5078ffd | refs/heads/master | 2022-11-08T21:04:55.462995 | 2020-06-25T07:41:50 | 2020-06-25T07:41:50 | 268,544,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | # Generated by Django 3.0.6 on 2020-06-13 02:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0021_auto_20200606_1125'),
]
operations = [
migrations.CreateModel(
name='Customprofilepic',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('custom_DP', models.ImageField(upload_to='custom_DP/')),
],
),
migrations.AlterField(
model_name='details',
name='created_at',
field=models.DateTimeField(default=datetime.datetime(2020, 6, 13, 8, 29, 31, 44340)),
),
]
| [
"manish.cse828@gmail.com"
] | manish.cse828@gmail.com |
e757f9fbd7c9648bbb1202401819539e73ea27a8 | dffd025a8bd8457d3133e3787690de85f94761cb | /todo/test_views.py | f7e26e9bef55cb7d74f11d8c60d216440e79e53a | [] | no_license | Skeogh89/ci-fsf-django | 7a8550c69b30bd44ab7c21f5a0394f6f8867cae8 | 0ad1525e61d0a094dbaa85c2020aa38609d79561 | refs/heads/master | 2023-08-08T00:55:16.729247 | 2021-02-12T00:19:29 | 2021-02-12T00:19:29 | 322,364,323 | 0 | 0 | null | 2021-09-22T19:43:44 | 2020-12-17T17:21:03 | HTML | UTF-8 | Python | false | false | 1,867 | py | from django.test import TestCase
from .models import Item
# Create your tests here.
class TestViews(TestCase):
def test_get_todo_list(self):
responce = self.client.get('/')
self.assertEqual(responce.status_code, 200)
self.assertTemplateUsed(responce, 'todo/todo_list.html')
def test_get_add_item_page(self):
responce = self.client.get('/add')
self.assertEqual(responce.status_code, 200)
self.assertTemplateUsed(responce, 'todo/add_item.html')
def test_get_edit_item_page(self):
item = Item.objects.create(name='Test Todo Item')
responce = self.client.get(f'/edit/{item.id}')
self.assertEqual(responce.status_code, 200)
self.assertTemplateUsed(responce, 'todo/edit_item.html')
def test_can_add_item_page(self):
responce = self.client.post('/add', {'name': 'Test Added Item'})
self.assertRedirects(responce, '/')
def test_can_delete_item(self):
item = Item.objects.create(name='Test Todo Item')
responce = self.client.get(f'/delete/{item.id}')
self.assertRedirects(responce, '/')
existing_items = Item.objects.filter(id=item.id)
self.assertEqual(len(existing_items), 0)
def test_can_toggle_item(self):
item = Item.objects.create(name='Test Todo Item', done=True)
responce = self.client.get(f'/toggle/{item.id}')
self.assertRedirects(responce, '/')
updated_item = Item.objects.get(id=item.id)
self.assertFalse(updated_item.done)
def test_can_edit_item(self):
item = Item.objects.create(name='Test Todo Item')
responce = self.client.post(f'/edit/{item.id}', {'name': 'Updated Name'})
self.assertRedirects(responce, '/')
updated_item = Item.objects.get(id=item.id)
self.assertEqual(updated_item.name, 'Updated Name')
| [
"stevokeogh@hotmail.com"
] | stevokeogh@hotmail.com |
fce81db8d06a0acb025336f3230e94f6e442c914 | c7d3c8f2667b73e68878253a95d034fd7f1f0583 | /env/Lib/site-packages/google/cloud/dialogflowcx_v3beta1/services/transition_route_groups/client.py | ea13b5eb4ebc6271f1f9e44940f1da2f202aec95 | [] | no_license | jeevana28/ivrchatbot | e57e9b94b2b6c201e79d27036eca2e6c1f5deb56 | fe5d281ebf774f46861b8f8eaea0494baf115f67 | refs/heads/master | 2023-06-07T01:20:40.547119 | 2021-07-06T15:47:15 | 2021-07-06T15:47:15 | 361,155,397 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,168 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3beta1.services.transition_route_groups import pagers
from google.cloud.dialogflowcx_v3beta1.types import page
from google.cloud.dialogflowcx_v3beta1.types import transition_route_group
from google.cloud.dialogflowcx_v3beta1.types import (
transition_route_group as gcdc_transition_route_group,
)
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from .transports.base import TransitionRouteGroupsTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import TransitionRouteGroupsGrpcTransport
from .transports.grpc_asyncio import TransitionRouteGroupsGrpcAsyncIOTransport
class TransitionRouteGroupsClientMeta(type):
    """Metaclass for the TransitionRouteGroups client.

    Provides class-level machinery for building and retrieving support
    objects (e.g. the transport) so client instances stay uncluttered.
    """

    # Registry of transport label -> transport class; insertion order
    # determines the default transport (the first entry).
    _transport_registry = OrderedDict(
        [
            ("grpc", TransitionRouteGroupsGrpcTransport),
            ("grpc_asyncio", TransitionRouteGroupsGrpcAsyncIOTransport),
        ]
    )  # type: Dict[str, Type[TransitionRouteGroupsTransport]]

    def get_transport_class(
        cls, label: str = None,
    ) -> Type[TransitionRouteGroupsTransport]:
        """Return the transport class registered under ``label``.

        Args:
            label: The name of the desired transport. When omitted, the
                first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # An explicit label selects that transport directly.
        if label:
            return cls._transport_registry[label]

        # No transport requested: fall back to the first registered one.
        return next(iter(cls._transport_registry.values()))
class TransitionRouteGroupsClient(metaclass=TransitionRouteGroupsClientMeta):
"""Service for managing
[TransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "dialogflow.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TransitionRouteGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
TransitionRouteGroupsClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
    @property
    def transport(self) -> TransitionRouteGroupsTransport:
        """Return the transport used by the client instance.

        The transport is created (or accepted from the caller) in
        ``__init__`` and stored on ``self._transport``.

        Returns:
            TransitionRouteGroupsTransport: The transport used by the client instance.
        """
        return self._transport
@staticmethod
def flow_path(project: str, location: str, agent: str, flow: str,) -> str:
"""Return a fully-qualified flow string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}".format(
project=project, location=location, agent=agent, flow=flow,
)
@staticmethod
def parse_flow_path(path: str) -> Dict[str, str]:
"""Parse a flow path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def intent_path(project: str, location: str, agent: str, intent: str,) -> str:
"""Return a fully-qualified intent string."""
return "projects/{project}/locations/{location}/agents/{agent}/intents/{intent}".format(
project=project, location=location, agent=agent, intent=intent,
)
@staticmethod
def parse_intent_path(path: str) -> Dict[str, str]:
"""Parse a intent path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/intents/(?P<intent>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def page_path(
project: str, location: str, agent: str, flow: str, page: str,
) -> str:
"""Return a fully-qualified page string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/pages/{page}".format(
project=project, location=location, agent=agent, flow=flow, page=page,
)
@staticmethod
def parse_page_path(path: str) -> Dict[str, str]:
"""Parse a page path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/pages/(?P<page>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def transition_route_group_path(
project: str, location: str, agent: str, flow: str, transition_route_group: str,
) -> str:
"""Return a fully-qualified transition_route_group string."""
return "projects/{project}/locations/{location}/agents/{agent}/flows/{flow}/transitionRouteGroups/{transition_route_group}".format(
project=project,
location=location,
agent=agent,
flow=flow,
transition_route_group=transition_route_group,
)
@staticmethod
def parse_transition_route_group_path(path: str) -> Dict[str, str]:
"""Parse a transition_route_group path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/flows/(?P<flow>.+?)/transitionRouteGroups/(?P<transition_route_group>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def webhook_path(project: str, location: str, agent: str, webhook: str,) -> str:
"""Return a fully-qualified webhook string."""
return "projects/{project}/locations/{location}/agents/{agent}/webhooks/{webhook}".format(
project=project, location=location, agent=agent, webhook=webhook,
)
@staticmethod
def parse_webhook_path(path: str) -> Dict[str, str]:
"""Parse a webhook path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/agents/(?P<agent>.+?)/webhooks/(?P<webhook>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: Optional[credentials.Credentials] = None,
        transport: Union[str, TransitionRouteGroupsTransport, None] = None,
        client_options: Optional[client_options_lib.ClientOptions] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the transition route groups client.

        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, TransitionRouteGroupsTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (google.api_core.client_options.ClientOptions): Custom options for the
                client. It won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.

        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a plain dict or None in place of
        # a ClientOptions instance.
        if isinstance(client_options, dict):
            client_options = client_options_lib.from_dict(client_options)
        if client_options is None:
            client_options = client_options_lib.ClientOptions()

        # Create SSL credentials for mutual TLS if needed.
        # Client certificates are only considered when the
        # GOOGLE_API_USE_CLIENT_CERTIFICATE env var is explicitly "true".
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )

        client_cert_source_func = None
        is_mtls = False
        if use_client_cert:
            # An explicit cert source on client_options wins; otherwise fall
            # back to the default cert source discovered by google-auth.
            if client_options.client_cert_source:
                is_mtls = True
                client_cert_source_func = client_options.client_cert_source
            else:
                is_mtls = mtls.has_default_client_cert_source()
                client_cert_source_func = (
                    mtls.default_client_cert_source() if is_mtls else None
                )

        # Figure out which api endpoint to use.
        # An explicit api_endpoint always takes precedence; otherwise the
        # GOOGLE_API_USE_MTLS_ENDPOINT env var ("never"/"always"/"auto",
        # default "auto") decides between the regular and mTLS endpoints.
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                # "auto": use mTLS only when a client certificate is present.
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )

        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, TransitionRouteGroupsTransport):
            # transport is a TransitionRouteGroupsTransport instance.
            # A pre-built transport already carries its own credentials and
            # scopes, so supplying them here as well is ambiguous.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            # transport is a label (or None): resolve the transport class
            # via the metaclass registry and build it from the options above.
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                client_cert_source_for_mtls=client_cert_source_func,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
def list_transition_route_groups(
self,
request: transition_route_group.ListTransitionRouteGroupsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListTransitionRouteGroupsPager:
r"""Returns the list of all transition route groups in
the specified flow.
Args:
request (google.cloud.dialogflowcx_v3beta1.types.ListTransitionRouteGroupsRequest):
The request object. The request message for
[TransitionRouteGroups.ListTransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.ListTransitionRouteGroups].
parent (str):
Required. The flow to list all transition route groups
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.services.transition_route_groups.pagers.ListTransitionRouteGroupsPager:
The response message for
[TransitionRouteGroups.ListTransitionRouteGroups][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.ListTransitionRouteGroups].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a transition_route_group.ListTransitionRouteGroupsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, transition_route_group.ListTransitionRouteGroupsRequest
):
request = transition_route_group.ListTransitionRouteGroupsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_transition_route_groups
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListTransitionRouteGroupsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def get_transition_route_group(
self,
request: transition_route_group.GetTransitionRouteGroupRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> transition_route_group.TransitionRouteGroup:
r"""Retrieves the specified
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Args:
request (google.cloud.dialogflowcx_v3beta1.types.GetTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.GetTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.GetTransitionRouteGroup].
name (str):
Required. The name of the
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/transitionRouteGroups/<Transition Route Group ID>``.
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
An TransitionRouteGroup represents a group of
[TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
to be used by a
[Page][google.cloud.dialogflow.cx.v3beta1.Page].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a transition_route_group.GetTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, transition_route_group.GetTransitionRouteGroupRequest
):
request = transition_route_group.GetTransitionRouteGroupRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def create_transition_route_group(
self,
request: gcdc_transition_route_group.CreateTransitionRouteGroupRequest = None,
*,
parent: str = None,
transition_route_group: gcdc_transition_route_group.TransitionRouteGroup = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gcdc_transition_route_group.TransitionRouteGroup:
r"""Creates an
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
in the specified flow.
Args:
request (google.cloud.dialogflowcx_v3beta1.types.CreateTransitionRouteGroupRequest):
The request object. The request message for
[TransitionRouteGroups.CreateTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.CreateTransitionRouteGroup].
parent (str):
Required. The flow to create an
[TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
for. Format:
``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
transition_route_group (google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup):
Required. The transition route group
to create.
This corresponds to the ``transition_route_group`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
An TransitionRouteGroup represents a group of
[TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
to be used by a
[Page][google.cloud.dialogflow.cx.v3beta1.Page].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, transition_route_group])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a gcdc_transition_route_group.CreateTransitionRouteGroupRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request, gcdc_transition_route_group.CreateTransitionRouteGroupRequest
):
request = gcdc_transition_route_group.CreateTransitionRouteGroupRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if transition_route_group is not None:
request.transition_route_group = transition_route_group
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_transition_route_group
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
    def update_transition_route_group(
        self,
        request: gcdc_transition_route_group.UpdateTransitionRouteGroupRequest = None,
        *,
        transition_route_group: gcdc_transition_route_group.TransitionRouteGroup = None,
        update_mask: field_mask.FieldMask = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> gcdc_transition_route_group.TransitionRouteGroup:
        r"""Updates the specified
        [TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
        Args:
            request (google.cloud.dialogflowcx_v3beta1.types.UpdateTransitionRouteGroupRequest):
                The request object. The request message for
                [TransitionRouteGroups.UpdateTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.UpdateTransitionRouteGroup].
            transition_route_group (google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup):
                Required. The transition route group
                to update.
                This corresponds to the ``transition_route_group`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (google.protobuf.field_mask_pb2.FieldMask):
                The mask to control which fields get
                updated.
                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.cloud.dialogflowcx_v3beta1.types.TransitionRouteGroup:
                An TransitionRouteGroup represents a group of
                [TransitionRoutes][google.cloud.dialogflow.cx.v3beta1.TransitionRoute]
                to be used by a
                [Page][google.cloud.dialogflow.cx.v3beta1.Page].
        """
        # NOTE: follows the generated GAPIC client pattern — the flattened
        # keyword arguments (transition_route_group, update_mask) are mutually
        # exclusive with passing a fully populated ``request`` object.
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([transition_route_group, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a gcdc_transition_route_group.UpdateTransitionRouteGroupRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, gcdc_transition_route_group.UpdateTransitionRouteGroupRequest
        ):
            request = gcdc_transition_route_group.UpdateTransitionRouteGroupRequest(
                request
            )
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if transition_route_group is not None:
            request.transition_route_group = transition_route_group
        if update_mask is not None:
            request.update_mask = update_mask
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.update_transition_route_group
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("transition_route_group.name", request.transition_route_group.name),)
            ),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
    def delete_transition_route_group(
        self,
        request: transition_route_group.DeleteTransitionRouteGroupRequest = None,
        *,
        name: str = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> None:
        r"""Deletes the specified
        [TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup].
        Args:
            request (google.cloud.dialogflowcx_v3beta1.types.DeleteTransitionRouteGroupRequest):
                The request object. The request message for
                [TransitionRouteGroups.DeleteTransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroups.DeleteTransitionRouteGroup].
            name (str):
                Required. The name of the
                [TransitionRouteGroup][google.cloud.dialogflow.cx.v3beta1.TransitionRouteGroup]
                to delete. Format:
                ``projects/<Project ID>/locations/<Location ID>/agents/<Agent ID>/flows/<Flow ID>/transitionRouteGroups/<Transition Route Group ID>``.
                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # NOTE: follows the generated GAPIC client pattern — the flattened
        # ``name`` keyword is mutually exclusive with a populated ``request``.
        # This RPC returns nothing on success.
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a transition_route_group.DeleteTransitionRouteGroupRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(
            request, transition_route_group.DeleteTransitionRouteGroupRequest
        ):
            request = transition_route_group.DeleteTransitionRouteGroupRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[
            self._transport.delete_transition_route_group
        ]
        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
        )
        # Send the request.
        rpc(
            request, retry=retry, timeout=timeout, metadata=metadata,
        )
# Best-effort detection of the installed google-cloud-dialogflowcx version
# for DEFAULT_CLIENT_INFO; falls back to an unversioned ClientInfo when the
# distribution metadata cannot be found (e.g. when running from source).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflowcx",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
# Public export of this module.
__all__ = ("TransitionRouteGroupsClient",)
| [
"50260923+jeevanakruthi@users.noreply.github.com"
] | 50260923+jeevanakruthi@users.noreply.github.com |
5fabb15c2d1c41f9ea0c59d2c5b136ad2120a585 | 3588ff396c1807a18e86c870855bfd50cd2229e9 | /old/style_test.py | 3630510e42f513914149f6f0715f5a314a8dde36 | [] | no_license | hamada2029/my_graph_demo | 5f974e0cf68f758434729ade108761e28662d312 | ad307479aaa9db8bef2454becba15c4bc0011e62 | refs/heads/main | 2023-03-28T11:55:39.640641 | 2021-03-31T04:21:02 | 2021-03-31T04:21:02 | 350,313,358 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
def main():
    """Render sin(x) with matplotlib's built-in 'grayscale' style.

    Saves the figure to ``matplotlib_style_default.png`` in the current
    working directory and returns None.
    """
    x = np.arange(0, 2 * np.pi, 0.1)
    y = np.sin(x)
    plt.style.use('grayscale')
    plt.plot(x, y)
    plt.savefig('matplotlib_style_default.png')
    # The original kept an unreachable experiment below an early `return`
    # (re-plotting under plt.style.context(['ggplot', 'dark_background']))
    # plus commented-out seaborn palette calls; that dead code was removed.
    return


if __name__ == '__main__':
    main()
| [
"8012459+hamada2029@users.noreply.github.com"
] | 8012459+hamada2029@users.noreply.github.com |
314ef5091e78eab377076882f9bae8f983d9e6e9 | 510dc17a7a06acde55425d67f965b16cbb643879 | /day10/point.py | 029a30203f58a6710ae41ce2e109641a8a0d14f9 | [] | no_license | Maerig/advent_of_code_2018 | b0410d3407538cd28f2fe38ac2a2ec6beb75abb4 | d49a42cda5a30d1a682e21cdc36c79d3517eac3c | refs/heads/master | 2020-04-09T01:40:48.115066 | 2018-12-18T05:36:42 | 2018-12-18T05:36:42 | 159,913,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | import re
from utils.vector import Vector
class Point:
    """A particle with a position and a constant velocity."""

    # One "<x, y>" vector with two signed integer components.
    vector_regex = "<\\s*(-?\\d+),\\s+(-?\\d+)>"
    regex = re.compile(f"position={vector_regex}\\s+velocity={vector_regex}")

    @classmethod
    def parse(cls, raw_string):
        """Build a Point from 'position=<x, y> velocity=<dx, dy>' text."""
        groups = cls.regex.match(raw_string).groups()
        px, py, vx, vy = (int(value) for value in groups)
        return cls(Vector(px, py), Vector(vx, vy))

    def __init__(self, position, velocity):
        self.position = position
        self.velocity = velocity

    def move(self, n=1):
        """Advance the position by ``n`` steps of velocity (in place)."""
        self.position += self.velocity * n
| [
"jean-loup@sprocket.bz"
] | jean-loup@sprocket.bz |
ac015bfc74df07b50ffd7ca3d25af3e3d7f7f533 | 1c33fcc926d73912ac3107e0e96c40394288fa32 | /embedding_loader.py | 4402398fe436a4545886fe16cd92bd21fad145e9 | [] | no_license | shuheng-liu/fake-news-group2-project | f14ee86581193bedcffe3eff0f93d53d5ce7d99e | e2ba3fed917405ab59d29b4ec0564ea36239e253 | refs/heads/master | 2021-09-26T21:47:16.416146 | 2018-11-02T23:41:03 | 2018-11-02T23:41:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,948 | py | import os
import pickle as pkl
import numpy as np
from scipy.sparse import hstack
class EmbeddingLoader:
    """Load pre-computed, pickled document embeddings from a directory.

    Three embedding families are supported, each stored one pickle per
    corpus ("title" or "text"): one-hot bag-of-words sums (``onehot``),
    doc2vec vectors (``d2v``) and naive averaged doc vectors (``nd2v``).
    Passing ``corpus="concat"`` to a getter returns the title and text
    embeddings joined column-wise.
    """

    def __init__(self, parent_dir="embeddings"):
        """Remember the directory that holds every embedding pickle.

        :param parent_dir: the directory where all embeddings are saved
        """
        assert os.path.isdir(parent_dir), f"{parent_dir} is not a valid directory"
        self.parent_dir = parent_dir

    @staticmethod
    def get_file(path):
        """Unpickle and return the object stored at ``path``."""
        try:
            with open(path, "rb") as handle:
                return pkl.load(handle)
        except FileNotFoundError as err:
            print(f"unable to load {path}, see stack trace below")
            print(f"double check that you have the file saved {path}")
            print(err)
            raise FileNotFoundError(f"{path}")  # re-throw the error for catching

    @staticmethod
    def get_onehot_filename(corpus="title", scorer="count", normalize=False):
        """Build the canonical file name for a one-hot embedding pickle."""
        norm_tag = ", normalized" if normalize else ""
        return f"{corpus}-onehot(scorer={scorer}{norm_tag}).pkl"

    @staticmethod
    def get_d2v_filename(corpus="title", vec_size=300, win_size=13, min_count=5, dm=False, epochs=100):
        """Build the canonical file name for a doc2vec embedding pickle."""
        algo = "dm" if dm else "dbow"
        return f"{corpus}-d2v(vecsize={vec_size}, winsize={win_size}, mincount={min_count}, {algo}, epochs={epochs}).pkl"

    @staticmethod
    def get_nd2v_filename(corpus="title", normalizer=None):
        """Build the canonical file name for a naive-d2v embedding pickle."""
        return f"{corpus}-nd2v(normalizer={normalizer}).pkl"

    def get_onehot(self, corpus="title", scorer="count", normalize=False):
        """
        returns the onehot sum matrix
        :param corpus: str, must be "title", "text". or "concat" (returns concatenated title and text)
        :param scorer: str, either "count" or "tfidf"
        :param normalize: bool, if set to True, normalized embeddings are returned
        :return: scipy.sparse.csr_matrix, as the one-hot sum vector of the corpus
        """
        assert corpus in ("title", "text", "concat"), "`corpus` must be either 'title', 'text', or 'concat'"
        assert scorer in ("count", "tfidf"), "`scorer` must be either 'count' or 'tfidf'"
        assert isinstance(normalize, bool), "`normalize` must be a bool"
        if corpus == "concat":
            # sparse matrices: stack horizontally with scipy, not numpy
            return hstack([
                self.get_onehot("title", scorer=scorer, normalize=normalize),
                self.get_onehot("text", scorer=scorer, normalize=normalize),
            ])
        filename = EmbeddingLoader.get_onehot_filename(corpus=corpus, scorer=scorer, normalize=normalize)
        return EmbeddingLoader.get_file(os.path.join(self.parent_dir, filename))

    def get_d2v(self, corpus="title", vec_size=300, win_size=13, min_count=5, dm=False, epochs=100):
        """
        returns the d2v embeddings matrix
        :param corpus: str, must be "title", "text". or "concat" (returns concatenated title and text)
        :param vec_size: length of vector, default=300, best left untouched
        :param win_size: window size, default=13, only win_size=13 is computed so far
        :param min_count: min_count to be included in dictionary, only min_count=5, 25, 50 are computed so far
        :param dm: int or bool, denotes whether use DM or DBOW
        :param epochs: number of epochs, only epochs=100 is computed so far
        :return: numpy.ndarray, shape=(n_docs, n_dims), as the d2v embeddings matrix
        """
        assert corpus in ("title", "text", "concat"), "`corpus` must be either 'title', 'text', or 'concat'"
        if corpus == "concat":
            parts = tuple(
                self.get_d2v(part, vec_size=vec_size, win_size=win_size,
                             min_count=min_count, dm=dm, epochs=epochs)
                for part in ("title", "text")
            )
            return np.concatenate(parts, axis=1)
        filename = EmbeddingLoader.get_d2v_filename(corpus=corpus, vec_size=vec_size, win_size=win_size,
                                                    min_count=min_count, dm=dm, epochs=epochs)
        return EmbeddingLoader.get_file(os.path.join(self.parent_dir, filename))

    def get_nd2v(self, corpus="title", normalizer=None):
        """
        returns the naive d2v embeddings matrix
        :param corpus: str, must be "title", "text". or "concat" (returns concatenated title and text)
        :param normalizer: str, either "l2", "mean" or None
        :return: numpy.ndarray, shape=(n_docs, n_dims), as the naive d2v embeddings matrix
        """
        assert corpus in ("title", "text", "concat"), "`corpus` must be either 'title', 'text', or 'concat'"
        assert normalizer is None or normalizer in ("l2", "mean"), "`normalizer` must be 'l2', 'mean' or None"
        if corpus == "concat":
            parts = tuple(
                self.get_nd2v(part, normalizer=normalizer) for part in ("title", "text")
            )
            return np.concatenate(parts, axis=1)
        filename = EmbeddingLoader.get_nd2v_filename(corpus=corpus, normalizer=normalizer)
        return EmbeddingLoader.get_file(os.path.join(self.parent_dir, filename))

    def get_label(self):
        """
        returns the labels, if you have a 'label.pkl' nested under `self.parent_dir`
        :return: numpy.ndarray, shape=(n_docs,), as the label vector (0 for REAL and 1 for FAKE)
        """
        return self.get_file(os.path.join(self.parent_dir, "label.pkl"))
# Smoke test: requires a local "embeddings/" directory containing every
# pickle the three getters look for; loads the concatenated variants.
if __name__ == '__main__':
    loader = EmbeddingLoader("embeddings/")
    d2v = loader.get_d2v("concat")
    nd2v = loader.get_nd2v("concat")
    onehot = loader.get_onehot("concat")
| [
"wish1104@outlook.com"
] | wish1104@outlook.com |
4aeffc8a1b7cda36656da03b4e6ec5d7ef46d6de | 881d74e5c15093dc8218cc96c4d5780d4c31d100 | /DownloadAppLocalhost/server/fajlovi/AdminLogin.py | 209a9cf8060242ea6f19c8418d1aeecd5bc6772d | [] | no_license | mihajilomrsevic/projects | b3e492797a214285ea8907d4814f917bc6a83a23 | 3989e79cf7495f3ffd18ba103bd4af4aa26fcb4b | refs/heads/master | 2021-08-08T17:15:30.232600 | 2020-06-16T01:05:13 | 2020-06-16T01:05:13 | 192,244,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | import tkinter;
from tkinter import *;
from tkinter import font;
# Root window ("forma"): fixed 500x500, light grey background.
forma = tkinter.Tk()
forma.minsize(width=500,height=500)
forma.maxsize(width=500,height=500)
forma.configure(bd=1, bg='#E1E2E1');
# Title area ("naslov" = title): a frame holding the "Admin login" label.
naslovFrame= Frame(forma, bd=1, width=500, height=500);
naslovFont=font.Font(family="Verdana", size=50);
naslov=Label(naslovFrame, bd=1,text='Admin login', font=naslovFont)
poljaFont=font.Font(family="Verdana", size=20);  # font shared by the input fields
naslov.pack()
naslovFrame.pack();
naslovFrame.place(relx=0.5, rely=0.2, anchor=CENTER)
# Credentials area ("unosPodataka" = data entry): username and password
# fields plus the submit button ("PRIJAVA" = sign in).
unosPodataka=Frame(forma, bd=1, width=500, height=100);
usernamePolje=tkinter.Entry(unosPodataka,bd=0, bg='white',font=poljaFont);
usernamePolje.pack(pady=10,anchor=CENTER);
# NOTE(review): the password Entry does not set show='*', so typed
# passwords are displayed in clear text.
passwordPolje=tkinter.Entry(unosPodataka,bd=0, bg='white',font=poljaFont);
passwordPolje.pack(pady=10,anchor=CENTER);
# NOTE(review): no command= callback is wired to the button yet.
submitButton=tkinter.Button(unosPodataka, bd=0, bg='#aa00ff', fg='white', font=poljaFont, text='PRIJAVA', width=19);
submitButton.pack(pady=10,anchor=CENTER);
# Alternative absolute placement kept from earlier layout experiments:
# usernamePolje.place(relx=0.15,rely=0.2);
# passwordPolje.place(relx=0.15,rely=0.6);
# submitButton.place(relx=0.15,rely=0.8);
unosPodataka.pack(side=BOTTOM,anchor=S);
unosPodataka.place(relx=0.5, rely=0.5, anchor=CENTER);
forma.mainloop();
"mmrsevic98@gmail.com"
] | mmrsevic98@gmail.com |
c1aa4d6027e3f7d4523231a8b46bb4074053cd46 | 79f12796b58f2a9a39e0ba99828ad46d298cf12e | /sample/settings.py | 1db7bfeb7fb12f185e95c033c1f7bd927efbfb87 | [] | no_license | ryosan-470/danger-with-django-sample | af30c3fa8c4a9219bcb1d3a41df8d7ac8d6b3c09 | 0de3b385d89f5f2c0d0559c1663896d14462a8c6 | refs/heads/master | 2021-01-23T01:40:21.055575 | 2017-03-23T09:16:55 | 2017-03-23T09:16:55 | 85,923,566 | 0 | 0 | null | 2017-03-28T02:10:08 | 2017-03-23T08:13:10 | Ruby | UTF-8 | Python | false | false | 3,096 | py | """
Django settings for sample project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; move it to an
# environment variable and rotate it before any production deployment.
SECRET_KEY = '0rg26zc$bty#h%u%oa*l=f-kfzrsw3d*deqjos%f!@yw7!vw6$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): an empty ALLOWED_HOSTS only works while DEBUG=True —
# populate it with the served hostnames before deploying.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'sample.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'sample.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file kept next to the project — development default.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"ryosuke.sato@mixi.co.jp"
] | ryosuke.sato@mixi.co.jp |
48fdd9fa5aba23d7bfbf4bd119d4bcc4a83a85a2 | 35d62f3ccf1c422b13b313c4e519a5ce335e934d | /leetcode/jewelsAndStones.py | cc1e31157da05da0b2095b9c498ceeb4b90ee203 | [] | no_license | malaybiswal/python | 357a074889299effe6a5fa2f1cd9c50ca35652d0 | 684d24d719b785725e736671faf2681232ecc394 | refs/heads/master | 2020-05-17T22:25:43.043929 | 2019-05-08T23:41:19 | 2019-05-08T23:41:19 | 183,999,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | #https://leetcode.com/problems/jewels-and-stones/
def numJewelsInStones(J, S):
    """Count how many characters of ``S`` (stones) are jewels.

    :param J: string of jewel characters, case-sensitive (per the LeetCode
        problem statement the letters in J are distinct)
    :param S: string of stones
    :return: number of stones that are also jewels
    """
    # A set gives O(1) membership tests, turning the original
    # O(len(J) * len(S)) nested loop into a single O(len(J) + len(S)) pass.
    jewels = set(J)
    return sum(1 for stone in S if stone in jewels)
x="zz"
y="ZZZ"
print(numJewelsInStones(x,y))
"malay.biswal@rackspace.com"
] | malay.biswal@rackspace.com |
f3e0681e062486b344c6b7254be51d138515d544 | c4d0c6c11b1d8dbca8ac279ac2c455cc7d1ef33e | /main.py | 035e43756d5e9e9b2b133dc4b168195b59388faf | [] | no_license | Sanchoyzer/triangles | 2e9e5b0cac47c300275f231315e1a81436a23fb0 | 1b9240383fb2c73504df448506a05a8329185681 | refs/heads/master | 2020-05-03T10:19:53.406704 | 2019-03-30T15:42:12 | 2019-03-30T15:42:12 | 178,576,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | from triangles import TriangleManager, BadInput, SavePicFail, BaseTriangleException
def test1():
    """Exercise per-step error handling: creation and rendering separately."""
    try:
        manager = TriangleManager(n=2, k=0.25, w=600, h=400, pic_name='pic1')
    except BadInput as error:
        print(f'creation fail, reason: {str(error)!r}')
        return
    try:
        manager.get_picture()
    except SavePicFail as error:
        print(f'saving fail, reason: {error}')
        return
def test2():
    """Exercise catch-all handling via the common exception base class."""
    try:
        TriangleManager(n=6, k=0.5, w=1200, h=800, pic_name='pic2').get_picture()
    except BaseTriangleException as error:
        print(f'total exception: {error}')
        return
def main():
    """Run both triangle demo scenarios in sequence."""
    test1()
    test2()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"sss0991@gmail.com"
] | sss0991@gmail.com |
618c3508fb56413067c1db9ac51da1f1a2f05c4d | 7dca409b0c3a5758c8f63c822932555c9ac20b9e | /main_app/models.py | b77f94a80733c169ffd36de374274a261f1471d5 | [] | no_license | ojdgonzo/Chemical-updates | b356c5910b38e8069cbaa9016fbc7c96e4048085 | 7fc3d2e265be8719842fd80e991c08d7a0f7eb7a | refs/heads/main | 2023-05-30T16:30:47.504216 | 2021-06-21T06:41:03 | 2021-06-21T06:41:03 | 377,715,156 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | from django.db import models
import re
class EngineerManager(models.Manager):
    """Custom manager for Engineer providing registration-form validation."""

    def user_validator(self, post_data):
        """Validate a registration form.

        :param post_data: mapping with keys 'first_name', 'last_name',
            'email', 'password' and 'confirm_password'.
        :return: dict mapping field name to error message; empty when valid.
        """
        errors = {}
        # Messages now match the enforced minimum of 2 characters (they
        # previously claimed "at least 1 character" while checking < 2).
        if len(post_data['first_name']) < 2:
            errors['first_name'] = "First Name must be at least 2 characters"
        if len(post_data['last_name']) < 2:
            errors['last_name'] = "Last Name must be at least 2 characters"
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        if not EMAIL_REGEX.match(post_data['email']):  # test whether a field matches the pattern
            errors['email'] = "Invalid email address!"
        # .exists() issues a cheap EXISTS query instead of fetching all rows.
        if Engineer.objects.filter(email=post_data['email']).exists():
            errors['email_dupe'] = "This Email already exists!"
        # Was `< 9`, which enforced a 9-character minimum and contradicted
        # the message; `< 8` matches the documented "at least 8 characters".
        if len(post_data['password']) < 8:
            errors['password'] = "Your password must be at least 8 characters"
        if post_data['password'] != post_data['confirm_password']:
            errors['match_password'] = "Passwords do not match"
        return errors
class Engineer(models.Model):
    """Registered engineer account."""
    first_name = models.CharField(max_length=45)
    last_name = models.CharField(max_length=45)
    email = models.CharField(max_length=100)
    # NOTE(review): no hashing is visible here and the validator only checks
    # length — confirm passwords are hashed before save in the views.
    password = models.CharField(max_length=30)
    #engineer_created_recipes
    objects = EngineerManager()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
# Wet Etch Recipe Manager
class WetetchrecipeManager(models.Manager):
    """Custom manager for Wetetchrecipe providing form validation."""

    def recipe_validator(self, post_data):
        """Validate a recipe form.

        :param post_data: mapping with 'chemical_name' and 'liters_to_dispense'.
        :return: dict mapping field name to error message; empty when valid.
        """
        errors = {}
        if len(post_data['chemical_name']) < 2:
            # Typo fix in the user-facing message: was "must containt ...".
            errors['chemical_name'] = "Chemical must contain at least 2 characters"
        # NOTE(review): if post_data comes straight from request.POST the
        # value is a string and "0" == 0 is False, so this check would never
        # fire — confirm the caller converts liters_to_dispense to int first.
        if post_data['liters_to_dispense'] == 0:
            errors['liters_to_dispense'] = "You must dispense something"
        return errors
# Wet Etch Recipe Model
class Wetetchrecipe(models.Model):
    """A wet-etch chemical recipe created by an Engineer."""
    chemical_name = models.CharField(max_length=45)
    liters_to_dispense = models.IntegerField()
    # Deleting an Engineer cascades to that engineer's recipes.
    engineer = models.ForeignKey(Engineer, related_name="engineer_created_recipes", on_delete= models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = WetetchrecipeManager()
| [
"noreply@github.com"
] | ojdgonzo.noreply@github.com |
516e7adfdc21f38790c5bfe5706d14864c96eaab | 3cd8bdcda9d0e549df184a5d9085ed8f5a86145d | /defining_classes/to_do_list/project/task.py | ad5910ea22a5b8855a43873b237cf5c1d554e494 | [] | no_license | ivklisurova/SoftUni_Python_OOP | bbec8a5d0d8c2c3f536dd2a92e9187aa39121692 | 59e2080b4eb0826a62a020ea3368a0bac6f644be | refs/heads/master | 2022-11-29T00:09:40.488544 | 2020-08-05T19:55:27 | 2020-08-05T19:55:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 962 | py | class Task:
def __init__(self, name, due_date):
self.name = name
self.due_date = due_date
self.comments = []
self.completed = False
def change_name(self, new_name: str):
if new_name == self.name:
return 'Name cannot be the same.'
self.name = new_name
return self.name
def change_due_date(self, new_date: str):
if new_date == self.due_date:
return 'Date cannot be the same.'
self.due_date = new_date
return self.due_date
def add_comment(self, comment: str):
self.comments.append(comment)
def edit_comment(self, comment_number: int, new_comment: str):
if comment_number >= len(self.comments):
return 'Cannot find comment.'
self.comments[comment_number] = new_comment
return f'{", ".join(self.comments)}'
def details(self):
return f'Name: {self.name} - Due Date: {self.due_date}'
| [
"55747390+ivklisurova@users.noreply.github.com"
] | 55747390+ivklisurova@users.noreply.github.com |
e7b3c28e67c42c208b0778ca9b4afdfddfd18a79 | 706518f154812af56f8fc91a71cd65d9667d9ed0 | /python/paddle/fluid/tests/unittests/test_device.py | 08697a080445e606f17bdde83384eef391713721 | [
"Apache-2.0"
] | permissive | andreazanetti/Paddle | 3ea464703d67963134ffc6828f364412adb03fce | a259076dd01801e2e619237da02235a4856a96bb | refs/heads/develop | 2023-04-25T08:30:43.751734 | 2021-05-05T01:31:44 | 2021-05-05T01:31:44 | 263,870,069 | 0 | 2 | Apache-2.0 | 2020-07-07T10:45:08 | 2020-05-14T09:22:07 | null | UTF-8 | Python | false | false | 3,379 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.framework as framework
class TestStaticDeviceManage(unittest.TestCase):
    """Checks paddle.set_device/get_device under the static-graph Executor."""

    def _test_device(self, device_name, device_class):
        # Select the target device, build a tiny graph, and run it so the
        # executor's place is actually materialized.
        paddle.set_device(device_name)
        out1 = paddle.zeros(shape=[1, 3], dtype='float32')
        out2 = paddle.ones(shape=[1, 3], dtype='float32')
        out3 = paddle.concat(x=[out1, out2], axis=0)
        exe = paddle.static.Executor()
        exe.run(paddle.fluid.default_startup_program())
        res = exe.run(fetch_list=[out3])
        device = paddle.get_device()
        # The executor must be placed on the requested device type, and
        # get_device() must echo the name passed to set_device().
        self.assertEqual(isinstance(exe.place, device_class), True)
        self.assertEqual(device, device_name)
    def test_cpu_device(self):
        self._test_device("cpu", core.CPUPlace)
    def test_gpu_device(self):
        # Only meaningful when this build of Paddle has CUDA support.
        if core.is_compiled_with_cuda():
            self._test_device("gpu:0", core.CUDAPlace)
    def test_xpu_device(self):
        # Only meaningful when this build of Paddle has XPU support.
        if core.is_compiled_with_xpu():
            self._test_device("xpu:0", core.XPUPlace)
class TestImperativeDeviceManage(unittest.TestCase):
    """Checks paddle.set_device/get_device in dygraph (imperative) mode."""

    def test_cpu(self):
        with fluid.dygraph.guard():
            paddle.set_device('cpu')
            out1 = paddle.zeros(shape=[1, 3], dtype='float32')
            out2 = paddle.ones(shape=[1, 3], dtype='float32')
            out3 = paddle.concat(x=[out1, out2], axis=0)
            device = paddle.get_device()
            # In dygraph mode the globally expected place should follow
            # set_device() immediately.
            self.assertEqual(
                isinstance(framework._current_expected_place(), core.CPUPlace),
                True)
            self.assertEqual(device, "cpu")
    def test_gpu(self):
        if core.is_compiled_with_cuda():
            with fluid.dygraph.guard():
                paddle.set_device('gpu:0')
                out1 = paddle.zeros(shape=[1, 3], dtype='float32')
                out2 = paddle.ones(shape=[1, 3], dtype='float32')
                out3 = paddle.concat(x=[out1, out2], axis=0)
                device = paddle.get_device()
                self.assertEqual(
                    isinstance(framework._current_expected_place(),
                               core.CUDAPlace), True)
                self.assertEqual(device, "gpu:0")
    def test_xpu(self):
        if core.is_compiled_with_xpu():
            with fluid.dygraph.guard():
                # No explicit set_device here: XPU builds are expected to
                # default to xpu:0, and the created tensor must live there.
                out = paddle.to_tensor([1, 2])
                device = paddle.get_device()
                self.assertEqual(
                    isinstance(framework._current_expected_place(),
                               core.XPUPlace), True)
                self.assertTrue(out.place.is_xpu_place())
                self.assertEqual(device, "xpu:0")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | andreazanetti.noreply@github.com |
e92bf9226665c58383336c5f1c1b790d90fd4ec0 | c8ecf1a41d5b852e60d2b1c0401dde0b472be684 | /survey/admin.py | 5445c6d27081d7bac1ac2b41d709ca92e21b179d | [] | no_license | xrpy/CL | ac35815b5e49d03cc71ad56bca0454dc9bffd2e9 | 2bca1fabb3bcf8967a3d09c0b77823e721a9dac9 | refs/heads/master | 2021-01-13T01:58:59.904456 | 2013-05-08T14:01:22 | 2013-05-08T14:01:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | from survey.models import QUESTIONS, ANSWERS, RESULTS, WEIGHT
from django.contrib import admin
admin.site.register(QUESTIONS)
admin.site.register(ANSWERS)
admin.site.register(RESULTS)
admin.site.register(WEIGHT) | [
"xrrppy@gmail.com"
] | xrrppy@gmail.com |
10afc408028671c8be35096088aebc257f10041b | 171523d60e5f59b4ae4464c924f984e35ff898ae | /pypipe-core/pypipe/formats.py | fd05125bf2441d6664b691ac44d3b37272f9deb4 | [
"MIT"
] | permissive | ctlab/pypipe | a92172aa36d2d06473e60e6ba666fd485eb4d29a | f20da66d915a0503ff367b648987c75620814b33 | refs/heads/master | 2020-06-07T23:53:17.906755 | 2014-06-04T13:53:51 | 2014-06-04T13:53:51 | 17,754,495 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,890 | py | import pypipe.basefile
# Thin format-marker subclasses of pypipe.basefile.File.  Each class only
# fixes a distinct Python type for one bioinformatics file format and
# forwards its arguments unchanged to File.__init__; none adds behavior
# (presumably so pipeline steps can type-check their inputs/outputs —
# confirm against pypipe.basefile.File).
class TextFile(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(TextFile, self).__init__(path, program, check)
class Fastq(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Fastq, self).__init__(path, program, check)
class Fasta(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Fasta, self).__init__(path, program, check)
class Bam(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Bam, self).__init__(path, program, check)
class Sam(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Sam, self).__init__(path, program, check)
class Bai(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Bai, self).__init__(path, program, check)
class Sai(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Sai, self).__init__(path, program, check)
class Fai(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Fai, self).__init__(path, program, check)
class Bcf(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Bcf, self).__init__(path, program, check)
class Vcf(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Vcf, self).__init__(path, program, check)
class Bed(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Bed, self).__init__(path, program, check)
class Qseq(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Qseq, self).__init__(path, program, check)
class Snp(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Snp, self).__init__(path, program, check)
class Pileup(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Pileup, self).__init__(path, program, check)
class Indel(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Indel, self).__init__(path, program, check)
class Cns(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Cns, self).__init__(path, program, check)
# Multi-file index formats: `suff` lists every extension File should check.
class Bowtie2Index(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(Bowtie2Index, self).__init__(path, program, check,
                suff=['.1.bt2', '.2.bt2', '.3.bt2', '.4.bt2', '.rev.1.bt2', '.rev.2.bt2'])
class BwaIndex(pypipe.basefile.File):
    def __init__(self, path, program=None, check=True):
        super(BwaIndex, self).__init__(path, program, check,
                suff=['.amb', '.ann', '.bwt', '.pac', '.sa'])
| [
"semkagtn@gmail.com"
] | semkagtn@gmail.com |
67056ff3f3511beb22ed46e346b3d52b30d40eed | cc1cd104b4b383e7807e75e2fb0a8e84e5fcf7df | /api_server/openpose_wrapper/openpose_server/app.py | 491381120447d9b6e7f8461f4eb89313c620e8c9 | [] | no_license | Sam1224/OutfitApp-AWS | b9884d40945d2076f2135c0d2d75cf938161af9f | 6c1b4d1e5c328c5d22b8f055d41a57ec2e9b921e | refs/heads/master | 2022-04-24T11:50:24.506423 | 2020-04-29T11:03:43 | 2020-04-29T11:03:43 | 257,340,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,164 | py | # coding=utf-8
import os
import sys
import argparse
import json
from PIL import Image
import cv2
import numpy as np
import itertools
# flask
import flask
#from flask import Flask, render_template, request, jsonify
# openpose python API
sys.path.append('../openpose_gpu/build/python');
from openpose import pyopenpose as op
# 自作モジュール
from utils import conv_base64_to_pillow, conv_base64_to_cv, conv_pillow_to_base64
#======================
# Global variables
#======================
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = False # do not escape non-ASCII (Japanese) characters in JSON responses
app.config["JSON_SORT_KEYS"] = False # keep JSON keys in insertion order instead of sorting them
OPENPOSE_MODE_DIR_PATH = "../openpose_gpu/models/"  # OpenPose model directory, relative to the server's working directory
#================================================================
# "http://host_ip:5010" リクエスト送信時の処理
#================================================================
@app.route('/')
def index():
    """Health-check endpoint: log the request and return an empty 200 response."""
    print( "リクエスト受け取り" )
    # BUG FIX: returning None from a Flask view raises
    # "The view function did not return a valid response"; return an empty body.
    return ""
#================================================================
# "http://host_ip:5010/openpose" にリクエスト送信時の処理
#================================================================
@app.route('/openpose', methods=['POST'])
def responce():
    """Run OpenPose on a base64-encoded image POSTed as JSON and return the
    detected 2D/3D keypoints in OpenPose's JSON output format (version 1.3)."""
    print( "リクエスト受け取り" )
    if( app.debug ):
        print( "flask.request.method : ", flask.request.method )
        print( "flask.request.headers \n: ", flask.request.headers )
    #------------------------------------------
    # Extract the JSON payload from the request.
    # python-requests clients send the JSON body as a string, so it needs an
    # extra json.loads(); browsers/other clients are handled by get_json().
    #------------------------------------------
    if( flask.request.headers["User-Agent"].split("/")[0] in "python-requests" ):
        json_data = json.loads(flask.request.json)
    else:
        json_data = flask.request.get_json()
    #------------------------------------------
    # Decode the transmitted image (base64 -> OpenCV BGR array).
    #------------------------------------------
    pose_img_cv = conv_base64_to_cv( json_data["pose_img_base64"] )
    if( app.debug ):
        cv2.imwrite( "tmp/pose_img.png", pose_img_cv )
    #------------------------------------------
    # Run the OpenPose Python API.
    # Reference: openpose_gpu/build/examples/tutorial_api_python/01_body_from_image.py
    #------------------------------------------
    # Configure parameters (body + face + hand detection).
    params = dict()
    params["model_folder"] = OPENPOSE_MODE_DIR_PATH
    params["face"] = True
    params["hand"] = True
    # OpenPose Python-API
    opWrapper = op.WrapperPython()
    opWrapper.configure(params)
    opWrapper.start()
    # Process Image
    datum = op.Datum()
    datum.cvInputData = pose_img_cv
    opWrapper.emplaceAndPop([datum])
    # Extract keypoints. The np.delete drops body joints 8 and 19-24
    # (presumably to convert BODY_25 output to an 18-joint layout — TODO confirm).
    pose_keypoints_2d = np.delete( datum.poseKeypoints, [8, 19, 20, 21, 22, 23, 24], axis=1).reshape(-1).tolist()
    face_keypoints_2d = datum.faceKeypoints.reshape(-1).tolist()
    pose_keypoints_3d = datum.poseKeypoints3D.tolist()
    face_keypoints_3d = datum.faceKeypoints3D.tolist()
    left_hand_keypoints_2d = datum.handKeypoints[0].reshape(-1).tolist()
    right_hand_keypoints_2d = datum.handKeypoints[1].reshape(-1).tolist()
    hand_left_keypoints_3d = datum.handKeypoints3D[0].tolist()
    hand_right_keypoints_3d = datum.handKeypoints3D[1].tolist()
    """
    if( args.debug ):
        print("pose_keypoints_2d : ", pose_keypoints_2d )
        #print("pose_keypoints_2d[0][0] : ", pose_keypoints_2d[0][0] )
        #print("face_keypoints_2d: ", face_keypoints_2d )
        #print("pose_keypoints_3d: ", pose_keypoints_3d )
        #print("datum.cvOutputData: ", datum.cvOutputData )
    """
    #------------------------------------------
    # Build the response message (OpenPose JSON output schema).
    #------------------------------------------
    http_status_code = 200
    response = flask.jsonify(
        {
            "version" : 1.3,
            "people" : [
                {
                    "pose_keypoints_2d" : pose_keypoints_2d,
                    "face_keypoints_2d" : face_keypoints_2d,
                    "hand_left_keypoints_2d" : left_hand_keypoints_2d,
                    "hand_right_keypoints_2d" : right_hand_keypoints_2d,
                    "pose_keypoints_3d" : pose_keypoints_3d,
                    "face_keypoints_3d" : face_keypoints_3d,
                    "hand_left_keypoints_3d" : hand_left_keypoints_3d,
                    "hand_right_keypoints_3d" : hand_right_keypoints_3d,
                }
            ]
        }
    )
    # Optional CORS headers, kept disabled (workaround for Access-Control-Allow-Origin errors).
    #response.headers.add('Access-Control-Allow-Origin', '*')
    #response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    #response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE,OPTIONS')
    if( app.debug ):
        print( "response.headers : \n", response.headers )
    return response, http_status_code
if __name__ == "__main__":
    # Command-line entry point: parse server options and start the Flask app.
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, default="0.0.0.0", help="ホスト名(コンテナ名 or コンテナ ID)")
    parser.add_argument('--port', type=str, default="5010", help="ポート番号")
    parser.add_argument('--enable_threaded', action='store_true', help="並列処理有効化")
    parser.add_argument('--debug', action='store_true', help="デバッグモード有効化")
    args = parser.parse_args()
    if args.debug:
        for key, value in vars(args).items():
            print('%s: %s' % (str(key), str(value)))
    # Directory for debug image dumps (see the /openpose handler).
    if not os.path.exists("tmp"):
        os.mkdir("tmp")
    app.debug = args.debug
    # BUG FIX: the original passed threaded=False when --enable_threaded was
    # given and threaded=True otherwise — the flag's meaning was inverted.
    app.run(host=args.host, port=args.port, threaded=args.enable_threaded)
| [
"y034112@gmail.com"
] | y034112@gmail.com |
010609893dab43c598f8269f541a18362bd48909 | 1249c993606aac91a676e49103def607f833c994 | /dailyfresh/df_order/models.py | b268235ca8d9072e846076cc6658943aea517315 | [] | no_license | retime123/django1 | 19c7a3bedb277da7ffde10780be1c4ce69f78b02 | 8416b27b6f174ded10e03be200ace14ea52742b9 | refs/heads/master | 2021-01-22T11:28:21.543590 | 2017-06-09T10:00:47 | 2017-06-09T10:00:47 | 92,699,246 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 733 | py | #coding=utf-8
from django.db import models
# Create your models here.
class OrderInfo(models.Model):# order header ("master" table)
    # Order id string (e.g. timestamp-based); used as the primary key.
    oid= models.CharField(max_length=20,primary_key=True)
    user = models.ForeignKey('df_user.FreshInfo')# one-to-many relation; generates a user_id column
    isPay = models.BooleanField(default=False)
    odate = models.DateTimeField(auto_now_add=True)
    ototal = models.DecimalField(max_digits=7,decimal_places=2)# order total: 5 integer digits + 2 decimals
    oaddress = models.CharField(max_length=150)
class OrderDetailInfo(models.Model):
    # One line item of an order: which goods, which order, at what price.
    goods = models.ForeignKey('df_goods.GoodsInfo')
    order = models.ForeignKey(OrderInfo)
    price = models.DecimalField(max_digits=5,decimal_places=2)# unit price
count = models.IntegerField() | [
"781816703@qq.com"
] | 781816703@qq.com |
4d6a0bdd82d3f861d1579335a2e2cb2392616ab4 | 8cf211cabe8c5169b2c9c3c6b38f69ac6c93b93e | /erp_confere/project_env/lib/python3.6/token.py | e2a01559213c4d00064b577563774fbcb2ec40f9 | [] | no_license | bopopescu/Python_Personal_Projects | 020256cb6835438b1b776eacb1a39d4cb5bc2efc | 025145130da5ac846b8aa14764783739ff68f64c | refs/heads/master | 2022-11-21T04:47:17.253558 | 2018-10-25T22:12:07 | 2018-10-25T22:12:07 | 281,146,690 | 0 | 0 | null | 2020-07-20T14:57:05 | 2020-07-20T14:57:04 | null | UTF-8 | Python | false | false | 47 | py | /home/vinicius/anaconda3/lib/python3.6/token.py | [
"vinicius.yosiura@live.com"
] | vinicius.yosiura@live.com |
41a05d7a93f1d4978b9f25113ff9613e69bb5310 | c858363a213a33dc1b062716feeabc8836302fca | /pandasTesting.py | e5c8dc5849defcb65d08722789469b08cae66a4f | [] | no_license | YoussefAlm/Disability-insights-USA-MOR- | 8ad9bcea2421ed9a05e6399644c2f27910e8b0fd | 93a5bde5810ea98e989e4f43e0fd49003f39f6fc | refs/heads/main | 2023-02-01T21:12:51.214611 | 2020-12-20T11:43:31 | 2020-12-20T11:43:31 | 323,055,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | from ETL.loader import EtlHcp04
import pandas as pd
## Sum 2 or more dataframes [Indexes/columns must be the same]
def dfs_sum(dfsArray):
    """Element-wise sum of a list of pandas DataFrames.

    Missing labels are treated as 0 (``fill_value=0``), so the result covers
    the union of all indexes/columns.

    :param dfsArray: non-empty list of DataFrames
    :return: the single input DataFrame when only one is given, otherwise a
             new DataFrame holding the element-wise sum
    :raises ValueError: if ``dfsArray`` is empty
    """
    if not dfsArray:
        # Previously an empty list raised an opaque IndexError on dfsArray[0].
        raise ValueError("dfs_sum() requires at least one DataFrame")
    if len(dfsArray) == 1:
        return dfsArray[0]
    newDf = pd.DataFrame(index=dfsArray[0].index)
    for df in dfsArray:
        newDf = newDf.add(df, fill_value=0)
    return newDf
| [
"50412869+YoussefAlm@users.noreply.github.com"
] | 50412869+YoussefAlm@users.noreply.github.com |
388e39245dba1603abd5fd90c92d8d8fb0f3d346 | b1b4d4c2c1079ffab8bb7796df702d51a565e311 | /todo_module/model/todo.py | 3cd489c6041abc8a71ee9596862add3faee8b66a | [] | no_license | mikenac/unit_testing_presentation | 137ded5f0b8b9fcddbe0981aeb4190fbe11dc673 | aa9d12167c1a4f9088a72d4255273d2f02105863 | refs/heads/main | 2022-12-22T20:44:36.564113 | 2020-10-02T18:17:22 | 2020-10-02T18:17:22 | 300,700,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | import jsons
class Todo(jsons.JsonSerializable):
    """A single todo item (id, owner, title, completion flag)."""

    def __init__(self, id: int, userId: int, title: str, completed: bool):
        # Store all four fields in one tuple-unpacking assignment.
        self.id, self.userId, self.title, self.completed = id, userId, title, completed
| [
"mike-nacey@teletracking.com"
] | mike-nacey@teletracking.com |
3cf3115f67a4411fd1aa84cd1554d0fac8fcd0a1 | a66aeb0cb7d37212c8be33da4f0eb825ab1d188c | /blog/migrations/0001_initial.py | 629c83cb715f7c903b296f935dc8355c15cbfec5 | [] | no_license | jakubfabik/django_blog | 4f8a049b8d505063eb5df51bc10059907b87f4b4 | 981f65ffd2f61586b672c8f1ff44f00cbd61aea0 | refs/heads/master | 2022-05-27T18:56:09.209014 | 2022-05-07T19:19:30 | 2022-05-07T19:19:30 | 177,750,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 907 | py | # Generated by Django 2.1.7 on 2019-03-26 16:23
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated initial migration for the blog app: creates ``Post``."""
    initial = True
    dependencies = [
        # Post.author references the project's (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"fabikjakub@outlook.com"
] | fabikjakub@outlook.com |
d54d95050bebcec69e6ae64667f6e9bcdc0cb4e4 | cac99ac11c7baf3ace66cc5a2f2b74fdc58346a1 | /ssd_detector/keras_ssd7.py | 7fa888a51344998d8f94bbdccf8f0b894ec0e58d | [] | no_license | yp000925/NVIDIA_AI_CITY_CHALLENGE-team41-track3 | 4333ef924c1fcadbc67a3cb0b3074b10212fa44d | c2db6ba61265b429e70dab743bb06c69897d106b | refs/heads/master | 2020-05-31T09:00:15.748617 | 2018-06-27T08:37:28 | 2018-06-27T08:37:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,345 | py | '''
A small 7-layer Keras model with SSD architecture. Also serves as a template to build arbitrary network architectures.
Copyright (C) 2017 Pierluigi Ferrari
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from __future__ import division
import numpy as np
from keras.models import Model
from keras.layers import Input, Lambda, Conv2D, MaxPooling2D, BatchNormalization, ELU, Reshape, Concatenate, Activation
from keras.regularizers import l2
from keras_layer_AnchorBoxes import AnchorBoxes
def build_model(image_size,
                n_classes,
                l2_regularization=0.0,
                min_scale=0.1,
                max_scale=0.9,
                scales=None,
                aspect_ratios_global=[0.5, 1.0, 2.0],
                aspect_ratios_per_layer=None,
                two_boxes_for_ar1=True,
                steps=None,
                offsets=None,
                limit_boxes=False,
                variances=[1.0, 1.0, 1.0, 1.0],
                coords='centroids',
                normalize_coords=False,
                subtract_mean=None,
                divide_by_stddev=None,
                swap_channels=False,
                return_predictor_sizes=False):
    '''
    Build a Keras model with SSD architecture, see references.
    The model consists of convolutional feature layers and a number of convolutional
    predictor layers that take their input from different feature layers.
    The model is fully convolutional.
    The implementation found here is a smaller version of the original architecture
    used in the paper (where the base network consists of a modified VGG-16 extended
    by a few convolutional feature layers), but of course it could easily be changed to
    an arbitrarily large SSD architecture by following the general design pattern used here.
    This implementation has 7 convolutional layers and 4 convolutional predictor
    layers that take their input from layers 4, 5, 6, and 7, respectively.
    Most of the arguments that this function takes are only needed for the anchor
    box layers. In case you're training the network, the parameters passed here must
    be the same as the ones used to set up `SSDBoxEncoder`. In case you're loading
    trained weights, the parameters passed here must be the same as the ones used
    to produce the trained weights.
    Some of these arguments are explained in more detail in the documentation of the
    `SSDBoxEncoder` class.
    Note: Requires Keras v2.0 or later. Training currently works only with the
    TensorFlow backend (v1.0 or later).
    Arguments:
        image_size (tuple): The input image size in the format `(height, width, channels)`.
        n_classes (int): The number of positive classes, e.g. 20 for Pascal VOC, 80 for MS COCO.
        l2_regularization (float, optional): The L2-regularization rate. Applies to all convolutional layers.
        min_scale (float, optional): The smallest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images.
        max_scale (float, optional): The largest scaling factor for the size of the anchor boxes as a fraction
            of the shorter side of the input images. All scaling factors between the smallest and the
            largest will be linearly interpolated. Note that the second to last of the linearly interpolated
            scaling factors will actually be the scaling factor for the last predictor layer, while the last
            scaling factor is used for the second box for aspect ratio 1 in the last predictor layer
            if `two_boxes_for_ar1` is `True`.
        scales (list, optional): A list of floats containing scaling factors per convolutional predictor layer.
            This list must be one element longer than the number of predictor layers. The first `k` elements are the
            scaling factors for the `k` predictor layers, while the last element is used for the second box
            for aspect ratio 1 in the last predictor layer if `two_boxes_for_ar1` is `True`. This additional
            last scaling factor must be passed either way, even if it is not being used.
            Defaults to `None`. If a list is passed, this argument overrides `min_scale` and
            `max_scale`. All scaling factors must be greater than zero.
        aspect_ratios_global (list, optional): The list of aspect ratios for which anchor boxes are to be
            generated. This list is valid for all predictor layers. The original implementation uses more aspect ratios
            for some predictor layers and fewer for others. If you want to do that, too, then use the next argument instead.
            Defaults to `[0.5, 1.0, 2.0]`.
        aspect_ratios_per_layer (list, optional): A list containing one aspect ratio list for each predictor layer.
            This allows you to set the aspect ratios for each predictor layer individually. If a list is passed,
            it overrides `aspect_ratios_global`.
        two_boxes_for_ar1 (bool, optional): Only relevant for aspect ratio lists that contain 1. Will be ignored otherwise.
            If `True`, two anchor boxes will be generated for aspect ratio 1. The first will be generated
            using the scaling factor for the respective layer, the second one will be generated using
            geometric mean of said scaling factor and next bigger scaling factor. Defaults to `True`, following the original
            implementation.
        steps (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either ints/floats or tuples of two ints/floats. These numbers represent for each predictor layer how many
            pixels apart the anchor box center points should be vertically and horizontally along the spatial grid over
            the image. If the list contains ints/floats, then that value will be used for both spatial dimensions.
            If the list contains tuples of two ints/floats, then they represent `(step_height, step_width)`.
            If no steps are provided, then they will be computed such that the anchor box center points will form an
            equidistant grid within the image dimensions.
        offsets (list, optional): `None` or a list with as many elements as there are predictor layers. The elements can be
            either floats or tuples of two floats. These numbers represent for each predictor layer how many
            pixels from the top and left boarders of the image the top-most and left-most anchor box center points should be
            as a fraction of `steps`. The last bit is important: The offsets are not absolute pixel values, but fractions
            of the step size specified in the `steps` argument. If the list contains floats, then that value will
            be used for both spatial dimensions. If the list contains tuples of two floats, then they represent
            `(vertical_offset, horizontal_offset)`. If no offsets are provided, then they will default to 0.5 of the step size,
            which is also the recommended setting.
        limit_boxes (bool, optional): If `True`, limits box coordinates to stay within image boundaries.
            This would normally be set to `True`, but here it defaults to `False`, following the original
            implementation.
        variances (list, optional): A list of 4 floats >0 with scaling factors (actually it's not factors but divisors
            to be precise) for the encoded predicted box coordinates. A variance value of 1.0 would apply
            no scaling at all to the predictions, while values in (0,1) upscale the encoded predictions and values greater
            than 1.0 downscale the encoded predictions. If you want to reproduce the configuration of the original SSD,
            set this to `[0.1, 0.1, 0.2, 0.2]`, provided the coordinate format is 'centroids'.
        coords (str, optional): The box coordinate format to be used. Can be either 'centroids' for the format
            `(cx, cy, w, h)` (box center coordinates, width, and height) or 'minmax' for the format
            `(xmin, xmax, ymin, ymax)`.
        normalize_coords (bool, optional): Set to `True` if the model is supposed to use relative instead of absolute coordinates,
            i.e. if the model predicts box coordinates within [0,1] instead of absolute coordinates.
        subtract_mean (array-like, optional): `None` or an array-like object of integers or floating point values
            of any shape that is broadcast-compatible with the image shape. The elements of this array will be
            subtracted from the image pixel intensity values. For example, pass a list of three integers
            to perform per-channel mean normalization for color images.
        divide_by_stddev (array-like, optional): `None` or an array-like object of non-zero integers or
            floating point values of any shape that is broadcast-compatible with the image shape. The image pixel
            intensity values will be divided by the elements of this array. For example, pass a list
            of three integers to perform per-channel standard deviation normalization for color images.
        swap_channels (bool, optional): If `True`, the color channel order of the input images will be reversed,
            i.e. if the input color channel order is RGB, the color channels will be swapped to BGR.
        return_predictor_sizes (bool, optional): If `True`, this function not only returns the model, but also
            a list containing the spatial dimensions of the predictor layers. This isn't strictly necessary since
            you can always get their sizes easily via the Keras API, but it's convenient and less error-prone
            to get them this way. They are only relevant for training anyway (SSDBoxEncoder needs to know the
            spatial dimensions of the predictor layers), for inference you don't need them.
    Returns:
        model: The Keras SSD model.
        predictor_sizes (optional): A Numpy array containing the `(height, width)` portion
            of the output tensor shape for each convolutional predictor layer. During
            training, the generator function needs this in order to transform
            the ground truth labels into tensors of identical structure as the
            output tensors of the model, which is in turn needed for the cost
            function.
    References:
        https://arxiv.org/abs/1512.02325v5
    '''
    # NOTE(review): the list-valued parameter defaults above are read-only in
    # this function (never mutated), so the mutable-default pitfall does not
    # apply; changing them to None-sentinels would alter the public signature.
    n_predictor_layers = 4 # The number of predictor conv layers in the network
    n_classes += 1 # Account for the background class.
    l2_reg = l2_regularization # Make the internal name shorter.
    img_height, img_width, img_channels = image_size[0], image_size[1], image_size[2]
    ############################################################################
    # Get a few exceptions out of the way.
    ############################################################################
    if aspect_ratios_global is None and aspect_ratios_per_layer is None:
        raise ValueError("`aspect_ratios_global` and `aspect_ratios_per_layer` cannot both be None. At least one needs to be specified.")
    if aspect_ratios_per_layer:
        if len(aspect_ratios_per_layer) != n_predictor_layers:
            raise ValueError("It must be either aspect_ratios_per_layer is None or len(aspect_ratios_per_layer) == {}, but len(aspect_ratios_per_layer) == {}.".format(n_predictor_layers, len(aspect_ratios_per_layer)))
    if (min_scale is None or max_scale is None) and scales is None:
        raise ValueError("Either `min_scale` and `max_scale` or `scales` need to be specified.")
    if scales:
        if len(scales) != n_predictor_layers+1:
            raise ValueError("It must be either scales is None or len(scales) == {}, but len(scales) == {}.".format(n_predictor_layers+1, len(scales)))
    else: # If no explicit list of scaling factors was passed, compute the list of scaling factors from `min_scale` and `max_scale`
        scales = np.linspace(min_scale, max_scale, n_predictor_layers+1)
    if len(variances) != 4: # We need one variance value for each of the four box coordinates
        # (message typo "pased" kept as-is: it is a runtime string)
        raise ValueError("4 variance values must be pased, but {} values were received.".format(len(variances)))
    variances = np.array(variances)
    if np.any(variances <= 0):
        raise ValueError("All variances must be >0, but the variances given are {}".format(variances))
    if (not (steps is None)) and (len(steps) != n_predictor_layers):
        raise ValueError("You must provide at least one step value per predictor layer.")
    if (not (offsets is None)) and (len(offsets) != n_predictor_layers):
        raise ValueError("You must provide at least one offset value per predictor layer.")
    ############################################################################
    # Compute the anchor box parameters.
    ############################################################################
    # Set the aspect ratios for each predictor layer. These are only needed for the anchor box layers.
    if aspect_ratios_per_layer:
        aspect_ratios = aspect_ratios_per_layer
    else:
        aspect_ratios = [aspect_ratios_global] * n_predictor_layers
    # Compute the number of boxes to be predicted per cell for each predictor layer.
    # We need this so that we know how many channels the predictor layers need to have.
    if aspect_ratios_per_layer:
        n_boxes = []
        for ar in aspect_ratios_per_layer:
            if (1 in ar) & two_boxes_for_ar1:
                n_boxes.append(len(ar) + 1) # +1 for the second box for aspect ratio 1
            else:
                n_boxes.append(len(ar))
    else: # If only a global aspect ratio list was passed, then the number of boxes is the same for each predictor layer
        if (1 in aspect_ratios_global) & two_boxes_for_ar1:
            n_boxes = len(aspect_ratios_global) + 1
        else:
            n_boxes = len(aspect_ratios_global)
        n_boxes = [n_boxes] * n_predictor_layers
    if steps is None:
        steps = [None] * n_predictor_layers
    if offsets is None:
        offsets = [None] * n_predictor_layers
    ############################################################################
    # Build the network.
    ############################################################################
    x = Input(shape=(img_height, img_width, img_channels))
    # The following identity layer is only needed so that the subsequent lambda layers can be optional.
    x1 = Lambda(lambda z: z, output_shape=(img_height, img_width, img_channels), name='identity_layer')(x)
    if not (subtract_mean is None):
        x1 = Lambda(lambda z: z - np.array(subtract_mean), output_shape=(img_height, img_width, img_channels), name='input_mean_normalization')(x1)
    if not (divide_by_stddev is None):
        x1 = Lambda(lambda z: z / np.array(divide_by_stddev), output_shape=(img_height, img_width, img_channels), name='input_stddev_normalization')(x1)
    if swap_channels and (img_channels == 3):
        x1 = Lambda(lambda z: z[...,::-1], output_shape=(img_height, img_width, img_channels), name='input_channel_swap')(x1)
    conv1 = Conv2D(32, (5, 5), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv1')(x1)
    conv1 = BatchNormalization(axis=3, momentum=0.99, name='bn1')(conv1) # Tensorflow uses filter format [filter_height, filter_width, in_channels, out_channels], hence axis = 3
    conv1 = ELU(name='elu1')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), name='pool1')(conv1)
    conv2 = Conv2D(48, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv2')(pool1)
    conv2 = BatchNormalization(axis=3, momentum=0.99, name='bn2')(conv2)
    conv2 = ELU(name='elu2')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), name='pool2')(conv2)
    conv3 = Conv2D(64, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv3')(pool2)
    conv3 = BatchNormalization(axis=3, momentum=0.99, name='bn3')(conv3)
    conv3 = ELU(name='elu3')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), name='pool3')(conv3)
    conv4 = Conv2D(64, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv4')(pool3)
    conv4 = BatchNormalization(axis=3, momentum=0.99, name='bn4')(conv4)
    conv4 = ELU(name='elu4')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), name='pool4')(conv4)
    conv5 = Conv2D(48, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv5')(pool4)
    conv5 = BatchNormalization(axis=3, momentum=0.99, name='bn5')(conv5)
    conv5 = ELU(name='elu5')(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2), name='pool5')(conv5)
    conv6 = Conv2D(48, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv6')(pool5)
    conv6 = BatchNormalization(axis=3, momentum=0.99, name='bn6')(conv6)
    conv6 = ELU(name='elu6')(conv6)
    pool6 = MaxPooling2D(pool_size=(2, 2), name='pool6')(conv6)
    conv7 = Conv2D(32, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='conv7')(pool6)
    conv7 = BatchNormalization(axis=3, momentum=0.99, name='bn7')(conv7)
    conv7 = ELU(name='elu7')(conv7)
    # The next part is to add the convolutional predictor layers on top of the base network
    # that we defined above. Note that I use the term "base network" differently than the paper does.
    # To me, the base network is everything that is not convolutional predictor layers or anchor
    # box layers. In this case we'll have four predictor layers, but of course you could
    # easily rewrite this into an arbitrarily deep base network and add an arbitrary number of
    # predictor layers on top of the base network by simply following the pattern shown here.
    # Build the convolutional predictor layers on top of conv layers 4, 5, 6, and 7.
    # We build two predictor layers on top of each of these layers: One for class prediction (classification), one for box coordinate prediction (localization)
    # We precidt `n_classes` confidence values for each box, hence the `classes` predictors have depth `n_boxes * n_classes`
    # We predict 4 box coordinates for each box, hence the `boxes` predictors have depth `n_boxes * 4`
    # Output shape of `classes`: `(batch, height, width, n_boxes * n_classes)`
    classes4 = Conv2D(n_boxes[0] * n_classes, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='classes4')(conv4)
    classes5 = Conv2D(n_boxes[1] * n_classes, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='classes5')(conv5)
    classes6 = Conv2D(n_boxes[2] * n_classes, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='classes6')(conv6)
    classes7 = Conv2D(n_boxes[3] * n_classes, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='classes7')(conv7)
    # Output shape of `boxes`: `(batch, height, width, n_boxes * 4)`
    boxes4 = Conv2D(n_boxes[0] * 4, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='boxes4')(conv4)
    boxes5 = Conv2D(n_boxes[1] * 4, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='boxes5')(conv5)
    boxes6 = Conv2D(n_boxes[2] * 4, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='boxes6')(conv6)
    boxes7 = Conv2D(n_boxes[3] * 4, (3, 3), strides=(1, 1), padding="same", kernel_initializer='he_normal', kernel_regularizer=l2(l2_reg), name='boxes7')(conv7)
    # Generate the anchor boxes
    # Output shape of `anchors`: `(batch, height, width, n_boxes, 8)`
    anchors4 = AnchorBoxes(img_height, img_width, this_scale=scales[0], next_scale=scales[1], aspect_ratios=aspect_ratios[0],
                           two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[0], this_offsets=offsets[0],
                           limit_boxes=limit_boxes, variances=variances, coords=coords, normalize_coords=normalize_coords, name='anchors4')(boxes4)
    anchors5 = AnchorBoxes(img_height, img_width, this_scale=scales[1], next_scale=scales[2], aspect_ratios=aspect_ratios[1],
                           two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[1], this_offsets=offsets[1],
                           limit_boxes=limit_boxes, variances=variances, coords=coords, normalize_coords=normalize_coords, name='anchors5')(boxes5)
    anchors6 = AnchorBoxes(img_height, img_width, this_scale=scales[2], next_scale=scales[3], aspect_ratios=aspect_ratios[2],
                           two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[2], this_offsets=offsets[2],
                           limit_boxes=limit_boxes, variances=variances, coords=coords, normalize_coords=normalize_coords, name='anchors6')(boxes6)
    anchors7 = AnchorBoxes(img_height, img_width, this_scale=scales[3], next_scale=scales[4], aspect_ratios=aspect_ratios[3],
                           two_boxes_for_ar1=two_boxes_for_ar1, this_steps=steps[3], this_offsets=offsets[3],
                           limit_boxes=limit_boxes, variances=variances, coords=coords, normalize_coords=normalize_coords, name='anchors7')(boxes7)
    # Reshape the class predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, n_classes)`
    # We want the classes isolated in the last axis to perform softmax on them
    classes4_reshaped = Reshape((-1, n_classes), name='classes4_reshape')(classes4)
    classes5_reshaped = Reshape((-1, n_classes), name='classes5_reshape')(classes5)
    classes6_reshaped = Reshape((-1, n_classes), name='classes6_reshape')(classes6)
    classes7_reshaped = Reshape((-1, n_classes), name='classes7_reshape')(classes7)
    # Reshape the box coordinate predictions, yielding 3D tensors of shape `(batch, height * width * n_boxes, 4)`
    # We want the four box coordinates isolated in the last axis to compute the smooth L1 loss
    boxes4_reshaped = Reshape((-1, 4), name='boxes4_reshape')(boxes4)
    boxes5_reshaped = Reshape((-1, 4), name='boxes5_reshape')(boxes5)
    boxes6_reshaped = Reshape((-1, 4), name='boxes6_reshape')(boxes6)
    boxes7_reshaped = Reshape((-1, 4), name='boxes7_reshape')(boxes7)
    # Reshape the anchor box tensors, yielding 3D tensors of shape `(batch, height * width * n_boxes, 8)`
    anchors4_reshaped = Reshape((-1, 8), name='anchors4_reshape')(anchors4)
    anchors5_reshaped = Reshape((-1, 8), name='anchors5_reshape')(anchors5)
    anchors6_reshaped = Reshape((-1, 8), name='anchors6_reshape')(anchors6)
    anchors7_reshaped = Reshape((-1, 8), name='anchors7_reshape')(anchors7)
    # Concatenate the predictions from the different layers and the assosciated anchor box tensors
    # Axis 0 (batch) and axis 2 (n_classes or 4, respectively) are identical for all layer predictions,
    # so we want to concatenate along axis 1
    # Output shape of `classes_concat`: (batch, n_boxes_total, n_classes)
    classes_concat = Concatenate(axis=1, name='classes_concat')([classes4_reshaped,
                                                                 classes5_reshaped,
                                                                 classes6_reshaped,
                                                                 classes7_reshaped])
    # Output shape of `boxes_concat`: (batch, n_boxes_total, 4)
    boxes_concat = Concatenate(axis=1, name='boxes_concat')([boxes4_reshaped,
                                                             boxes5_reshaped,
                                                             boxes6_reshaped,
                                                             boxes7_reshaped])
    # Output shape of `anchors_concat`: (batch, n_boxes_total, 8)
    anchors_concat = Concatenate(axis=1, name='anchors_concat')([anchors4_reshaped,
                                                                 anchors5_reshaped,
                                                                 anchors6_reshaped,
                                                                 anchors7_reshaped])
    # The box coordinate predictions will go into the loss function just the way they are,
    # but for the class predictions, we'll apply a softmax activation layer first
    classes_softmax = Activation('softmax', name='classes_softmax')(classes_concat)
    # Concatenate the class and box coordinate predictions and the anchors to one large predictions tensor
    # Output shape of `predictions`: (batch, n_boxes_total, n_classes + 4 + 8)
    predictions = Concatenate(axis=2, name='predictions')([classes_softmax, boxes_concat, anchors_concat])
    model = Model(inputs=x, outputs=predictions)
    if return_predictor_sizes:
        # Get the spatial dimensions (height, width) of the convolutional predictor layers, we need them to generate the default boxes
        # The spatial dimensions are the same for the `classes` and `boxes` predictors
        predictor_sizes = np.array([classes4._keras_shape[1:3],
                                    classes5._keras_shape[1:3],
                                    classes6._keras_shape[1:3],
                                    classes7._keras_shape[1:3]])
        return model, predictor_sizes
    else:
        return model
| [
"yopedro1989@gmail.com"
] | yopedro1989@gmail.com |
00771ef6f76b859f25d36894db8880b12c753980 | 2dfca672ccbfdee96fcad1668ca830d3c3163a63 | /regular expression/unicode_utf8.py | a7991f609702daa82b21a2d6edb0764e690205f1 | [] | no_license | lingtao13/python_learning | 9918c3266cc6bca6976d2ba296147665515a435d | 4cbe6ce12444841a2400d3b96b564bfebaddd10d | refs/heads/master | 2021-09-05T10:03:25.456150 | 2018-01-26T08:27:44 | 2018-01-26T08:27:44 | 119,025,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10 | py |
"""
""" | [
"nelsonpeng13@gmail.com"
] | nelsonpeng13@gmail.com |
1fa254abe43a00a7db476de18250c92b887b03bd | 0b413c6ee41ebbda5bef70735e7e80f715f3db6f | /managers.py | a298b54c5f864636709bfedf12e8b639c4224fa6 | [] | no_license | mcolyer/baseradix | dbc14bce0db87c878fc2134ac704e9a9ef074ad2 | 9a160db9d13228bbc55c720bf050b6117d477897 | refs/heads/master | 2021-01-23T07:33:47.059705 | 2007-01-18T04:23:35 | 2007-01-18T04:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,484 | py | import pygame, os
import random
import utility
from xml.dom import minidom
from xml.dom.minidom import Document
from xml import xpath
#Drives autonomous NPC wandering: precomputes walk paths toward friends
#(plus fallback straight-line paths) and steps NPCs along them each frame.
class NetworkManager:
    """Moves NPCs along precomputed paths and handles rest/stop states.

    network maps each NPC sprite to a list of its "friend" sprites;
    objects is the collidable doodad collection; minmax is stored but
    not used by the visible methods.
    """
    def __init__(self, network, objects, minmax):
        self.network = network
        self.objects = objects
        self.minmax = minmax
    def manage(self):
        """Advance every NPC one tick: walk, rest, or wait while stopped.

        Counters: node.wait counts ticks in the current rest/stop state
        and 300 ticks ends that state.  Collisions undo the move and flip
        the walk direction (node.reverse).
        """
        for node in self.network.keys():
            if not node.stop and not node.talking:
                if node.rest:
                    node.wait += 1
                    if node.wait > 300:
                        node.wait = 0
                        node.rest = 0
                else:
                    # Stop this NPC and any nearby friends so they can "meet".
                    friendlist = utility.distance(node, self.network)
                    if friendlist:
                        node.stop = 1
                        for friend in friendlist:
                            friend.stop = 1
                    # Remember the position so a colliding move can be undone.
                    prevPos = node.rect.topleft
                    self.walkPath(node)
                    if node.collide(self.objects):
                        node.rect.topleft = prevPos
                        if node.reverse:
                            node.reverse = 0
                        else:
                            node.reverse = 1
            else:
                # Stopped or talking: idle until the wait timer expires,
                # then resume in the resting state.
                node.wait += 1
                if node.wait > 300:
                    node.wait = 0
                    node.stop = 0
                    node.rest = 1
    #Called from main to setup predefined paths.
    def findPaths(self):
        """Build each NPC's path table: one path per friend plus fallbacks.

        npc.paths maps 1..n -> path dict; key 0 holds the path count.
        Walk/path cursors (pathindex/walkindex/reverse) are reset here.
        """
        for npc in self.network.keys():
            npc.pathindex = 1
            npc.walkindex = 1
            npc.reverse = 0
            paths = {}
            friendlist = self.network[npc]
            i = 1
            for friend in friendlist:
                path = self.pathFind(npc, friend)
                paths[i] = path
                i += 1
##            pathcheck = 0
##            for path in paths.values():
##                if len(path) > 10:
##                    pathcheck = 1
            #Artificial fallback paths in case all paths to friends are
            #blocked by doodads: try the 4 cardinals and 4 diagonals.
           # if not pathcheck:
            morepaths = self.fakePaths(npc)
            for path in morepaths:
                paths[i] = path
                i += 1
            # Key 0 stores the number of paths for quick wraparound checks.
            paths[0] = len(paths.keys())
            npc.paths = paths
    def pathFind(self, node, friend):
        """Return a step dict approximating a path from node to friend.

        The result maps 1..n -> [dx, dy] per-tick deltas (key 0 holds the
        step count).  Steps move diagonally while both axes have distance
        left, then finish along the remaining axis.  Step size is chosen
        randomly (1 or 2 pixels per tick).
        """
        stepsize = random.choice(range(1,3,1))
        firstpos = node.rect.topleft
        #nodepos = node.rect.topleft
        friendpos = friend.rect.topleft
        posdict = {}
        # Absolute per-axis distances between the two sprites.
        deltax = ((friendpos[0] - firstpos[0])**2)**.5
        deltay = ((friendpos[1] - firstpos[1])**2)**.5
        xsteps = range(0, int(deltax), stepsize)
        ysteps = range(0, int(deltay), stepsize)
        maxy = len(ysteps)
        maxx = len(xsteps)
        i = 1
        yin = 0
        xin = 0
        # Diagonal (or x-only once y is exhausted) steps for the x distance.
        for xstep in xsteps:
            if yin < maxy:
                ydir = stepsize
                yin += 1
            else:
                ydir = 0
            posdict[i] = [stepsize, ydir]
            i += 1
            xin += 1
        # Remaining y distance, moving diagonally while x steps remain.
        if yin <= maxy:
            for step in range(0,maxy - yin, 1):
                if xin < maxx:
                    xdir = stepsize
                    xin += 1
                else:
                    xdir = 0
                posdict[i] = [xdir, stepsize]
                i += 1
                yin += 1
        posdict[0] = len(posdict.keys())
        return posdict
    def fakePaths(self, node):
        """Return 8 straight-line paths (4 cardinal + 4 diagonal).

        Each path is a dict like pathFind's result, with random length
        (300-990 px) and random step size (1 or 2 px per tick).
        """
        paths = []
        possiblepaths = range(300,1000,10)
        possiblesteps = range(1,3,1)
        xvals = [0,1,-1]
        yvals = [0,1,-1]
        for x in xvals:
            for y in yvals:
                if x == 0 and y == 0:
                    continue
                i = 1
                movedict = {}
                stepsize = random.choice(possiblesteps)
                pathlength = random.choice(possiblepaths)
                steps = range(0, pathlength, stepsize)
                for step in steps:
                    movedict[i] = [x*stepsize, y*stepsize]
                    i += 1
                movedict[0] = len(movedict.keys())
                paths.append(movedict)
        return paths
    def walkPath(self, node):
        """Move node one step along its current path.

        Forward motion negates the stored deltas; node.reverse retraces
        the path with the stored deltas.  Cursors wrap so NPCs cycle
        through all their paths indefinitely.
        """
        #if node.pathindex >= len(node.paths):
        if node.pathindex > node.paths[0]:
            node.pathindex = 1
        if node.reverse:
            # Walking back: apply stored deltas until the path start.
            node.rect.move_ip(node.paths[node.pathindex][node.walkindex])
            node.walkindex -= 1
            if node.walkindex <= 1:
                # Back at the start: advance to the next path.
                node.pathindex += 1
                node.reverse = 0
                node.walkindex = 1
            return
        else:
            # Walking forward: deltas are negated relative to storage.
            x = -node.paths[node.pathindex][node.walkindex][0]
            y = -node.paths[node.pathindex][node.walkindex][1]
            node.rect.move_ip(x, y)
            node.walkindex += 1
            #if node.walkindex >= len(node.paths[node.pathindex]):
            if node.walkindex > node.paths[node.pathindex][0]:
                # End of path reached: turn around next tick.
                node.reverse = 1
                node.walkindex -= 1
class KeyManager:
    """Polls the pygame event queue and tracks which keys are held down."""
    def __init__(self, player):
        # Maps pygame key constants -> True while the key is held down.
        self.keystates = {}
        self.player = player
    def detect(self, menu):
        """Drain pending pygame events, updating self.keystates.

        Returns 1 when the user asked to quit (window close or Escape);
        otherwise falls through (returning None) once the queue is empty.
        The menu argument is currently unused.
        """
        for event in pygame.event.get():
            if event.type == pygame.locals.QUIT:
                return 1
            elif event.type == pygame.locals.KEYDOWN and event.key == pygame.locals.K_ESCAPE:
                return 1
            elif event.type == pygame.locals.KEYDOWN:
                self.keystates[event.key] = True
                continue
                #self.player.execute(self.keystates, self.player)
            elif event.type == pygame.locals.KEYUP:
                self.keystates[event.key] = False
                continue
                #self.player.execute(self.keystates, self.player)
            else:
                continue
#This class manages the images in the game so that only one instance of each exists.
#This ensures that we don't have 100+ instances of the same image loaded in memory.
#All sprites and things should request their images through this thing.
class ImageManager:
    """Cache of loaded images, keyed by (full file path, color key).

    The same file requested with a different transparency color key is
    treated as a distinct image, matching utility.loadImage's behavior.
    """
    def __init__(self):
        # Maps (full path, colorKey) -> loaded image object.
        self.imgDict = {}
    def getFullPath(self, name, dirList=None):
        """Join the directory pieces in dirList plus the file name into a path.

        Args:
            name: the file name, e.g. 'sprite.png'.
            dirList: optional sequence of directory components.

        Returns:
            The platform-specific joined path string.
        """
        # None (not []) as the default avoids the mutable-default-argument trap.
        pieces = list(dirList) if dirList is not None else []
        fullName = ''
        for dirPiece in pieces + [name]:
            fullName = os.path.join(fullName, dirPiece)
        return fullName
    def getImage(self, name, dirList=None, colorKey=None):
        """Return the image for (path, colorKey), loading it on first request."""
        dirList = dirList if dirList is not None else []
        imgKey = (self.getFullPath(name, dirList), colorKey)
        # 'in' instead of dict.has_key, which was removed in Python 3.
        if imgKey not in self.imgDict:
            self.imgDict[imgKey] = utility.loadImage(name, dirList, colorKey)
        return self.imgDict[imgKey]
class ConversationManager:
    """Renders word-wrapped dialogue text beneath a talking sprite.

    Internal state uses name-mangled (double-underscore) attributes:
    rendered line surfaces, their rects, and the world-space anchor
    position set from the talker's rect.
    """
    def __init__(self):
        self.reset()
        # Font size, text color, and wrap width (in characters).
        self.size = 24
        self.color = (255,255,255)
        self.wrapLen = 40
    def setText(self, text):
        """Word-wrap text to wrapLen characters and render it as lines."""
        self.setLines(utility.wordWrap(text, self.wrapLen))
    def setPosFromTalkerRect(self, tRect):
        #Pass this the rect of the talking sprite to position the text below
        self.__posTop = tRect.bottom + 10
        self.__posCenterX = tRect.centerx
    def setLines(self, lines):
        """Render each line to a surface and record total height/max width."""
        self.reset()
        self.__lineSurfs = [ utility.getTextSurface(line, self.size, self.color) for line in lines ]
        for line in self.__lineSurfs:
            lineRect = line.get_rect()
            self.__lineRects.append(lineRect)
            if lineRect.width > self.__maxwidth:
                self.__maxwidth = lineRect.width
            self.__totalHeight = self.__totalHeight + lineRect.height
    def reset(self):
        """Clear all rendered lines and position/size bookkeeping."""
        self.__posTop = 0
        self.__posCenterX = 0
        self.__totalHeight = 0
        self.__maxwidth = 0
        self.__lineSurfs = []
        self.__lineRects = []
    def draw(self, surf, screenRect):
        """Blit the lines onto surf, centered, converting world to screen coords.

        screenRect is the camera rect; subtracting its top/left converts
        the stored world-space anchor into surface coordinates.
        """
        if not self.__lineSurfs:
            return
        curY = self.__posTop - screenRect.top
        centX = self.__posCenterX - screenRect.left
        for ind in range(len(self.__lineSurfs)):
            curRect = self.__lineRects[ind]
            curSurf = self.__lineSurfs[ind]
            curRect.top = curY
            curY = curY + curRect.height
            curRect.left = int(centX-(curRect.width/2))
            surf.blit(curSurf, curRect)
# vim:set ts=4 expandtab nowrap:
| [
"mcolyer@c41ea901-431d-0410-89d7-be7f727d2478"
] | mcolyer@c41ea901-431d-0410-89d7-be7f727d2478 |
920d2263cbeb1e5be4d7cfac31f5ccec2fafdc5a | 1f0831db24ae2772d4944faf05289599bb37aca7 | /data_crawling/08/api/setup.py | 4d6fe9c915145a8327f8ea7ba8e946c9660fc6d8 | [] | no_license | smaystr/rails_reactor | 2123f39ae97f38acb647363979fe4a09b896670e | 69c8aac5860527768b4a8b7bce027b9dea6b1989 | refs/heads/master | 2022-08-19T05:35:21.535933 | 2019-08-28T12:46:22 | 2019-08-28T12:46:22 | 189,264,026 | 1 | 0 | null | 2022-07-29T22:34:56 | 2019-05-29T16:47:08 | Jupyter Notebook | UTF-8 | Python | false | false | 660 | py | from setuptools import setup
# Read the pinned runtime dependencies from requirements.txt so the package
# metadata stays in sync with the file used for pip installs.
with open('requirements.txt') as f:
    requirements = f.read().splitlines()
# Package metadata for the DOM.RIA crawler API service.
setup(
    name="sergey_milantiev_crawler_master",
    version="0.0.0",
    install_requires=requirements,
    packages=["app"],
    author="sergey.milantiev@gmail.com",
    url="",
    download_url="",
    description="CRAWLER DOMRIA API",
    long_description="",
    license="MIT",
    keywords="",
    classifiers=[
        "Intended Audience :: Developers",
        "Programming Language :: Python",
        "Natural Language :: English",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3.7",
    ],
)
| [
"smaystr@gmail.com"
] | smaystr@gmail.com |
4973a275a2a793d84da7440f0dbb6675e210b2fc | 4bc44d83a8f7f6818b8df8f330836fc55a22b1ff | /models/costvolume_regularization.py | a059b39b7f4aff55a6be95b2e35e59562ce8ea31 | [] | no_license | matsuren/omnimvs_pytorch | 2bdd1e0fdd4c052fb786e4fab24f1d02bec800b6 | 82bab981a784d37ab6ed135739fb696aa9304168 | refs/heads/master | 2022-11-04T13:19:11.491990 | 2022-10-29T07:37:12 | 2022-10-29T07:37:12 | 232,974,747 | 55 | 12 | null | null | null | null | UTF-8 | Python | false | false | 3,015 | py | import torch
import torch.nn as nn
def conv_3d_relu(inplanes, planes, kernel_size, stride, pad):
    """Build a 3D convolution (no bias) followed by an in-place ReLU.

    Args:
        inplanes: number of input channels.
        planes: number of output channels.
        kernel_size: convolution kernel size.
        stride: convolution stride.
        pad: zero-padding applied on every spatial side.

    Returns:
        An nn.Sequential containing [Conv3d, ReLU].
    """
    convolution = nn.Conv3d(
        inplanes,
        planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=pad,
        bias=False,
    )
    activation = nn.ReLU(inplace=True)
    return nn.Sequential(convolution, activation)
class CostVolumeComputation(nn.Module):
    """3D hourglass network that regularizes a cost volume.

    An encoder of strided 3D conv blocks downsamples the volume, and a
    decoder of transposed convs upsamples it back, adding skip
    connections from same-resolution encoder outputs.  The final layer
    reduces to a single channel at twice the input resolution.

    NOTE: attribute names (conv1, deconv1, ...) are state-dict keys; do
    not rename them or saved checkpoints will no longer load.
    """
    def __init__(self, inplanes=64):
        # inplanes: channel count of the incoming cost volume (default 64).
        super(CostVolumeComputation, self).__init__()
        # conv (encoder): stride-2 blocks halve each spatial dim;
        # the *_make_layer pairs refine at constant resolution.
        planes = 64
        self.conv1 = conv_3d_relu(inplanes, planes, 3, 1, 1)
        self.conv23 = self._make_layer(conv_3d_relu, planes, 2)
        planes = 64 * 2
        self.conv4 = conv_3d_relu(planes // 2, planes, 3, 2, 1)
        self.conv56 = self._make_layer(conv_3d_relu, planes, 2)
        self.conv7 = conv_3d_relu(planes, planes, 3, 2, 1)
        self.conv89 = self._make_layer(conv_3d_relu, planes, 2)
        self.conv10 = conv_3d_relu(planes, planes, 3, 2, 1)
        self.conv11_12 = self._make_layer(conv_3d_relu, planes, 2)
        planes = 128 * 2
        self.conv13 = conv_3d_relu(planes // 2, planes, 3, 2, 1)
        self.conv14_15 = self._make_layer(conv_3d_relu, planes, 2)
        # deconv (decoder): transposed convs double each spatial dim.
        planes = 128
        self.deconv1 = nn.ConvTranspose3d(planes * 2, planes, 3, 2, 1, bias=False)
        self.relu1 = nn.ReLU(inplace=True)
        self.deconv2 = nn.ConvTranspose3d(planes, planes, 3, 2, 1, bias=False)
        self.relu2 = nn.ReLU(inplace=True)
        self.deconv3 = nn.ConvTranspose3d(planes, planes, 3, 2, 1, bias=False)
        self.relu3 = nn.ReLU(inplace=True)
        planes = 64
        self.deconv4 = nn.ConvTranspose3d(planes * 2, planes, 3, 2, 1, bias=False)
        self.relu4 = nn.ReLU(inplace=True)
        # Final single-channel output head.
        self.deconv5 = nn.ConvTranspose3d(planes, 1, 3, 2, 1, bias=False)
    def _make_layer(self, block, planes, blocks):
        """Stack `blocks` channel-preserving 3x3x3 stride-1 conv blocks."""
        layers = []
        for _ in range(blocks):
            layers.append(block(planes, planes, 3, 1, 1))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Regularize cost volume x; returns a 1-channel upsampled volume.

        The refined outputs conv3/conv6/conv9/conv12 feed the decoder as
        skip connections; the downsampling branches (conv4/conv7/...)
        take the pre-refinement tensors, as written below.
        """
        # conv
        conv1 = self.conv1(x)
        conv3 = self.conv23(conv1)
        conv4 = self.conv4(conv1)
        conv6 = self.conv56(conv4)
        conv7 = self.conv7(conv4)
        conv9 = self.conv89(conv7)
        conv10 = self.conv10(conv7)
        conv12 = self.conv11_12(conv10)
        conv13 = self.conv13(conv10)
        conv15 = self.conv14_15(conv13)
        # deconv: output_size pins the exact shape so odd input sizes
        # still align with the skip tensors before the addition.
        deconv1 = self.deconv1(conv15, output_size=conv12.size())
        deconv1 = self.relu1(deconv1 + conv12)
        deconv2 = self.deconv2(deconv1, output_size=conv9.size())
        deconv2 = self.relu2(deconv2 + conv9)
        deconv3 = self.deconv3(deconv2, output_size=conv6.size())
        deconv3 = self.relu3(deconv3 + conv6)
        deconv4 = self.deconv4(deconv3, output_size=conv3.size())
        deconv4 = self.relu4(deconv4 + conv3)
        # final upsample to twice the input resolution
        d, h, w = deconv4.size()[2:]
        output_size = torch.Size([2 * d, 2 * h, 2 * w])
        deconv5 = self.deconv5(deconv4, output_size=output_size)
        return deconv5
| [
"komatsu@robot.t.u-tokyo.ac.jp"
] | komatsu@robot.t.u-tokyo.ac.jp |
9235b80e5ef760386db087cbeb1eedcff79edbd7 | def1b645cf84f25f746926771b7798215b505514 | /codereview/models.py | 446f6066db2bf1f7ab0a358eae2b61a87b4fc8ae | [
"Apache-2.0"
] | permissive | ojengwa/codereview | 07770b3dbe9e882749ff013a7dba9241e99b0ad5 | 23e3e6654fc09084724ddaa33d982df98e5e5a7b | refs/heads/master | 2021-01-17T07:27:46.977826 | 2015-10-13T14:46:26 | 2015-10-13T14:46:26 | 51,166,443 | 1 | 0 | null | 2016-02-05T18:40:31 | 2016-02-05T18:40:31 | null | UTF-8 | Python | false | false | 29,543 | py | # Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Engine data model (schema) definition for Rietveld."""
import logging
from hashlib import md5
import os
import re
import time
from google.appengine.ext import db
from google.appengine.api import memcache
from google.appengine.api import users
from django.utils.encoding import force_unicode
import engine
import patching
CONTEXT_CHOICES = (3, 10, 25, 50, 75, 100)
# GQL query cache ###
_query_cache = {}
def gql(cls, clause, *args, **kwds):
    """Return a query object, from the cache if possible.

    Args:
      cls: a db.Model subclass.
      clause: a query clause, e.g. 'WHERE draft = TRUE'.
      *args, **kwds: positional and keyword arguments to be bound to the query.

    Returns:
      A db.GqlQuery instance corresponding to the query with *args and
      **kwds bound to the query.
    """
    query_string = 'SELECT * FROM %s %s' % (cls.kind(), clause)
    try:
        query = _query_cache[query_string]
    except KeyError:
        # First time we see this query string: build and memoize it.
        query = db.GqlQuery(query_string)
        _query_cache[query_string] = query
    query.bind(*args, **kwds)
    return query
# Issues, PatchSets, Patches, Contents, Comments, Messages ###
class Issue(db.Model):
    """The major top-level entity.

    It has one or more PatchSets as its descendants.
    """
    subject = db.StringProperty(required=True)
    description = db.TextProperty()
    #: in Subversion - repository path (URL) for files in patch set
    base = db.StringProperty()
    #: if True then base files for patches were uploaded with upload.py
    #: (if False - then Rietveld attempts to download them from server)
    local_base = db.BooleanProperty(default=False)
    repo_guid = db.StringProperty()
    owner = db.UserProperty(auto_current_user_add=True, required=True)
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    reviewers = db.ListProperty(db.Email)
    cc = db.ListProperty(db.Email)
    closed = db.BooleanProperty(default=False)
    private = db.BooleanProperty(default=False)
    # Cached count of non-draft comments; None means "not yet computed".
    n_comments = db.IntegerProperty()
    latest_patch_rev = db.StringProperty(required=False)
    latest_reviewed_rev = db.StringProperty(required=False)
    processing = db.BooleanProperty(default=False)
    # Per-request cache for the is_starred property.
    _is_starred = None
    class Meta:
        permissions = (
            ("view_issue", "View issue"),
        )
    @property
    def is_starred(self):
        """Whether the current user has this issue starred."""
        if self._is_starred is not None:
            return self._is_starred
        account = Account.current_user_account
        self._is_starred = account is not None and self.key(
        ).id() in account.stars
        return self._is_starred
    def user_can_edit(self, user):
        """Return true if the given user has permission to edit this issue."""
        return user == self.owner or user.has_perm('codereview.change_issue')
    @property
    def edit_allowed(self):
        """Whether the current user can edit this issue."""
        account = Account.current_user_account
        if account is None:
            return False
        return self.user_can_edit(account.user)
    def update_comment_count(self, n):
        """Increment the n_comments property by n.

        If n_comments in None, compute the count through a query. (This
        is a transitional strategy while the database contains Issues
        created using a previous version of the schema.)
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        self.n_comments += n
    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, it is computed through a query, and stored,
        using n_comments as a cache.
        """
        if self.n_comments is None:
            self.n_comments = self._get_num_comments()
        return self.n_comments
    def _get_num_comments(self):
        """Helper to compute the number of comments through a query."""
        return gql(Comment,
                   'WHERE ANCESTOR IS :1 AND draft = FALSE',
                   self).count()
    # Per-request cache for the num_drafts property.
    _num_drafts = None
    @property
    def num_drafts(self):
        """The number of draft comments on this issue for the current user.

        The value is expensive to compute, so it is cached.
        """
        if self._num_drafts is None:
            account = Account.current_user_account
            if account is None:
                self._num_drafts = 0
            else:
                query = gql(Comment,
                            'WHERE ANCESTOR IS :1 AND author = :2 AND draft = TRUE',
                            self, account.user)
                self._num_drafts = query.count()
        return self._num_drafts
    @property
    def patchsets(self):
        """Get issue patchsets in order (oldest first)."""
        return self.patchset_set.order('created')
    @property
    def latest_patchset(self):
        """Get latest patchset, or None if the issue has none."""
        try:
            return self.patchsets.reverse()[0]
        except IndexError:
            return None
    @property
    def latest_patchset_number(self):
        """Get latest patchset number (1-based), or None."""
        try:
            return list(self.patchsets).index(self.latest_patchset) + 1
        except ValueError:
            return None
    @property
    def latest_approved_patchset(self):
        """Get latest patchset whose revision matches latest_reviewed_rev.

        Returns None when no revision has been reviewed or none matches.
        """
        if self.latest_reviewed_rev:
            try:
                return self.patchsets.reverse().filter(revision=self.latest_reviewed_rev)[0]
            except IndexError:
                return None
    @property
    def latest_approved_patchset_number(self):
        """Get latest approved patchset number (1-based), or None."""
        try:
            return list(self.patchsets).index(self.latest_approved_patchset) + 1
        except ValueError:
            return None
class PatchSet(db.Model):
    """A set of patchset uploaded together.

    This is a descendant of an Issue and has Patches as descendants.
    """
    issue = db.ReferenceProperty(Issue)  # == parent
    message = db.StringProperty()
    data = db.TextProperty()  # blob: the raw uploaded patchset data
    url = db.LinkProperty()
    created = db.DateTimeProperty(auto_now_add=True)
    modified = db.DateTimeProperty(auto_now=True)
    n_comments = db.IntegerProperty(default=0)
    # Source-control revision this patchset was made against.
    revision = db.StringProperty(required=False)
    class Meta:
        permissions = (
            ("approve_patchset", "Approve patchset"),
        )
    def update_comment_count(self, n):
        """Increment the n_comments property by n."""
        self.n_comments = self.num_comments + n
    @property
    def num_comments(self):
        """The number of non-draft comments for this issue.

        This is almost an alias for self.n_comments, except that if
        n_comments is None, 0 is returned.
        """
        # For older patchsets n_comments is None.
        return self.n_comments or 0
class Message(db.Model):
    """A copy of a message sent out in email.

    This is a descendant of an Issue.
    """
    issue = db.ReferenceProperty(Issue)  # == parent
    subject = db.StringProperty()
    sender = db.EmailProperty()
    recipients = db.ListProperty(db.Email)
    date = db.DateTimeProperty(auto_now_add=True)
    text = db.TextProperty()
    draft = db.BooleanProperty(default=False)
    # Per-request cache for the approval property.
    _approval = None
    @property
    def approval(self):
        """Is True when the message represents an approval of the review."""
        if self._approval is None:
            # Must contain 'lgtm' in a line that doesn't start with '>'
            # (i.e. not in quoted reply text).
            self._approval = any(
                True for line in self.text.lower().splitlines()
                if not line.strip().startswith('>') and 'lgtm' in line)
            # Must not be issue owner: owners cannot approve their own issues.
            self._approval &= self.issue.owner.email() != self.sender
        return self._approval
class Content(db.Model):
    """The content of a text file.

    This is a descendant of a Patch.
    """
    # parent => Patch
    text = db.TextProperty()
    data = db.TextProperty()  # blob: raw bytes for binary content
    # Checksum over text or data depending on the type of this content.
    checksum = db.TextProperty()
    # True when the content was uploaded by the client (vs fetched).
    is_uploaded = db.BooleanProperty(default=False)
    # True when the uploaded content failed validation.
    is_bad = db.BooleanProperty(default=False)
    file_too_large = db.BooleanProperty(default=False)
    @property
    def lines(self):
        """The text split into lines, retaining line endings."""
        if not self.text:
            return []
        return self.text.splitlines(True)
class Patch(db.Model):
    """A single patch, i.e. a set of changes to a single file.

    This is a descendant of a PatchSet.
    """
    patchset = db.ReferenceProperty(PatchSet)  # == parent
    filename = db.StringProperty()
    old_filename = db.StringProperty()
    status = db.StringProperty()  # 'A', 'A +', 'M', 'D' etc
    text = db.TextProperty()
    content = db.ReferenceProperty(Content)
    patched_content = db.ReferenceProperty(
        Content, collection_name='patch2_set')
    is_binary = db.BooleanProperty(default=False)
    # Ids of patchsets that have a different version of this file.
    delta = db.ListProperty(int)
    delta_calculated = db.BooleanProperty(default=False)
    # Per-request cache for the lines property.
    _lines = None
    @property
    def lines(self):
        """The patch split into lines, retaining line endings.

        The value is cached.
        """
        if self._lines is not None:
            return self._lines
        if not self.text:
            lines = []
        else:
            lines = self.text.splitlines(True)
        self._lines = lines
        return lines
    _property_changes = None
    @property
    def property_changes(self):
        """The property changes split into lines.

        Parses the svn 'Property changes on ...' trailer block.
        The value is cached.
        """
        if self._property_changes is not None:
            return self._property_changes
        self._property_changes = []
        match = re.search(
            '^Property changes on.*\n' + '_' * 67 + '$', self.text,
            re.MULTILINE)
        if match:
            self._property_changes = self.text[match.end():].splitlines()
        return self._property_changes
    _num_added = None
    @property
    def num_added(self):
        """The number of line additions in this patch.

        The value is cached.
        """
        # -1 compensates for the '+++' diff header line.
        if self._num_added is None:
            self._num_added = self.count_startswith('+') - 1
        return self._num_added
    _num_removed = None
    @property
    def num_removed(self):
        """The number of line removals in this patch.

        The value is cached.
        """
        # -1 compensates for the '---' diff header line.
        if self._num_removed is None:
            self._num_removed = self.count_startswith('-') - 1
        return self._num_removed
    _num_chunks = None
    @property
    def num_chunks(self):
        """The number of 'chunks' in this patch.

        A chunk is a block of lines starting with '@@'.
        The value is cached.
        """
        if self._num_chunks is None:
            self._num_chunks = self.count_startswith('@@')
        return self._num_chunks
    _num_comments = None
    @property
    def num_comments(self):
        """The number of non-draft comments for this patch.

        The value is cached.
        """
        if self._num_comments is None:
            self._num_comments = gql(Comment,
                                     'WHERE patch = :1 AND draft = FALSE',
                                     self).count()
        return self._num_comments
    _num_drafts = None
    @property
    def num_drafts(self):
        """The number of draft comments on this patch for the current user.

        The value is expensive to compute, so it is cached.
        """
        if self._num_drafts is None:
            account = Account.current_user_account
            if account is None:
                self._num_drafts = 0
            else:
                query = gql(Comment,
                            'WHERE patch = :1 AND draft = TRUE AND author = :2',
                            self, account.user)
                self._num_drafts = query.count()
        return self._num_drafts
    def count_startswith(self, prefix):
        """Returns the number of lines with the specified prefix."""
        return len([l for l in self.lines if l.startswith(prefix)])
    def get_content(self):
        """Get self.content, or fetch it if necessary.

        This is the content of the file to which this patch is relative.

        Returns:
          a Content instance.

        Raises:
          engine.FetchError: If there was a problem fetching it.
        """
        try:
            if self.content is not None:
                if self.content.is_bad:
                    msg = 'Bad content. Try to upload again.'
                    logging.warn('Patch.get_content: %s', msg)
                    raise engine.FetchError(msg)
                if self.content.is_uploaded and self.content.text is None:
                    msg = 'Upload in progress.'
                    logging.warn('Patch.get_content: %s', msg)
                    raise engine.FetchError(msg)
                else:
                    return self.content
        except db.Error:
            # This may happen when a Content entity was deleted behind our
            # back.
            self.content = None
        # No usable content reference: fetch the base file and persist it.
        content = engine.FetchBase(self.patchset.issue.base, self)
        content.put()
        self.content = content
        self.put()
        return content
    def get_patched_content(self):
        """Get self.patched_content, computing it if necessary.

        This is the content of the file after applying this patch.

        Returns:
          a Content instance.

        Raises:
          engine.FetchError: If there was a problem fetching the old content.
        """
        try:
            if self.patched_content is not None:
                return self.patched_content
        except db.Error:
            # This may happen when a Content entity was deleted behind our
            # back.
            self.patched_content = None
        # Apply this patch's chunks to the base content line by line.
        old_lines = self.get_content().text.splitlines(True)
        logging.info('Creating patched_content for %s', self.filename)
        chunks = patching.ParsePatchToChunks(self.lines, self.filename)
        new_lines = []
        for _, _, new in patching.PatchChunks(old_lines, chunks):
            new_lines.extend(new)
        text = db.Text(''.join(new_lines))
        patched_content = Content(text=text, parent=self)
        patched_content.put()
        self.patched_content = patched_content
        self.put()
        return patched_content
    @property
    def no_base_file(self):
        """Returns True iff the base file is not available."""
        return self.content and self.content.file_too_large
class Comment(db.Model):
    """A Comment for a specific line of a specific file.

    This is a descendant of a Patch.
    """
    patch = db.ReferenceProperty(Patch)  # == parent
    message_id = db.StringProperty()  # == key_name
    author = db.UserProperty(auto_current_user_add=True)
    date = db.DateTimeProperty(auto_now=True)
    lineno = db.IntegerProperty()
    text = db.TextProperty()
    # True when the comment is on the left (old) side of the diff.
    left = db.BooleanProperty()
    draft = db.BooleanProperty(required=True, default=True)
    # Computed (not stored) display attributes, filled in by complete().
    buckets = None
    shorttext = None
    def complete(self):
        """Set the shorttext and buckets attributes."""
        # TODO(guido): Turn these into caching proprties instead.

        # The strategy for buckets is that we want groups of lines that
        # start with > to be quoted (and not displayed by
        # default). Whitespace-only lines are not considered either quoted
        # or not quoted. Same goes for lines that go like "On ... user
        # wrote:".
        cur_bucket = []
        quoted = None
        self.buckets = []
        def _Append():
            # Flush cur_bucket (if non-empty) into self.buckets, tagging it
            # with the current quoted state.  Reads cur_bucket/quoted from
            # the enclosing scope at call time.
            if cur_bucket:
                self.buckets.append(Bucket(text="\n".join(cur_bucket),
                                           quoted=bool(quoted)))
        lines = self.text.splitlines()
        for line in lines:
            if line.startswith("On ") and line.endswith(":"):
                # Attribution line: doesn't change the quoted state.
                pass
            elif line.startswith(">"):
                if quoted is False:
                    # Transition unquoted -> quoted: flush the open bucket.
                    _Append()
                    cur_bucket = []
                quoted = True
            elif line.strip():
                if quoted is True:
                    # Transition quoted -> unquoted: flush the open bucket.
                    _Append()
                    cur_bucket = []
                quoted = False
            cur_bucket.append(line)
        _Append()
        self.shorttext = self.text.lstrip()[:50].rstrip()
        # Grab the first 50 chars from the first non-quoted bucket
        for bucket in self.buckets:
            if not bucket.quoted:
                self.shorttext = bucket.text.lstrip()[:50].rstrip()
                break
class Bucket(db.Model):
    """A 'Bucket' of text.

    A comment may consist of multiple text buckets, some of which may be
    collapsed by default (when they represent quoted text).

    NOTE: This entity is never written to the database. See Comment.complete().
    """
    # TODO(guido): Flesh this out.
    text = db.TextProperty()
    # True when the text is quoted reply material (collapsed in the UI).
    quoted = db.BooleanProperty()
# Repositories and Branches ###
class Repository(db.Model):
    """A specific Subversion repository."""
    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
    guid = db.StringProperty()  # global unique repository id
    def __str__(self):
        """Display the repository by its human-readable name."""
        return self.name
class Branch(db.Model):
    """A trunk, branch, or a tag in a specific Subversion repository."""
    repo = db.ReferenceProperty(Repository, required=True)
    # Cache repo.name as repo_name, to speed up set_branch_choices()
    # in views.IssueBaseForm.
    repo_name = db.StringProperty()
    category = db.StringProperty(required=True,
                                 choices=('*trunk*', 'branch', 'tag'))
    name = db.StringProperty(required=True)
    url = db.LinkProperty(required=True)
    owner = db.UserProperty(auto_current_user_add=True)
# Accounts ###
class Account(db.Model):
"""Maps a user or email address to a user-selected nickname, and more.
Nicknames do not have to be unique.
The default nickname is generated from the email address by
stripping the first '@' sign and everything after it. The email
should not be empty nor should it start with '@' (AssertionError
error is raised if either of these happens).
This also holds a list of ids of starred issues. The expectation
that you won't have more than a dozen or so starred issues (a few
hundred in extreme cases) and the memory used up by a list of
integers of that size is very modest, so this is an efficient
solution. (If someone found a use case for having thousands of
starred issues we'd have to think of a different approach.)
"""
user = db.UserProperty(auto_current_user_add=True, required=True)
email = db.EmailProperty(required=True) # key == <email>
nickname = db.StringProperty(required=True)
default_context = db.IntegerProperty(default=engine.DEFAULT_CONTEXT,
choices=CONTEXT_CHOICES)
default_column_width = db.IntegerProperty(
default=engine.DEFAULT_COLUMN_WIDTH)
created = db.DateTimeProperty(auto_now_add=True)
modified = db.DateTimeProperty(auto_now=True)
stars = db.ListProperty(int) # Issue ids of all starred issues
fresh = db.BooleanProperty()
uploadpy_hint = db.BooleanProperty(default=True)
notify_by_email = db.BooleanProperty(default=True)
notify_by_chat = db.BooleanProperty(default=False)
use_code_highlight = db.BooleanProperty(default=False)
# Current user's Account. Updated by
# middleware.AddUserToRequestMiddleware.
current_user_account = None
lower_email = db.StringProperty()
lower_nickname = db.StringProperty()
xsrf_secret = db.TextProperty() # blob
fogbugz_token = db.StringProperty()
"""Fogbuzg authorization toke."""
    # Note that this doesn't get called when doing multi-entity puts.
    def put(self):
        """Persist the account, refreshing the lowercase lookup fields.

        lower_email and lower_nickname back case-insensitive queries
        elsewhere in this class (see get_account_for_nickname).
        """
        self.lower_email = str(self.email).lower()
        self.lower_nickname = self.nickname.lower()
        super(Account, self).put()
@classmethod
def get_account_for_user(cls, user):
"""Get the Account for a user, creating a default one if needed."""
email = user.email()
assert email
key = '<%s>' % email
# Since usually the account already exists, first try getting it
# without the transaction implied by get_or_insert().
account = cls.get_by_key_name(key)
if account is not None:
return account
nickname = cls.create_nickname_for_user(user)
return cls.get_or_insert(
key, user=user, email=email, nickname=nickname,
fresh=True)
    @classmethod
    def create_nickname_for_user(cls, user):
        """Returns a unique nickname for a user.

        Starts from the email's local part and appends an increasing
        numeric suffix until the nickname doesn't collide with any
        existing account.  The range query fetches all nicknames sharing
        the same first character in one pass.
        """
        name = nickname = user.email().split('@', 1)[0]
        next_char = chr(ord(nickname[0].lower()) + 1)
        existing_nicks = [account.lower_nickname
                          for account in cls.gql(('WHERE lower_nickname >= :1 AND '
                                                  'lower_nickname < :2'),
                                                 nickname.lower(), next_char)]
        suffix = 0
        while nickname.lower() in existing_nicks:
            suffix += 1
            nickname = '%s%d' % (name, suffix)
        return nickname
@classmethod
def get_nickname_for_user(cls, user):
"""Get the nickname for a user."""
return cls.get_account_for_user(user).nickname
@classmethod
def get_account_for_email(cls, email):
"""Get the Account for an email address, or return None."""
assert email
key = '<%s>' % email
return cls.get_by_key_name(key)
@classmethod
def get_accounts_for_emails(cls, emails):
"""Get the Accounts for each of a list of email addresses."""
return cls.get_by_key_name(['<%s>' % email for email in emails])
@classmethod
def get_by_key_name(cls, key, **kwds):
"""Override db.Model.get_by_key_name() to use cached value if possible."""
if not kwds and cls.current_user_account is not None:
if key == cls.current_user_account.key().name():
return cls.current_user_account
return super(Account, cls).get_by_key_name(key, **kwds)
@classmethod
def get_multiple_accounts_by_email(cls, emails):
"""Get multiple accounts. Returns a dict by email."""
results = {}
keys = []
for email in emails:
if cls.current_user_account and email == cls.current_user_account.email:
results[email] = cls.current_user_account
else:
keys.append('<%s>' % email)
if keys:
accounts = cls.get_by_key_name(keys)
for account in accounts:
if account is not None:
results[account.email] = account
return results
@classmethod
def get_nickname_for_email(cls, email, default=None):
"""Get the nickname for an email address, possibly a default.
If default is None a generic nickname is computed from the email
address.
Args:
email: email address.
default: If given and no account is found, returned as the default value.
Returns:
Nickname for given email.
"""
account = cls.get_account_for_email(email)
if account is not None and account.nickname:
return account.nickname
if default is not None:
return default
return email.replace('@', '_')
@classmethod
def get_account_for_nickname(cls, nickname):
"""Get the list of Accounts that have this nickname."""
assert nickname
assert '@' not in nickname
return cls.all().filter('lower_nickname =', nickname.lower()).get()
@classmethod
def get_email_for_nickname(cls, nickname):
"""Turn a nickname into an email address.
If the nickname is not unique or does not exist, this returns None.
"""
account = cls.get_account_for_nickname(nickname)
if account is None:
return None
return account.email
    def user_has_selected_nickname(self):
        """Return True if the user picked the nickname.

        Normally this returns 'not self.fresh', but if that property is
        None, we assume that if the created and modified timestamp are
        within 2 seconds, the account is fresh (i.e. the user hasn't
        selected a nickname yet). We then also update self.fresh, so it
        is used as a cache and may even be written back if we're lucky.
        """
        if self.fresh is None:
            delta = self.created - self.modified
            # Simulate delta = abs(delta)
            if delta.days < 0:
                delta = -delta
            self.fresh = (delta.days == 0 and delta.seconds < 2)
        return not self.fresh
_drafts = None
@property
def drafts(self):
"""A list of issue ids that have drafts by this user.
This is cached in memcache.
"""
if self._drafts is None:
if self._initialize_drafts():
self._save_drafts()
return self._drafts
def update_drafts(self, issue, have_drafts=None):
    """Update the user's draft status for this issue.

    Args:
      issue: an Issue instance.
      have_drafts: optional bool forcing the draft status.  By default,
        issue.num_drafts is inspected (which may query the datastore).

    The cached draft list is saved (via _save_drafts) only when it
    actually changed.
    """
    dirty = False
    if self._drafts is None:
        dirty = self._initialize_drafts()
    # Renamed from `id` to avoid shadowing the builtin.
    issue_id = issue.key().id()
    if have_drafts is None:
        # Beware, this may do a query.
        have_drafts = bool(issue.num_drafts)
    if have_drafts:
        if issue_id not in self._drafts:
            self._drafts.append(issue_id)
            dirty = True
    else:
        if issue_id in self._drafts:
            self._drafts.remove(issue_id)
            dirty = True
    if dirty:
        self._save_drafts()
def _initialize_drafts(self):
    """Initialize self._drafts from scratch.

    This mostly exists as a schema conversion utility.

    Returns:
      True if the caller should call self._save_drafts(), False if the
      memcached copy was adopted as-is.
    """
    cached = memcache.get('user_drafts:' + self.email)
    if cached is not None:
        # Cache hit: use the memcached list unchanged.
        self._drafts = cached
        # logging.info('HIT: %s -> %s', self.email, self._drafts)
        return False
    # Cache miss: rebuild from draft comments.  We're looking for the
    # Issue key id; the ancestry of comments goes
    # Issue -> PatchSet -> Patch -> Comment, so it is three parents up.
    issue_ids = set()
    for comment in gql(Comment,
                       'WHERE author = :1 AND draft = TRUE',
                       self.user):
        issue_ids.add(comment.key().parent().parent().parent().id())
    self._drafts = list(issue_ids)
    # logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)
    return True
def _save_drafts(self):
    """Persist self._drafts to memcache (one-hour expiry)."""
    # logging.info('SAVING: %s -> %s', self.email, self._drafts)
    cache_key = 'user_drafts:' + self.email
    memcache.set(cache_key, self._drafts, 3600)
def get_xsrf_token(self, offset=0):
    """Return an XSRF token for the current user.

    Args:
      offset: hour offset added to the timestamp window, letting callers
        accept tokens from adjacent one-hour windows.

    Side effects:
      May update self.user (Google Account plus conversion) and lazily
      create self.xsrf_secret, writing the entity back in either case.
    """
    # This code assumes that
    # self.user.email() == users.get_current_user().email()
    current_user = users.get_current_user()
    if self.user.id != current_user.id:
        # Mainly for Google Account plus conversion.
        logging.info('Updating user_id for %s from %s to %s' % (
            self.user.email(), self.user.id, current_user.id))
        self.user = current_user
        self.put()
    if not self.xsrf_secret:
        self.xsrf_secret = os.urandom(8)
        self.put()
    m = md5(
        force_unicode(self.xsrf_secret).encode('utf-8')
        if isinstance(self.xsrf_secret, unicode)
        else self.xsrf_secret)
    email_str = self.lower_email
    if isinstance(email_str, unicode):
        email_str = email_str.encode('utf-8')
    # BUG FIX: hash the utf-8-encoded email computed above.  The original
    # passed self.lower_email directly, leaving email_str dead and raising
    # UnicodeEncodeError (under Python 2) for non-ASCII unicode addresses.
    # For plain-ASCII addresses the digest is unchanged.
    m.update(email_str)
    when = int(time.time()) // 3600 + offset
    m.update(str(when))
    return m.hexdigest()
| [
"bubenkoff@gmail.com"
] | bubenkoff@gmail.com |
2b92a7e9d9427602b561e5c1abd7a366a042b7df | 52bea59c6686c2ec260785c3fbc81f0349764e3d | /demo11.py | ec1972a49bba3f89c1877714280f15fb46411c46 | [] | no_license | harshwardhan-bit/python-programming | b834696ba518b938abfa4748d6b071b288b5d8f5 | 9e8fb03ce83e80197f475319bebd7a5b286bc01d | refs/heads/master | 2022-04-25T17:14:48.501288 | 2020-03-26T17:13:20 | 2020-03-26T17:13:20 | 250,322,099 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35 | py | a=13.5
b=2.2
c = a//b
print(c)
| [
"noreply@github.com"
] | harshwardhan-bit.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.