index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
23,000 | 8f69f0e9053d9edd75633b29628bd0a5aaf948e9 | from seabreeze.pyseabreeze.features._base import SeaBreezeFeature
# Definition
# ==========
#
# TODO: This feature needs to be implemented for pyseabreeze
#
class SeaBreezeGPIOFeature(SeaBreezeFeature):
    """GPIO feature interface stub.

    Every accessor raises NotImplementedError; a backend-specific subclass
    must supply the actual protocol implementation.
    """

    identifier = "gpio"

    # Pin mode constants exposed by the SeaBreeze GPIO protocol.
    modes = {
        "GPIO_OUTPUT_PUSH_PULL": 0x00,
        "GPIO_OPEN_DRAIN_OUTPUT": 0x01,
        "DAC_OUTPUT": 0x02,
        "GPIO_INPUT_HIGH_Z": 0x80,
        "GPIO_INPUT_PULL_DOWN": 0x81,
        "ADC_INPUT": 0x82,
    }

    def get_number_of_gpio_pins(self):
        raise NotImplementedError("implement in derived class")

    def get_gpio_output_enable_vector(self):
        raise NotImplementedError("implement in derived class")

    def set_gpio_output_enable_vector(self, output_enable_vector, bit_mask):
        raise NotImplementedError("implement in derived class")

    def get_gpio_value_vector(self):
        raise NotImplementedError("implement in derived class")

    def set_gpio_value_vector(self, value_vector, bit_mask):
        raise NotImplementedError("implement in derived class")

    def get_number_of_egpio_pins(self):
        raise NotImplementedError("implement in derived class")

    def get_egpio_available_modes(self, pin_number):
        raise NotImplementedError("implement in derived class")

    def get_egpio_current_mode(self, pin_number):
        raise NotImplementedError("implement in derived class")

    def set_egpio_mode(self, pin_number, mode, value=0.0):
        raise NotImplementedError("implement in derived class")

    def get_egpio_output_vector_vector(self):
        raise NotImplementedError("implement in derived class")

    def set_egpio_output_vector(self, output_vector, bit_mask):
        raise NotImplementedError("implement in derived class")

    def get_egpio_value(self, pin_number):
        raise NotImplementedError("implement in derived class")

    def set_egpio_value(self, pin_number, value):
        raise NotImplementedError("implement in derived class")
|
class Visitor(object):
    """Base class for expression-tree visitors (visitor pattern)."""
    pass


class Eval(Visitor):
    """Visitor that evaluates an expression tree to a number."""

    def visit_int(self, i):
        return i.value

    def visit_add(self, a):
        return a.left.accept(self) + a.right.accept(self)

    def visit_mul(self, a):
        return a.left.accept(self) * a.right.accept(self)


class Print(Visitor):
    """Visitor that renders an expression tree in prefix notation."""

    def visit_int(self, i):
        # BUG FIX: previously returned the raw int, so printing a bare Int
        # node yielded a non-string; stringify for a consistent return type.
        # (Nested output is unchanged: str.format stringified leaves anyway.)
        return str(i.value)

    def visit_add(self, a):
        return "(+ {} {})".format(a.left.accept(self), a.right.accept(self))

    def visit_mul(self, a):
        return "(* {} {})".format(a.left.accept(self), a.right.accept(self))
|
23,002 | ba52e44611c1b092e167dba9bb821d3c677f911f | # Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: ags_py_codegen
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..models import ADTOForUnbanUserAPICall
from ..models import ADTOObjectForEqu8UserBanStatus
from ..models import ADTOObjectForEqu8UserStatus
from ..models import Action
from ..models import ErrorEntity
from ..models import UserBanRequest
from ..models import UserReportRequest
from ..models import ValidationErrorEntity
from ..operations.user_action import BanUsers
from ..operations.user_action import GetActions
from ..operations.user_action import GetBannedUsers
from ..operations.user_action import GetUserStatus
from ..operations.user_action import PublicReportUser
from ..operations.user_action import ReportUser
from ..operations.user_action import UnBanUsers
@same_doc_as(BanUsers)
def ban_users(
    body: Optional[UserBanRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Ban users temporarily or permanently (banUsers).

    POST /basic/v1/admin/namespaces/{namespace}/actions/ban
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [UPDATE]
    actionId 1 means a permanent ban; actionId 10 means a temporary ban.
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        BanUsers.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(BanUsers)
async def ban_users_async(
    body: Optional[UserBanRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of ban_users (banUsers).

    POST /basic/v1/admin/namespaces/{namespace}/actions/ban
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [UPDATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        BanUsers.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetActions)
def get_actions(
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Get configured actions (getActions).

    GET /basic/v1/admin/namespaces/{namespace}/actions
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        GetActions.create(namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetActions)
async def get_actions_async(
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of get_actions (getActions).

    GET /basic/v1/admin/namespaces/{namespace}/actions
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        GetActions.create(namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetBannedUsers)
def get_banned_users(
    user_ids: List[str],
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Get ban status for the given users (getBannedUsers).

    GET /basic/v1/admin/namespaces/{namespace}/actions/banned
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    Only users that are actually banned are included in the response.
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        GetBannedUsers.create(user_ids=user_ids, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetBannedUsers)
async def get_banned_users_async(
    user_ids: List[str],
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of get_banned_users (getBannedUsers).

    GET /basic/v1/admin/namespaces/{namespace}/actions/banned
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        GetBannedUsers.create(user_ids=user_ids, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetUserStatus)
def get_user_status(
    user_id: str,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Get a user's ban status (getUserStatus).

    GET /basic/v1/admin/namespaces/{namespace}/actions/status
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    No actionId in the response means the user is not banned; actionId with
    an expiry means a temporary ban, without one a permanent ban.
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        GetUserStatus.create(user_id=user_id, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(GetUserStatus)
async def get_user_status_async(
    user_id: str,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of get_user_status (getUserStatus).

    GET /basic/v1/admin/namespaces/{namespace}/actions/status
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [READ]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        GetUserStatus.create(user_id=user_id, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(PublicReportUser)
def public_report_user(
    user_id: str,
    body: Optional[UserReportRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Report a game user (publicReportUser).

    POST /basic/v1/public/namespaces/{namespace}/users/{userId}/actions/report
    Permission: NAMESPACE:{namespace}:USER:{userId}:ACTION [CREATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        PublicReportUser.create(user_id=user_id, body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(PublicReportUser)
async def public_report_user_async(
    user_id: str,
    body: Optional[UserReportRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of public_report_user (publicReportUser).

    POST /basic/v1/public/namespaces/{namespace}/users/{userId}/actions/report
    Permission: NAMESPACE:{namespace}:USER:{userId}:ACTION [CREATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        PublicReportUser.create(user_id=user_id, body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(ReportUser)
def report_user(
    body: Optional[UserReportRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Report a game player on behalf of a game service (reportUser).

    POST /basic/v1/admin/namespaces/{namespace}/actions/report
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [CREATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        ReportUser.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(ReportUser)
async def report_user_async(
    body: Optional[UserReportRequest] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of report_user (reportUser).

    POST /basic/v1/admin/namespaces/{namespace}/actions/report
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [CREATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        ReportUser.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(UnBanUsers)
def un_ban_users(
    body: Optional[ADTOForUnbanUserAPICall] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Unban users (unBanUsers).

    POST /basic/v1/admin/namespaces/{namespace}/actions/unban
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [UPDATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return run_request(
        UnBanUsers.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
@same_doc_as(UnBanUsers)
async def un_ban_users_async(
    body: Optional[ADTOForUnbanUserAPICall] = None,
    namespace: Optional[str] = None,
    x_additional_headers: Optional[Dict[str, str]] = None,
    **kwargs
):
    """Async variant of un_ban_users (unBanUsers).

    POST /basic/v1/admin/namespaces/{namespace}/actions/unban
    Permission: ADMIN:NAMESPACE:{namespace}:ACTION [UPDATE]
    """
    # Fall back to the SDK-configured namespace when the caller gives none.
    if namespace is None:
        namespace, lookup_error = get_services_namespace()
        if lookup_error:
            return None, lookup_error
    return await run_request_async(
        UnBanUsers.create(body=body, namespace=namespace),
        additional_headers=x_additional_headers,
        **kwargs,
    )
|
23,003 | 724fe9c780ff6a9254c09d1923b2013232f0da22 | import pandas as pd
import numpy as np
import scipy as sp
from sklearn.preprocessing import PowerTransformer, LabelEncoder, OneHotEncoder, QuantileTransformer, MinMaxScaler
from sklearn.cluster import KMeans
from collections import OrderedDict
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVR
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import SGDRegressor, LassoCV, LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.preprocessing import PolynomialFeatures
from sklearn.metrics import mean_squared_error, r2_score, explained_variance_score
from acquire import get_zillow
from prepare import prep_zillow
from split_scale import scale
# Model experimentation
# Load and prepare the zillow data via project helpers (acquire/prepare modules).
zillow = get_zillow()
zillow = prep_zillow(zillow)
# ENCODE FIPS AND CITY
# actually not sure how to do that with city... there are many different values. might have to map them or something?
# 6037 is LA, 6059 is Orange, 6111 is Ventura
# One indicator column per county FIPS code.
zillow['la'] = (zillow['fips'] == 6037).astype(int)
zillow['orange'] = (zillow['fips'] == 6059).astype(int)
zillow['ventura'] = (zillow['fips'] == 6111).astype(int)
# split data
# Fixed random_state so the 70/30 split is reproducible.
train, test = train_test_split(zillow, test_size=.30, random_state=123)
# "data1" baseline: four raw (unscaled) features.
X1_train = train[['sqft', 'lotsqft', 'tax', 'age']]
y1_train = train['logerror']
test1 = test[['sqft', 'lotsqft', 'tax', 'age', 'logerror']]
features = ['beds_and_baths', 'sqft', 'fireplace', 'lat', 'long', 'lotsqft', 'pool',
            'tax', 'age', 'strucvaluebysqft', 'landvaluebysqft', 'la', 'orange', 'ventura']
target = 'logerror'
# SCALE
# Columns scaled with a quantile/uniform transform vs. min-max scaling.
uniform = ['beds_and_baths', 'sqft', 'lotsqft', 'strucvaluebysqft', 'landvaluebysqft', 'tax']
minmax = ['lat', 'long', 'age']
# NOTE(review): scale() appears to transform the named columns in place on
# train/test (from split_scale module) — confirm against its implementation.
uniform_scaler, train, test = scale(train, test, uniform, scaler='uniform')
minmax_scaler, train, test = scale(train, test, minmax, scaler='minmax')
X_train = train[features]
X_test = test[features]
y_train = train[target]
y_test = test[target]
# CLUSTERS
# Feature groups used to build the two KMeans clusterings below.
neighborhood = ['lat', 'long', 'strucvaluebysqft', 'landvaluebysqft']
amenities = ['beds_and_baths', 'sqft', 'lotsqft', 'age']
# SP - 11/11 changed from a print to a return statement at end of function.
def cluster_exam(max_k, X_train, features):
    """Fit KMeans for k = 1..max_k on X_train[features] and tabulate SSE.

    Returns a DataFrame with columns k, sse, and change_in_sse (the first
    difference of sse) to support elbow-method inspection.
    """
    k_values = list(range(1, max_k + 1))
    # inertia_: sum of squared distances of samples to their closest centroid.
    inertias = [
        KMeans(n_clusters=k).fit(X_train[features]).inertia_ for k in k_values
    ]
    frame = pd.DataFrame(dict(k=k_values, sse=inertias))
    return frame.assign(change_in_sse=lambda df: df.sse.diff())
# [Insert into NB]
# cluster_exam(10, X_train, neighborhood) # Let's go with 7 or 8
# cluster_exam(10, X_train, amenities) # Probably 3, 5, or 8
# Data of all cluster features added:
X3_train = X_train.copy(deep=True)
test3 = test.copy(deep=True)
# Fit KMeans on the training frame only, then label train and test with it.
neighborhood_kmeans = KMeans(n_clusters=8)
neighborhood_kmeans.fit(X_train[neighborhood])
X3_train['neighborhood'] = neighborhood_kmeans.predict(X3_train[neighborhood])
test3['neighborhood'] = neighborhood_kmeans.predict(test3[neighborhood])
amenities_kmeans = KMeans(n_clusters=8)
amenities_kmeans.fit(X_train[amenities])
X3_train['amenities'] = amenities_kmeans.predict(X3_train[amenities])
test3['amenities'] = amenities_kmeans.predict(test3[amenities])
neighbor_feats = []
amenities_feats = []
# Encoding cluster features of neighborhood, and amenities, like OHE but not OHE
# NOTE(review): KMeans labels run 0..7 but this loop encodes 1..8, so cluster 0
# is never encoded and 'n8'/'a8' are always 0 — confirm this is intentional.
for i in range(1, 9):
    X3_train['n' + str(i)] = (X3_train.neighborhood == i).astype(int)
    X3_train['a' + str(i)] = (X3_train.amenities == i).astype(int)
    test3['n' + str(i)] = (test3.neighborhood == i).astype(int)
    test3['a' + str(i)] = (test3.amenities == i).astype(int)
    neighbor_feats.append('n' + str(i))
    amenities_feats.append('a' + str(i))
# Drop the raw cluster inputs and labels, keeping only the indicator columns.
X3_train = X3_train.drop(columns=(amenities + neighborhood + ['amenities', 'neighborhood']))
test3 = test3.drop(columns=(amenities + neighborhood + ['amenities', 'neighborhood']))
# DATA VALIDATION
bool_features = ['fireplace', 'pool', 'la', 'orange', 'ventura']
def validation(val_train, val_y_train, val_test, cols):
    """Drop binary features whose 0/1 groups show no significant logerror gap.

    Attaches val_y_train to val_train as a 'logerror' column (this mutates
    val_train — later code relies on it), runs a two-sample t-test for each
    feature in cols, and removes every feature with p > .05 from both frames.

    Returns (filtered_train, filtered_test).
    """
    val_train['logerror'] = val_y_train
    insignificant = []
    for col in cols:
        zeros = val_train[val_train[col] == 0].logerror.dropna()
        ones = val_train[val_train[col] == 1].logerror.dropna()
        p_value = sp.stats.ttest_ind(zeros, ones)[1]
        if p_value > .05:
            insignificant.append(col)
    return val_train.drop(columns=insignificant), val_test.drop(columns=insignificant)
# Keep only the boolean features that passed the t-test screen.
X2_train, test2 = validation(X_train, y_train, test, bool_features)
# validation() attaches 'logerror' to its train frame; strip it from the X matrix.
X2_train = X2_train.drop(columns='logerror')
X4_train, test4 = validation(X3_train, y_train, test3, bool_features + neighbor_feats + amenities_feats)
X4_train = X4_train.drop(columns='logerror')
# MODEL
def evaluate(model, x_train, y_train):
    """Return the RMSE of *model*'s predictions on (x_train, y_train)."""
    y_pred = model.predict(x_train)
    # BUG FIX: `mse ** 1 / 2` parses as (mse ** 1) / 2, i.e. half the MSE,
    # not its square root. Use ** 0.5 to compute a true RMSE.
    rmse = mean_squared_error(y_train, y_pred) ** 0.5
    return rmse
def evaluate_dict(model_dict):
    """Map each model name in *model_dict* to its RMSE on its training data.

    Expects entries of the form {'obj': model, 'data': {'x': ..., 'y': ...}}.
    """
    return {
        name: evaluate(entry['obj'], entry['data']['x'], entry['data']['y'])
        for name, entry in model_dict.items()
    }
# Takes a dictionary of models and returns the best performing model
# Dictionary format example:
# model_dict = {'model_name': {'obj': model_object, 'data': data_dict}}
def best_model(model_dict):
    """Return [best_entry, rmse] for the lowest training-RMSE model.

    BUG FIX: the accumulator previously started at [None, 1], so if every
    model had an RMSE >= 1 the function returned [None, 1] and callers that
    subscript result[0]['obj'] crashed. Starting from +inf guarantees a
    model is always selected.
    """
    best = [None, float("inf")]
    for name in model_dict:
        entry = model_dict[name]
        error = evaluate(entry['obj'], entry['data']['x'], entry['data']['y'])
        if error < best[1]:
            best = [entry, error]
    return best
def best_test(model_dict):
    """Return [best_entry, rmse] for the lowest held-out-RMSE model.

    Evaluates each model on its dataset's 'test' frame, restricted to the
    training feature columns, against the test 'logerror' column.
    Same fix as best_model: start from +inf instead of the sentinel 1, which
    silently returned [None, 1] when no model beat an RMSE of 1.
    """
    best = [None, float("inf")]
    for name in model_dict:
        entry = model_dict[name]
        feature_cols = entry['data']['x'].columns
        x = entry['data']['test'][feature_cols]
        y = entry['data']['test']['logerror']
        error = evaluate(entry['obj'], x, y)
        if error < best[1]:
            best = [entry, error]
    return best
# data1 includes our data without scaling or encoding and only uses these features: 'sqft', 'lotsqft', 'tax', 'age'
# data includes all features with scaling and encoding
# data2 eliminates features that did not pass our data validation test
# data3 is data with all cluster features added
# data3a is data3 but only amenities clusters
# data3n is data3 but only neighborhood clusters
# data4 is data3 but with feature validation
# data4a is data4 but only amenities clusters
# data4n is data4 but only neighborhood clusters
# Cluster indicator columns were named 'n1'..'n8' / 'a1'..'a8' above, so a
# first-letter match identifies the surviving neighborhood/amenity columns.
rem_neighbor = [c for c in X4_train.columns if c[0] == 'n']
rem_amenity = [c for c in X4_train.columns if c[0] == 'a']
# Features found insignificant in earlier analysis; removed for data5.
insig = ['age', 'la', 'lotsqft', 'orange', 'pool', 'sqft', 'tax']
# validation() attached 'logerror' to X3_train; strip it before modeling.
X3_train = X3_train.drop(columns='logerror')
datasets = {
    'data1': {'x': X1_train, 'y': y1_train, 'test': test1},
    'data': {'x': X_train.drop(columns='logerror'), 'y': y_train, 'test': test},
    'data2': {'x': X2_train, 'y': y_train, 'test': test2},
    'data3': {'x': X3_train, 'y': y_train, 'test': test3},
    'data3a': {'x': X3_train.drop(columns=neighbor_feats), 'y': y_train, 'test': test3.drop(columns=neighbor_feats)},
    'data3n': {'x': X3_train.drop(columns=amenities_feats), 'y': y_train, 'test': test3.drop(columns=amenities_feats)},
    'data4': {'x': X4_train, 'y': y_train, 'test': test4},
    'data4a': {'x': X4_train.drop(columns=rem_neighbor), 'y': y_train, 'test': test4.drop(columns=rem_neighbor)},
    'data4n': {'x': X4_train.drop(columns=rem_amenity), 'y': y_train, 'test': test4.drop(columns=rem_amenity)},
    'data5': {'x': X2_train.drop(columns=insig), 'y': y_train, 'test': test2.drop(columns=insig)}}
def many_models(dataset):
    """Fit a suite of linear regressors on dataset['x'] / dataset['y'].

    Returns {name: {'obj': fitted_model, 'data': dataset}} for models
    lm2..lm9 (linear regression, linear SVRs, SGD variants, LassoCV).

    BUG FIX: the original stored lm8 under the 'lm9' key, so the fitted
    LassoCV model was never returned.
    """
    data_x = dataset['x']
    data_y = dataset['y']
    candidates = {
        'lm2': LinearRegression(),
        'lm3': LinearSVR(random_state=123),
        'lm4': LinearSVR(random_state=123, dual=False, loss='squared_epsilon_insensitive'),
        'lm5': SGDRegressor(random_state=123),
        'lm6': SGDRegressor(random_state=123, loss='huber'),
        'lm7': SGDRegressor(random_state=123, loss='epsilon_insensitive'),
        'lm8': SGDRegressor(random_state=123, loss='squared_epsilon_insensitive'),
        'lm9': LassoCV(),
    }
    models = {}
    for name, model in candidates.items():
        model.fit(data_x, data_y)
        models[name] = {'obj': model, 'data': dataset}
    return models
# Select the best model per dataset by training RMSE, then by test RMSE.
# NOTE(review): best_model/best_test are each called twice per dataset,
# recomputing every RMSE — consider caching the result if this is slow.
best_ones = {}
for dataset in datasets:
    models = many_models(datasets[dataset])
    best_ones[dataset] = [best_model(models)[0]['obj'], best_model(models)[1]]
best_tests = {}
for dataset in datasets:
    models = many_models(datasets[dataset])
    best_tests[dataset] = [best_test(models)[0]['obj'], best_test(models)[1]]
set(zip(datasets['data5']['x'].columns, best_ones['data5'][0].coef_)) |
23,004 | 5d9de2a1e16e07f75a19386337dbb36798ee31de | import torch
import torch.nn.functional as F
import numpy as np
import copy
import time
from sklearn.model_selection import train_test_split
class DQNModule(torch.nn.Module):
    """Classification head over pooled features.

    Runs the input through feature_extraction_module, max-pools the result
    over dim 0 (one vector per bag of examples), and maps it through an MLP
    (Linear -> LeakyReLU -> Dropout per hidden width) ending in a Softmax
    over the n_classes outputs.
    """

    def __init__(self, n_classes, dropout, feature_extraction_module, linear_sizes):
        super(DQNModule, self).__init__()
        self.feature_extraction_module = feature_extraction_module
        stack = []
        in_features = feature_extraction_module.output_size
        for width in linear_sizes:
            stack.extend([
                torch.nn.Linear(in_features, width),
                torch.nn.LeakyReLU(),
                torch.nn.Dropout(dropout),
            ])
            in_features = width
        stack.append(torch.nn.Linear(in_features, n_classes))
        # Softmax over dim 0 is correct here: the pooled input is 1-D.
        stack.append(torch.nn.Softmax(dim=0))
        self.layers = torch.nn.Sequential(*stack)

    def forward(self, input):
        pooled = torch.max(self.feature_extraction_module(input), dim=0)[0]
        return self.layers(pooled)
class DQNNet:
    """Note that this network does NOT use a classifier.
    HistNet creates artificial samples (bags) with fixed size and learns from them. Every example in each sample goes through
    the network and we build a histogram with all the examples in a sample. This is used in the second part of the network where we use
    this vector to quantify.
    Args:
        train_epochs (int): How many times to repeat the process of going over training data.
        test_epochs (int): How many times to repeat the process over the testing data (returned prevalences are averaged)
        start_lr (float): Learning rate for the network (initial value)
        end_lr (float): Learning rate for the network. The value will be decreasing after a few epochs without improving.
        n_bags (int): How many artificial samples to build per epoch.
        bag_size (int): Number of examples per sample.
        random_seed (int): Seed to make results reproducible. This net need to generate the bags so the seed is important
        dropout (float): Dropout to use in the network (avoid overfitting)
        weight_decay (float): L2 regularization for the model
        val_split (float): by default we validate using the train data. If a split is given, we partition the data and use this percentage for
        validation and early stopping
        loss_function: loss function to optimize. The progress and early stopping will be based in L1 always.
        epsilon (float): if the error is less than this number, do not update the weights.
        device (torch.device): Device to use for training/testing
        callback_epoch: function to call after each epoch. Useful to optimize with Optuna
        verbose (int): verbose
        dataset_name (str): only for loggin purposes
    """
    def __init__(self,train_epochs,test_epochs,start_lr,end_lr,n_bags,bag_size,random_seed,linear_sizes,
                 feature_extraction_module,bag_generator,batch_size,dropout=0,weight_decay=0,lr_factor=0.1,val_split=0,loss_function=torch.nn.L1Loss(),
                 epsilon=0,device=torch.device('cpu'),patience=20,callback_epoch=None,verbose=0,dataset_name=""):
        self.train_epochs = train_epochs
        self.test_epochs = test_epochs
        self.start_lr = start_lr
        self.end_lr = end_lr
        self.n_bags = n_bags
        self.bag_size = bag_size
        self.random_seed = random_seed
        self.linear_sizes = linear_sizes
        self.bag_generator=bag_generator
        self.dropout = dropout
        self.weight_decay=weight_decay
        self.lr_factor = lr_factor
        self.batch_size=batch_size
        self.val_split=val_split
        self.patience = patience
        self.loss_function = loss_function
        self.device = device
        self.verbose = verbose
        self.epsilon = epsilon
        self.callback_epoch = callback_epoch
        self.dataset_name = dataset_name
        self.feature_extraction_module = feature_extraction_module
        #make results reproducible
        torch.manual_seed(random_seed)
    def move_data_device(self,data):
        """Move tensors (or numpy arrays, converting by dtype) to self.device."""
        if torch.is_tensor(data):
            return data.to(self.device)
        else:
            # numpy path: choose tensor dtype from the array dtype
            if data.dtype=='float64':
                return torch.tensor(data).float().to(self.device)
            elif data.dtype=='int32' or data.dtype=='int64':
                return torch.tensor(data).long().to(self.device)
            else:
                return torch.tensor(data).to(self.device)
    def compute_validation_loss(self,X_val,y_val,loss):
        """Evaluate the model on freshly sampled validation bags.

        Returns (mean configured loss, mean L1 loss); L1 is always monitored
        for early stopping regardless of the training loss function.
        """
        samples_indexes,p = self.bag_generator.compute_train_bags(n_bags=self.n_bags,bag_size=self.bag_size,y=y_val)
        val_loss = 0
        l1_loss = 0
        with torch.no_grad():
            self.model.eval()
            for i,sample_indexes in enumerate(samples_indexes):
                X_bag = X_val.index_select(0,sample_indexes)
                y_bag = y_val.index_select(0,sample_indexes)
                p_hat = self.model.forward(X_bag)
                total_loss = loss(p_hat,p[i,:])
                l1_loss += F.l1_loss(p_hat,p[i,:]).item()
                val_loss += total_loss.item()
        # averages assume len(samples_indexes) == self.n_bags — TODO confirm
        # the bag generator always honours n_bags exactly
        val_loss /= self.n_bags
        l1_loss /= self.n_bags
        return val_loss,l1_loss #We want to monitor always l1_loss
    def fit(self, X, y):
        """Train the quantification network on (X, y); returns the best L1 validation error."""
        #split training into train and validation
        if self.val_split>0:
            X_train, X_val, y_train, y_val= train_test_split(X, y, test_size=self.val_split, stratify=y, random_state=self.random_seed)
            X_val = self.move_data_device(X_val)
            y_val = self.move_data_device(y_val)
            if self.verbose>0:
                print("Spliting {} examples in training set [training: {}, validation: {}]".format(X.shape,X_train.shape,X_val.shape))
        else:
            X_train = X
            y_train = y
        #compute some data from the training dataset: n_features, n_examples, classes and n_classes
        self.n_features = X.shape[1]
        self.classes=np.unique(y)
        self.n_classes = len(self.classes)
        self.model = DQNModule(n_classes=self.n_classes,dropout=self.dropout,feature_extraction_module=self.feature_extraction_module,
                               linear_sizes=self.linear_sizes)
        self.model.to(self.device)
        self.best_error = 1 #Highest value. We want to store the best error during the epochs
        #if os.path.isfile('model.pyt'):
        #    self.model.load_state_dict(torch.load('model.pyt'))
        #    return
        #Move data to device
        X_train = self.move_data_device(X_train)
        y_train = self.move_data_device(y_train)
        if self.verbose>0:
            print("Using device {}".format(self.device))
        loss = self.loss_function
        self.optimizer = torch.optim.AdamW(self.model.parameters(),lr=self.start_lr,weight_decay=self.weight_decay)
        # ReduceLROnPlateau shrinks the LR after `patience` epochs without
        # improvement; training stops when the LR drops below end_lr.
        self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(self.optimizer,patience=self.patience,factor=self.lr_factor,cooldown=0,verbose=True)
        for epoch in range(self.train_epochs):
            start_epoch = time.time()
            if self.verbose>0:
                print("[{}] Starting epoch {}...".format(self.dataset_name,epoch),end='')
            #compute the training bags
            samples_indexes,p = self.bag_generator.compute_train_bags(n_bags=self.n_bags,bag_size=self.bag_size,y=y_train)
            self.model.train()
            train_loss = 0
            l1_loss_tr = 0
            for i,sample_indexes in enumerate(samples_indexes):
                X_bag = X_train.index_select(0,sample_indexes)
                p_hat = self.model.forward(X_bag)
                quant_loss = loss(p_hat,p[i,:])
                total_loss = quant_loss
                l1_loss_tr += F.l1_loss(p_hat,p[i,:]).item()
                train_loss += total_loss.item()
                # skip the backward pass for near-perfect bags (SVR-style
                # epsilon-insensitive update)
                if (abs(quant_loss.item())>=self.epsilon): #Juanjo Idea. Taken from SVR.
                    total_loss.backward()
                # gradients accumulate over `batch_size` bags before a step
                if i%self.batch_size==0 or i==self.n_bags-1:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
            train_loss /= self.n_bags
            l1_loss_tr /= self.n_bags
            end_epoch = time.time()
            elapsed = end_epoch - start_epoch
            print("[Time:{:.2f}s]".format(elapsed),end='')
            if self.val_split>0:
                val_loss,l1_loss_val = self.compute_validation_loss(X_val,y_val,loss)
            else:
                # no held-out split: validate on the training metrics
                val_loss=train_loss
                l1_loss_val = l1_loss_tr
            if self.callback_epoch is not None:
                self.callback_epoch(val_loss,epoch)
            if self.verbose>0:
                print("finished. Traing Loss=[{:.5f},L1={:.5f}]. Val loss = [{:.5f},L1={:.5f}]".format(train_loss,l1_loss_tr,val_loss,l1_loss_val),end='')
            #Save the best model up to this moment
            if l1_loss_val<self.best_error:
                self.best_error = l1_loss_val
                self.best_model = copy.deepcopy(self.model.state_dict())
                if self.verbose>0:
                    print("[saved best model in this epoch]",end='')
            print("")
            self.scheduler.step(val_loss)
            # early stopping: the scheduler has decayed the LR below end_lr
            if self.optimizer.param_groups[0]['lr']<self.end_lr:
                if self.verbose>0:
                    print("Early stopping!")
                break
        if self.verbose>0:
            print("Restoring best model...")
        self.model.load_state_dict(self.best_model)
        #torch.save(self.model.state_dict(), 'model.pyt')
        return self.best_error
    def predict(self, X):
        """Makes the prediction over each sample repeated for n epochs. Final result will be the average."""
        X = self.move_data_device(X)
        #Special case to compare with Sebastiani
        if X.shape[0]==self.bag_size:
            # X is exactly one bag: quantify it directly, no resampling
            with torch.no_grad():
                self.model.eval()
                return self.model.forward(X).cpu().detach().numpy()
        else:
            # average the prevalence estimate over n_bags * test_epochs bags
            predictions=torch.zeros((self.n_bags*self.test_epochs,self.n_classes),device=self.device)
            for epoch in range(self.test_epochs):
                start_epoch = time.time()
                if self.verbose>10:
                    print("[{}] Starting testing epoch {}... ".format(self.dataset_name,epoch),end='')
                samples_indexes= self.bag_generator.compute_prediction_bags(dataset_size=X.shape[0],n_bags=self.n_bags,bag_size=self.bag_size)
                with torch.no_grad():
                    self.model.eval()
                    for i,sample_indexes in enumerate(samples_indexes):
                        predictions[(epoch*self.n_bags)+i,:] = self.model.forward(X[sample_indexes,:])
                end_epoch = time.time()
                elapsed = end_epoch - start_epoch
                print("[Time:{:.2f}s]".format(elapsed),end='')
            print("done.")
            return torch.mean(predictions,axis=0).cpu().detach().numpy()
|
23,005 | 013098df04cb6d1663a927857fbc81a7f40e0cf5 | # (c) Copyright 2018 Palantir Technologies Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
from setuptools import find_packages, setup, Command
from os import path, makedirs, system
import subprocess
import sys
VERSION_PY_PATH = "conjure_python_client/_version.py"

# Generate _version.py from `git describe` on first build; outside a git
# checkout (e.g. building from an sdist) we fall back to the existing file.
if not path.exists(VERSION_PY_PATH):
    try:
        gitversion = (
            subprocess.check_output(
                "git describe --tags --always --first-parent".split()
            )
            .decode()
            .strip()
            .replace("-", "_")
        )
        # context manager closes the handle even if the write fails
        # (the original left `open(...).write(...)` to the GC)
        with open(VERSION_PY_PATH, "w") as version_file:
            version_file.write('__version__ = "{}"\n'.format(gitversion))
        if not path.exists("build"):
            makedirs("build")
    except subprocess.CalledProcessError:
        print("outside git repo, not generating new version string")

# Defines __version__ for the setup() call below.
with open(VERSION_PY_PATH) as version_file:
    exec(version_file.read())
class FormatCommand(Command):
    """Custom ``setup.py format`` command that runs the *black* formatter."""

    description = "Reformat python files using 'black'"
    user_options = [
        ("check", "c", "Don't write the files back, just return the status")
    ]

    def initialize_options(self):
        # --check defaults to off
        self.check = False

    def finalize_options(self):
        # distutils passes option values through as-is; coerce truthy to True
        if self.check != False:
            self.check = True

    def run(self):
        try:
            exit_code = self.blackCheck() if self.check else self.black()
            # propagate black's success/failure as the process exit status
            sys.exit(0 if exit_code == 0 else 1)
        except OSError:
            # black not installed / not runnable: silently do nothing
            pass

    def black(self):
        return system("black --line-length 79 .")

    def blackCheck(self):
        return system("black --check --quiet --line-length 79 .")
setup(
    name="conjure-python-client",
    version=__version__,  # defined by exec'ing conjure_python_client/_version.py above
    description="Conjure Python Library",
    # The project's main homepage.
    url="https://github.com/palantir/conjure-python-client",
    author="Palantir Technologies, Inc.",
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10"
    ],
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=["test*", "integration*"]),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=["requests"],
    tests_require=["pytest", "pyyaml"],
    python_requires=">=3.8",
    # registers the custom `setup.py format` command defined above
    cmdclass={"format": FormatCommand},
    # ship the py.typed marker so type checkers use the inline annotations
    package_data={
        "conjure_python_client": ["py.typed"],
    },
)
|
class Testing:
    """Demonstrates public vs. name-mangled "private" members in Python."""

    def __init__(self, testTypes, experience):
        self.testTypess = testTypes        # public attribute
        self.__experience = experience     # mangled to _Testing__experience

    def display(self):
        print(self.testTypess)
        # Double-underscore names are private via name mangling; a single
        # underscore is only a naming convention.
        print(self.__experience)
        # Private methods remain callable from inside the class through self.
        self.__private_method()

    def getAge(self):
        print(self.__experience)

    def setAge(self, experience):
        self.__experience = experience

    def __private_method(self):
        print("Private Method")
# Exercise the Testing class: display, getter/setter, and the (blocked)
# attempts to reach private members from outside.
test_obj = Testing('Manual Testing', 5)
# call class method
test_obj.display()
# Changing experience value using setter method
test_obj.getAge()
test_obj.setAge(6)
test_obj.getAge()
print(test_obj.testTypess)
# Private variable and Method cannot access outside the class
# (uncommenting either line raises AttributeError due to name mangling)
# print(test_obj.__experience)
# test_obj.__private_method()
|
23,007 | 7e9da0bef1e328d6273badb742d5c368a92a9e55 | #!/usr/bin/env python3
# coding=utf-8
# File:__init__.py.py
# Author:LGSP_Harold
|
23,008 | 41db1db1cf4cb970d10fef8137ddee38c99fb4b0 | from config.dbconfig import pg_config
import psycopg2
class HasTagDAO:
    """Data-access object for the message<->hashtag association table ``has``."""

    def __init__(self):
        connection_url = "dbname=%s user=%s password=%s" % (pg_config['dbname'],
                                                            pg_config['user'],
                                                            pg_config['passwd'])
        # psycopg2._connect is a private name; psycopg2.connect is the
        # documented public API and is stable across releases.
        self.conn = psycopg2.connect(connection_url)

    def getHashtagsInMessage(self, message_id):
        """Return all ``has`` rows linking hashtags to *message_id*."""
        cursor = self.conn.cursor()
        query = "select * from has where has.message_id = %s;"
        cursor.execute(query, (message_id,))
        return [row for row in cursor]

    def getMessagesWithHashtagID(self, hashtag_id):
        """Return all messages tagged with the hashtag id."""
        cursor = self.conn.cursor()
        query = "select * from message natural inner join has where has.hashtag_id = %s;"
        cursor.execute(query, (hashtag_id,))
        return [row for row in cursor]

    def getMessagesWithHashtagText(self, hashtag_text):
        """Return all messages tagged with the given hashtag text."""
        cursor = self.conn.cursor()
        query = "select * from message natural inner join has natural inner join hashtag where hashtag.hashtag_text = %s;"
        cursor.execute(query, (hashtag_text,))
        return [row for row in cursor]

    def insert(self, message_id, hashtag_id):
        """Insert a (message, hashtag) link; commits and returns the message id."""
        cursor = self.conn.cursor()
        query = "insert into has(message_id, hashtag_id) values(%s,%s) returning message_id;"
        cursor.execute(query, (message_id, hashtag_id,))
        result = cursor.fetchone()[0]
        self.conn.commit()
        return result
|
23,009 | a6156a4b0c9948e3d8889c62c25b56b9a8a1940e | # %% Wyświetlanie wyniku
# %% displaying output
print('Hello world')
# %% arithmetic operators
print(1 + 1)
print(2 - 3)
print(3 % 2) # modulo, i.e. the remainder of the division
print(3 // 2) # floor division (no remainder)
print(3 / 2)
print(3 * 2)
print(3 ** 2)
# %% in-place (augmented) assignment operators
x = 10
x += 3
print(x)
x *= 3
print(x)
x %= 2
print(x)
# %% data types
# integers: int
print(2, 3, -5, 0)
# floating point: float
print(1.0, -3.4, 0.5)
# text strings: str
print('Adam', 'Ala')
# %% type conversion (not valid for every operation)
a, b, c = 4, 'ala', 3.4
print(a, b, c)
print(type(a), type(b), type(c))
# multiplication works on str * int (string repetition)
print(b * a)
# to be able to use + we must convert everything to str first
a1 = str(a)
c1 = str(c)
print(a1, b, c1, sep=", ")
print(a1 + b + c1)
a2 = float(a)
c2 = int(c)
print(type(a2), type(c2))
print(a2, c2)
# %% basic built-in functions for these types
#print() - display output
word = 'Hello world'
print(word)
# input() - read user input (the prompt string is in Polish: "enter anything")
word2 = input('podaj cokolwiek')
print(word2)
# len() - length of a sequence
print(len(word2))
# %% objects
object_one = []
object_two = []
object_three = object_one      # another name for the same list object
object_four = object_two[:]    # a slice makes a shallow copy (new object)
print(id(object_one))
print(id(object_two))
print(id(object_three))
print(id(object_four))
# == compares values, `is` compares identity (same object in memory)
if (object_one == object_two):
    print("True")
else:
    print("False")
if (object_one is object_two):
    print("True")
else:
    print("False")
if (object_one is object_three):
    print("True")
else:
    print("False")
if (object_two is object_four):
    print("True")
else:
    print("False")
# %%
|
23,010 | b65bd022ea3cb0b19b29aaf694b39e35f92f16be | import re
filename = 'day7input.txt'
with open(filename) as fp:
    content = fp.readlines()
# for the ALL questions must be answered the same, we need the line breaks to determine different people in a group
# NOTE(review): the comment above looks copy-pasted from the day 6 puzzle;
# this block actually parses day 7 "X bags contain Y" rules.
bagsetall = []
bagindex = []
removechar = ['bags', 'bag', '.', ' ', '\n']
for i in range(len(content)):
    # split each rule into the outer-bag colour and the contained-bag list
    bagline = content[i].split(' bags contain')
    for n in removechar:
        bagline[0] = bagline[0].replace(n,'')
        bagline[1] = bagline[1].replace(n,'')
    # strip the counts; only colours matter for part 1
    bagline[0] = re.sub(r'\d+', '', bagline[0])
    bagline[1] = re.sub(r'\d+', '', bagline[1])
    bagnest = bagline[1].split(',')
    bagsetall.append(bagnest)
    bagindex.append(bagline[0])
# this recursively looks at each bag, and what bags can be in those bags, and what bags can be in those bags- it counts every time 'shinygold' bags appear
goldcounter = 0
def nestingdoll(baglist, goldcounter, bagindex, bagsetall):
    """Recursively expand *baglist* and count occurrences of 'shinygold'.

    Recursion stops once every colour in the list is the sentinel 'noother';
    otherwise each non-sentinel colour is replaced by its contents and the
    accumulated count is threaded through the recursive call.
    """
    if baglist == ['noother'] * len(baglist):
        return goldcounter
    goldcounter += baglist.count('shinygold')
    expanded = []
    for colour in baglist:
        if colour != 'noother':
            expanded.extend(bagsetall[bagindex.index(colour)])
    return nestingdoll(expanded, goldcounter, bagindex, bagsetall)
# For every colour, count how many times 'shinygold' appears anywhere in its
# full expansion; any colour with a positive count can eventually contain it.
goldspercolor = []
for j in range(len(bagindex)):
    goldspercolor.append(nestingdoll(bagsetall[j], goldcounter, bagindex, bagsetall))
counter = [0]*len(goldspercolor)
for j in range(len(goldspercolor)):
    if goldspercolor[j] > 0:
        counter[j] = 1
    else:
        counter[j] = 0
# number of bag colours that can (transitively) hold a shiny gold bag
print(sum(counter))
|
23,011 | 680b4231b5105885761e39f83a2e53bcfbf86221 | import gettext
import os
from typing import Callable
dirname = os.path.dirname(__file__)
localedir = os.path.join(dirname, "locales")
def get_equivalent_locale(locale: str) -> str:
    """Collapse Chinese locale variants to 'zh-Hant'/'zh-Hans'; others pass through."""
    is_chinese = "zh" in locale
    wants_traditional = "Hant" in locale or "TW" in locale
    if is_chinese and "Hans" not in locale and wants_traditional:
        return "zh-Hant"
    if is_chinese:
        return "zh-Hans"
    return locale
def use_localize(locale: str) -> Callable[[str], str]:
    """Return a gettext lookup function for *locale*, falling back to English.

    NOTE(review): callers may pass None (handled below) despite the ``str``
    annotation; consider tightening it to Optional[str].
    """
    languages = ["en"] if locale is None else [get_equivalent_locale(locale), "en"]
    translation = gettext.translation("base", localedir=localedir, languages=languages)
    return translation.gettext
|
23,012 | 8f4cab586b7ddffd37bb9630f785b2b15b959b99 | import os
import tensorflow as tf
# Pin the process to GPU 0 and let TF grow GPU memory on demand instead of
# reserving it all up front.
os.environ["CUDA_VISIBLE_DEVICES"]= "0"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess= tf.Session(config=config)
from inference import predict
import numpy as np
import time
from flask import Flask, request, make_response
app = Flask(__name__)
# Capture the default graph so request-handler threads can re-enter it.
graph = tf.get_default_graph()
init_time = time.time()
@app.route('/', methods=['POST', 'GET'])
def video_preprocessing():
    """Flask endpoint: extract features from a video and store them as .npy.

    Form fields: ``input_path`` (video to process), ``num_second`` (used in
    the output filename), ``output_path`` (directory/prefix for the result).
    Returns the error code as a string: '0' on success, '1' otherwise.
    """
    global graph
    # re-enter the TF graph captured at startup (handlers run in worker threads)
    with graph.as_default():
        input_video = request.form['input_path']
        num_second = request.form['num_second']
        output_path = request.form['output_path']
        try:
            features, error_code = predict(input_video)
            if error_code == 0:
                np.save(output_path + 'video_preprop_' + num_second + '.npy', features)
                print("accept")
            else:
                print("reject")
        except Exception:
            # was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; Exception keeps the best-effort behaviour
            # while letting process-control exceptions propagate
            print("unknown error")
            error_code = 1
        return str(error_code)
# Run
if __name__ == '__main__':
    input_dir = "samples/img_good_case"
    print('pre-loading time:', time.time()-init_time)
    # warm-up / smoke-test prediction before the server starts accepting requests
    print(predict(input_dir))
    app.run(host='0.0.0.0', port=9990, threaded=True, debug=False)
|
23,013 | e66b800ddb888eccb9d8388a076b45aba45db282 | import pygame as pg
import numpy as np
clock = pg.time.Clock()
FPS = 30        # frame-rate cap for the main loop
WIDTH = 800     # window width in pixels
HEIGHT = 800    # window height in pixels
pg.init()
class Projection:
    """Pygame window that draws and rotates the nodes of registered surfaces."""

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.screen = pg.display.set_mode((width, height))
        self.background = (10, 10, 60)
        pg.display.set_caption('ASCII 3D EARTH')
        self.surfaces = {}

    def addSurface(self, name, surface):
        """Register *surface* under *name* for drawing and rotation."""
        self.surfaces[name] = surface

    def display(self):
        """Clear the screen and draw every node of every surface as a white dot."""
        self.screen.fill(self.background)
        for surf in self.surfaces.values():
            for node in surf.nodes:
                # NOTE(review): plots columns 1 and 2 of the homogeneous node
                # (not 0 and 1) — confirm this axis choice is intentional
                pg.draw.circle(self.screen, (255, 255, 255), (int(node[1]), int(node[2])), 4, 0)

    def rotateAll(self, theta):
        """Rotate every registered surface by *theta* radians about the z-axis."""
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        rotation = np.array([[cos_t, -sin_t, 0, 0],
                             [sin_t, cos_t, 0, 0],
                             [0, 0, 1, 0],
                             [0, 0, 0, 1]])
        for surf in self.surfaces.values():
            surf.rotate(surf.findCentre(), rotation)
class Object:
    """Set of 3-D points stored as rows of homogeneous coordinates [x, y, z, 1]."""

    def __init__(self):
        self.nodes = np.zeros((0, 4))

    def addNodes(self, node_array):
        """Append an (n, 3) array of xyz points, homogenising with a trailing 1."""
        homogeneous = np.hstack((node_array, np.ones((len(node_array), 1))))
        self.nodes = np.vstack((self.nodes, homogeneous))

    def findCentre(self):
        """Return the centroid of all nodes (homogeneous column included)."""
        return self.nodes.mean(axis=0)

    def rotate(self, center, matrix):
        """Apply *matrix* to every node relative to *center*, in place."""
        for idx, node in enumerate(self.nodes):
            self.nodes[idx] = center + np.matmul(matrix, node - center)
# Main loop: rebuild the scene every frame and accumulate the spin angle.
spin = 0
running = True
while running:
    clock.tick(FPS)
    # NOTE(review): a new Projection (which calls set_mode) and a new cube are
    # constructed on every frame; hoisting them above the loop looks intended.
    pv = Projection(WIDTH, HEIGHT)
    cube = Object()
    # the 8 corners of an axis-aligned cube between 200 and 600 on each axis
    cube_nodes = ([(x, y, z) for x in (200, 600) for y in (200, 600) for z in (200, 600)])
    cube.addNodes(np.array(cube_nodes))
    pv.addSurface('cube', cube)
    pv.rotateAll(spin)
    pv.display()
    for event in pg.event.get():
        if event.type == pg.QUIT:
            running = False
    pg.display.update()
    spin += 0.05
|
def count_subarray_equals_0s_1s(ZerosOnes):
    """Count subarrays of a 0/1 array containing equally many 0s and 1s.

    Maps 0 -> -1 so a balanced subarray corresponds to two equal prefix
    sums, then counts pairs of equal prefix sums with a frequency table.
    """
    prefix_counts = {}
    running = 0
    for value in ZerosOnes:
        running += -1 if value == 0 else value
        prefix_counts[running] = prefix_counts.get(running, 0) + 1
    total = 0
    # each pair of positions with the same prefix sum bounds a balanced subarray
    for freq in prefix_counts.values():
        if freq > 1:
            total += freq * (freq - 1) // 2
    # subarrays starting at index 0 balance exactly when the prefix sum is 0
    total += prefix_counts.get(0, 0)
    return int(total)
print(count_subarray_equals_0s_1s([1,0,0,0,0,0,0]))
|
23,015 | d3c6c4161b8b65c7ed55c0301c1fe73918ec4199 | # https://atcoder.jp/contests/abc042/tasks/abc042_b
# Read N strings, sort them lexicographically, and print their concatenation.
n, l = (int(x) for x in input().split())
s = [input() for x in range(n)]
s.sort()
# str.join is linear in total length; the original `answer += s[i]` loop
# builds the result with repeated concatenation (quadratic worst case)
print("".join(s))
23,016 | a7cfcd7b499ad74fc03f8a11e37f4b328410172a | #encoding: utf-8
import string
import random
import unicodedata
import os
import threading
import subprocess
import shlex
import re
import json
from configuracion import config
"""
Declara un cerrojo global para los bloqueos entre threads.
"""
lock = threading.Lock()
"""
Fija los valores de configuración por defecto
"""
def set_default_settings():
    """Seed the config store with default option values.

    Each value is written only when the option is not already set: the `or`
    short-circuits when get_option returns a truthy existing value.
    NOTE(review): an existing falsy value (0, '') would be re-defaulted.
    """
    defaults = [
        [ 'MAX_ENCODING_TASKS', 5 ],
        [ 'MELT_PATH' , which('melt') ],
        [ 'AVCONV_PATH', which('avconv') ],
        [ 'MP4BOX_PATH', which('MP4Box') ],
        [ 'CRONTAB_PATH', which('crontab') ],
        [ 'MEDIAINFO_PATH', which('mediainfo') ],
        [ 'MAX_PREVIEW_WIDTH', 400 ],
        [ 'MAX_PREVIEW_HEIGHT', 300 ],
        [ 'VIDEO_LIBRARY_PATH', '/home/adminudv/videos/videoteca/' ],
        [ 'VIDEO_INPUT_PATH' , '/home/adminudv/videos/' ],
        [ 'PREVIEWS_PATH' , '/home/adminudv/videos/previews/' ],
        [ 'TOKEN_VALID_DAYS' , 7 ],
        [ 'SITE_URL' , 'http://127.0.0.1:8000' ],
        [ 'LOG_MAX_LINES', 1000 ],
        [ 'MAX_NUM_LOGFILES', 6 ],
        [ 'RETURN_EMAIL', 'noreply@dpcat.es' ],
    ]
    for op in defaults:
        config.get_option(op[0]) or config.set_option(op[0], op[1])
"""
Genera un token alfanumérico del tamaño dado
"""
def generate_token(length):
    """Return a random alphanumeric token of *length* characters.

    NOTE(review): uses the non-cryptographic `random` module and the
    Python 2-only `string.letters`; adequate as a filename salt, not for
    security-sensitive tokens.
    """
    random.seed()
    return "".join([random.choice(string.letters + string.digits) for x in range(length)])
"""
Normaliza una cadena para generar nombres de fichero seguros.
"""
def normalize_filename(name):
    """ASCII-fold *name*, drop punctuation and replace spaces with underscores.

    NOTE(review): `.translate(None, ...)` is the Python 2 bytes signature.
    """
    return unicodedata.normalize('NFKD', name).encode('ascii','ignore').translate(None, string.punctuation).replace(' ', '_')
"""
Genera un nombre de fichero para un nuevo vídeo
"""
def generate_safe_filename(name, date, extension):
    """Build 'YYYY/MM/DD_<safe-name>_<random8><extension>' for a new video."""
    day = date.strftime("%Y/%m/%d")
    token = generate_token(8)
    return "{}_{}_{}{}".format(day, normalize_filename(name), token, extension)
"""
Se asegura de que exista un directorio antes de crear un fichero en él.
"""
def ensure_dir(f):
    """Ensure the parent directory of file path *f* exists, creating it if needed.

    The check-and-create runs under the module lock to serialise concurrent
    callers racing to create the same directory.
    """
    d = os.path.dirname(f)
    # `with` releases the lock even if makedirs raises; the original
    # acquire()/release() pair leaked the lock on exception, deadlocking
    # every later caller
    with lock:
        if not os.path.exists(d):
            os.makedirs(d)
"""
Borra el fichero dado y los directorios que lo contienen si están vacíos.
"""
def remove_file_path(f):
    """Delete file *f* if it exists, then prune any now-empty parent directories."""
    if os.path.isfile(f):
        os.remove(f)
    parent = os.path.dirname(f)
    try:
        # removedirs walks upward deleting empty ancestors until one is non-empty
        os.removedirs(parent)
    except OSError:
        pass
"""
Comprueba si la ruta dada coresponde a un directorio
"""
def is_dir(path):
    """Return True when *path* refers to an existing directory."""
    return os.path.isdir(path)
"""
Comprueba si la ruta dada corresponde a un fichero ejecutable
"""
def is_exec(fpath):
    """Return True when *fpath* is an existing, executable regular file."""
    if not os.path.isfile(fpath):
        return False
    return os.access(fpath, os.X_OK)
"""
Trata de localizar la ruta del ejecutable dado en el PATH
"""
def which(fpath):
    """Locate executable *fpath* on the PATH via /usr/bin/which.

    Returns the stripped stdout of `which` (empty when not found).
    NOTE(review): under Python 3 this would return bytes, not str — the file
    targets Python 2 (see string.letters above); confirm before porting.
    """
    command = "/usr/bin/which %s" % fpath
    return subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE).communicate()[0].strip()
"""
Devuelve la versión del avconv instalado.
"""
def avconv_version():
    """Return the installed avconv version string, or None.

    Returns None implicitly when AVCONV_PATH is not executable, and
    explicitly when the version line cannot be parsed.
    """
    fpath = config.get_option('AVCONV_PATH')
    if is_exec(fpath):
        command = "%s -version" % fpath
        data = subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).communicate()[0]
        try:
            return re.match('avconv version ([\.0-9]+)', data).group(1)
        except AttributeError:
            # re.match returned None: unexpected output format
            return None
"""
Devuelve la versión del melt instalado.
"""
def melt_version():
    """Return the installed melt version string, or None (same contract as avconv_version)."""
    fpath = config.get_option('MELT_PATH')
    if is_exec(fpath):
        command = "%s -version" % fpath
        data = subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).communicate()[0]
        try:
            return re.search('melt ([\.0-9]+)', data).group(1)
        except AttributeError:
            # re.search returned None: unexpected output format
            return None
"""
Devuelve la versión del mediainfo instalado.
"""
def mediainfo_version():
    """Return the installed mediainfo version string, or None (same contract as avconv_version)."""
    fpath = config.get_option('MEDIAINFO_PATH')
    if is_exec(fpath):
        command = "%s --Version" % fpath
        data = subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE, stderr = subprocess.PIPE).communicate()[0]
        try:
            return re.search('(v[0-9\.]+)$', data).group(1)
        except AttributeError:
            # re.search returned None: unexpected output format
            return None
"""
Devuelve la versión del MP4Box instalado.
"""
def mp4box_version():
    """Return the installed MP4Box version string, or None (same contract as avconv_version)."""
    fpath = config.get_option('MP4BOX_PATH')
    if is_exec(fpath):
        command = "%s -version" % fpath
        data = subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE, stderr = subprocess.STDOUT).communicate()[0]
        try:
            return re.search('version (\S*)', data).group(1)
        except AttributeError:
            # re.search returned None: unexpected output format
            return None
"""
Devuelve la información de uso del sistema de ficheros en el que se encuentra la ruta dada.
"""
def df(fpath):
    """Return (size, used, available, use%, mountpoint) for the filesystem holding *fpath*.

    Parses the second line (first data row) of `df -Ph` output.
    """
    command = "df %s -Ph" % fpath
    data = subprocess.Popen(shlex.split(str(command)), stdout = subprocess.PIPE).communicate()[0].strip().splitlines()[1]
    return re.search('^.* +([\.0-9,]+[KMGTPEZY]?) +([\.0-9,]+[KMGTPEZY]?) +([\.0-9,]+[KMGTPEZY]?) +([\.0-9,]+%) +(/.*$)', data).group(1, 2, 3, 4, 5)
"""
Comprueba si el directorio dado existe y es accesible. Si no existe y puede, lo creará y devolverá verdadero.
"""
def check_dir(fpath):
    """Return True if *fpath* is (or can be created as) an accessible directory.

    An existing directory must be readable, writable and traversable.  A
    missing path is created (including parents); creation failure, or an
    existing non-directory path, yields False.
    """
    if os.path.isdir(fpath) and os.access(fpath, os.R_OK | os.W_OK | os.X_OK):
        return True
    if not os.path.exists(fpath):
        try:
            os.makedirs(fpath)
        except OSError:
            # was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt; filesystem failures raise OSError
            return False
        return True
    else:
        return False
"""
Convierte, en caso necesario, una marca temporal en formato HH:MM:SS.ss a segundos.
"""
def time_to_seconds(t):
    """Convert an HH:MM:SS(.ss) timestamp to seconds; numeric input passes through as float."""
    try:
        return float(t)
    except ValueError:
        parts = t.split(':')
        return float(parts[0]) * 3600 + float(parts[1]) * 60 + float(parts[2])
"""
Clase envoltorio que permite iterar sobre un fichero.
"""
class FileIterWrapper(object):
    """Iterator that yields fixed-size chunks from a file-like object."""

    def __init__(self, flo, chunk_size = 1024**2):
        self.flo = flo
        self.chunk_size = chunk_size

    def next(self):
        """Return the next chunk; raise StopIteration at end of stream."""
        data = self.flo.read(self.chunk_size)
        if data:
            return data
        else:
            raise StopIteration

    # Python 3 spells the iterator protocol __next__; keep `next` for
    # existing Python 2 callers and alias it for 3.x compatibility.
    __next__ = next

    def __iter__(self):
        return self
"""
Devuelve a modo de flujo el contenido del fichero dado.
"""
def stream_file(filename):
    """Open *filename* for binary reading and return a chunked iterator over it."""
    handle = open(filename, "rb")
    return FileIterWrapper(handle)
|
23,017 | 2441562678283c2ec4c49f1b1c3d707fd78154bc | ############################################################
#
# dictionary
#
############################################################
# Dictionary literals: empty, simple str->int, and str->tuple values.
empty = {}
months = {"Jan":1, "Feb":2, "May":5, "Dec":12}
seasons = {"Spring":("Mar","Apr","May"), "Summer":("Jun","Jul","Aug")}
# Python 2 print statements: show the dict type and look values up by key.
print type(empty)
print months["Dec"]
print seasons["Summer"]
|
23,018 | 0bc4d1dbef3473a70095b8818bf5bb2943866d3b | """Add date_added cols to tables
Revision ID: 4db5404fea7a
Revises: 56fb768102cc
Create Date: 2017-10-18 14:18:53.804053
"""
# revision identifiers, used by Alembic.
revision = '4db5404fea7a'
down_revision = '56fb768102cc'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add a nullable ``date_added`` timestamp column to each affected table."""
    for table in ('phone_queue', 'phones', 'queues'):
        op.add_column(table, sa.Column('date_added', sa.DateTime(), nullable=True))
def downgrade():
    """Drop the ``date_added`` column added by upgrade(), in reverse order."""
    for table in ('queues', 'phones', 'phone_queue'):
        op.drop_column(table, 'date_added')
|
23,019 | aaedc58e15bc91f56b8c6a3837609c463a58c461 | #!/usr/bin/env python
#coding:utf-8
import twitter
import os, codecs
import json
import copy
import datetime
JSONDUMPDIR = "dump_json"
# Placeholders; real values are loaded from the token files below.
consumerKey = ''
consumerSecret = ''
accessToken = ''
accessSecret = ''
consumerkeyfile = 'consumerkey'
accesstokenfile = 'accesstoken'
# load consumerKey from file
# consumerKey file should be: "<consumerKey>\n<consumerSecret>\n"
consumerKey, consumerSecret = twitter.read_token_file(consumerkeyfile)
# First run: walk the user through OAuth and persist the access token.
if not os.path.exists(accesstokenfile):
    twitter.oauth_dance("Dump List of Following Users", consumerKey, consumerSecret,
                        accesstokenfile)
accessToken, accessSecret = twitter.read_token_file(accesstokenfile)
# Authenticated API client used by the functions below.
tw = twitter.Twitter(auth=twitter.OAuth(
    accessToken, accessSecret, consumerKey, consumerSecret))
def dumpjson(data, destpath):
    """Serialize *data* as UTF-8 JSON to *destpath*, creating parent dirs as needed."""
    parent = os.path.dirname(destpath)
    if not os.path.exists(parent):
        os.makedirs(parent)
    with codecs.open(destpath, 'w', 'utf-8') as fh:
        json.dump(data, fh)
def get_users(next_cursor=None):
    """Return every account the authenticated user follows.

    Fetches 200 users per API call and recurses while the response reports
    a non-zero next_cursor (the last page returns cursor 0, ending recursion).
    """
    # this function may fail if the number of following is higher than 3000
    # because of the API limit
    if next_cursor:
        ret = tw.friends.list(count=200, skip_status=True, cursor=next_cursor)
    else:
        ret = tw.friends.list(count=200, skip_status=True)
    users = ret['users']
    if ret['next_cursor']: # if pages remain, get the next page
        users.extend(get_users(ret['next_cursor']))
    return users
def main():
    """Fetch the following list and dump it to dump_json/<screen_name>_<time>.json."""
    # NOTE(review): `skip_satus` looks like a typo for `skip_status` — confirm
    # against the twitter library; changing it would alter the API request.
    cred = tw.account.verify_credentials(
        include_entities=False, skip_satus=False, include_email=False)
    screenname = cred["screen_name"]
    users = get_users()
    # dump with filename "<screen_name>_<time>.json"
    strtime = "{:%Y%m%d-%H%M%S}".format(datetime.datetime.now())
    filename = "{}_{}.json".format(screenname, strtime)
    dest = os.path.join(JSONDUMPDIR, filename)
    dumpjson(users, dest)
    print("{} has {} user(s). dumped to {}".format(screenname, len(users), dest))
if __name__ == '__main__':
main() |
23,020 | dde1e80087af4cfe57849d818d457159b84ec2e8 | from graph_scanner import GraphScanner
from general_token import GeneralToken, GeneralTokenType
from errors import GeneralError
from delimiters import Delimiters
class InitScanner(GraphScanner):
    """GraphScanner specialised for the init-file syntax: registers the
    punctuation tokens that delimit its grammar."""
    def __init__(self, input_str):
        GraphScanner.__init__(self, input_str)
        self.delimiters = Delimiters()
        self.delimiters.append(["{", "}", "(", ")", "[", "]", ",", ":"])
|
23,021 | 7626e8801de248e772d8c77c065f80dbe36b311c | import os
import torch
from torchmd.systems import System
from moleculekit.molecule import Molecule
from torchmd.forcefields.forcefield import ForceField
from torchmd.parameters import Parameters
from torchmd.forces import Forces
from torchmd.integrator import Integrator
from torchmd.wrapper import Wrapper
import numpy as np
from tqdm import tqdm
import argparse
import math
import importlib
from torchmd.integrator import maxwell_boltzmann
from torchmd.utils import save_argparse, LogWriter,LoadFromFile
from torchmd.minimizers import minimize_bfgs
FS2NS=1E-6
def viewFrame(mol, pos, forces):
    """Visualise frame 0 of *pos* on *mol*, overlaying scaled force arrows.

    Copies the first frame's coordinates into mol.coords in place, opens the
    molecule viewer, and draws the forces scaled down by 100x.
    """
    # imported lazily so the viewer dependency is only needed when visualising
    from ffevaluation.ffevaluate import viewForces
    mol.coords[:, :, 0] = pos[0].cpu().detach().numpy()
    mol.view(guessBonds=False)
    viewForces(mol, forces[0].cpu().detach().numpy()[:, :, None] * 0.01)
def get_args(arguments=None):
    """Parse TorchMD command-line / config-file options and validate them.

    Parameters
    ----------
    arguments : list[str] | None
        Argument vector to parse; None means sys.argv[1:].

    Returns
    -------
    argparse.Namespace
        Validated options. Side effects: creates the log directory and
        writes the effective configuration to <log-dir>/input.yaml.

    Raises
    ------
    ValueError
        If steps is not a multiple of output-period, or save-period is not
        a multiple of output-period.
    """
    parser = argparse.ArgumentParser(description='TorchMD',prefix_chars='--')
    parser.add_argument('--conf', type=open, action=LoadFromFile, help='Use a configuration file, e.g. python run.py --conf input.conf')
    parser.add_argument('--timestep', default=1, type=float, help='Timestep in fs')
    parser.add_argument('--temperature', default=300,type=float, help='Assign velocity from initial temperature in K')
    parser.add_argument('--langevin-temperature', default=0,type=float, help='Temperature in K of the thermostat')
    parser.add_argument('--langevin-gamma', default=0.1,type=float, help='Langevin relaxation ps^-1')
    parser.add_argument('--device', default='cpu', help='Type of device, e.g. "cuda:1"')
    parser.add_argument('--structure', default=None, help='Deprecated: Input PDB')
    parser.add_argument('--topology', default=None, type=str, help='Input topology')
    parser.add_argument('--coordinates', default=None, type=str, help='Input coordinates')
    parser.add_argument('--forcefield', default="tests/argon/argon_forcefield.yaml", help='Forcefield .yaml file')
    parser.add_argument('--seed',type=int,default=1,help='random seed (default: 1)')
    parser.add_argument('--output-period',type=int,default=10,help='Store trajectory and print monitor.csv every period')
    parser.add_argument('--save-period',type=int,default=0,help='Dump trajectory to npy file. By default 10 times output-period.')
    parser.add_argument('--steps',type=int,default=10000,help='Total number of simulation steps')
    parser.add_argument('--log-dir', default='./', help='Log directory')
    parser.add_argument('--output', default='output', help='Output filename for trajectory')
    parser.add_argument('--forceterms', nargs='+', default="LJ", help='Forceterms to include, e.g. --forceterms Bonds LJ')
    parser.add_argument('--cutoff', default=None, type=float, help='LJ/Elec/Bond cutoff')
    parser.add_argument('--switch_dist', default=None, type=float, help='Switching distance for LJ')
    # BUGFIX: help text was a copy-paste of the --cutoff help ("LJ/Elec/Bond cutoff").
    parser.add_argument('--precision', default='single', type=str, help='Numeric precision: "single" or "double"')
    parser.add_argument('--external', default=None, type=dict, help='External calculator config')
    parser.add_argument('--rfa', default=False, action='store_true', help='Enable reaction field approximation')
    parser.add_argument('--replicas', type=int, default=1, help='Number of different replicas to run')
    parser.add_argument('--extended_system', default=None, type=float, help='xsc file for box size')
    parser.add_argument('--minimize', default=None, type=int, help='Minimize the system for `minimize` steps')
    parser.add_argument('--exclusions', default=('bonds', 'angles', '1-4'), type=tuple, help='exclusions for the LJ or repulsionCG term')
    args = parser.parse_args(args=arguments)
    os.makedirs(args.log_dir,exist_ok=True)
    # Persist the effective configuration for reproducibility.
    save_argparse(args,os.path.join(args.log_dir,'input.yaml'),exclude='conf')

    # Normalise a single force term given as a bare string into a list.
    if isinstance(args.forceterms, str):
        args.forceterms = [args.forceterms]
    if args.steps%args.output_period!=0:
        raise ValueError('Steps must be multiple of output-period.')
    if args.save_period == 0:
        args.save_period = 10*args.output_period
    if args.save_period%args.output_period!=0:
        raise ValueError('save-period must be multiple of output-period.')

    return args
precisionmap = {'single': torch.float, 'double': torch.double}
def setup(args):
    """Build the Molecule, System and Forces objects from parsed CLI args.

    Reads the topology/structure (plus optional coordinates and xsc box),
    seeds the RNGs, assigns Maxwell-Boltzmann velocities, and wires up the
    force terms (optionally including an external calculator module).

    Returns (mol, system, forces), ready to hand to dynamics().
    """
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    #TODO: We might want to set TF32 to false by default
    #torch.backends.cuda.matmul.allow_tf32 = False
    #torch.backends.cudnn.allow_tf32 = False
    device = torch.device(args.device)
    if args.topology is not None:
        mol = Molecule(args.topology)
    elif args.structure is not None:
        # Deprecated PDB path: derive the box from the CRYST1 record.
        # NOTE(review): the box is only set on this branch; topology inputs
        # presumably rely on --extended_system or the topology file itself
        # to provide one -- confirm.
        mol = Molecule(args.structure)
        mol.box = np.array([mol.crystalinfo['a'],mol.crystalinfo['b'],mol.crystalinfo['c']]).reshape(3, 1).astype(np.float32)
    if args.coordinates is not None:
        mol.read(args.coordinates)
    if args.extended_system is not None:
        # xsc file provides/overrides the box dimensions.
        mol.read(args.extended_system)
    precision = precisionmap[args.precision]
    print("Force terms: ",args.forceterms)
    ff = ForceField.create(mol, args.forcefield)
    parameters = Parameters(ff, mol, args.forceterms, precision=precision, device=device)
    external = None
    if args.external is not None:
        # NOTE(review): assumes args.external carries "module", "embeddings"
        # and "file" keys -- verify against the config schema.
        externalmodule = importlib.import_module(args.external["module"])
        embeddings = torch.tensor(args.external["embeddings"]).repeat(args.replicas, 1)
        external = externalmodule.External(args.external["file"], embeddings, device)
    system = System(mol.numAtoms, args.replicas, precision, device)
    system.set_positions(mol.coords)
    system.set_box(mol.box)
    # Initial velocities drawn from a Maxwell-Boltzmann distribution at the
    # requested temperature, one draw per replica.
    system.set_velocities(maxwell_boltzmann(parameters.masses, args.temperature, args.replicas))
    forces = Forces(parameters, terms=args.forceterms, external=external, cutoff=args.cutoff, rfa=args.rfa, switch_dist=args.switch_dist, exclusions=args.exclusions)
    return mol, system, forces
def dynamics(args, mol, system, forces):
    """Run the Langevin MD loop, logging and saving trajectories per replica.

    Every output-period steps: wrap coordinates, append the frame, and write
    one row to monitor_<k>.csv for each replica k.  Every save-period steps:
    dump the accumulated trajectory of each replica to <output>_<k>.npy.
    """
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    device = torch.device(args.device)
    integrator = Integrator(system, forces, args.timestep, device, gamma=args.langevin_gamma, T=args.langevin_temperature)
    # Wrapping uses bonds (if any) to keep molecules whole across the box.
    wrapper = Wrapper(mol.numAtoms, mol.bonds if len(mol.bonds) else None, device)

    outputname, outputext = os.path.splitext(args.output)

    trajs = []
    logs = []
    for k in range(args.replicas):
        logs.append(LogWriter(args.log_dir,keys=('iter','ns','epot','ekin','etot','T'), name=f'monitor_{k}.csv'))
        trajs.append([])

    if args.minimize != None:
        minimize_bfgs(system, forces, steps=args.minimize)  # Optional energy minimization

    iterator = tqdm(range(1,int(args.steps/args.output_period)+1))
    # Prime the force buffers before the first integrator step; the returned
    # Epot is superseded by the values integrator.step() yields below.
    Epot = forces.compute(system.pos, system.box, system.forces)
    for i in iterator:
        # viewFrame(mol, system.pos, system.forces)
        Ekin, Epot, T = integrator.step(niter=args.output_period)
        wrapper.wrap(system.pos, system.box)
        currpos = system.pos.detach().cpu().numpy().copy()
        for k in range(args.replicas):
            trajs[k].append(currpos[k])
            if (i*args.output_period) % args.save_period  == 0:
                np.save(os.path.join(args.log_dir, f"{outputname}_{k}{outputext}"), np.stack(trajs[k], axis=2)) #ideally we want to append

            logs[k].write_row({'iter':i*args.output_period,'ns':FS2NS*i*args.output_period*args.timestep,'epot':Epot[k],
                               'ekin':Ekin[k],'etot':Epot[k]+Ekin[k],'T':T[k]})
if __name__ == "__main__":
args = get_args()
mol, system, forces = setup(args)
dynamics(args, mol, system, forces)
|
23,022 | 76a72c9f72899f16117800f933b299c01872565b | from django.conf.urls import patterns, include, url
from django.contrib import admin
from fb_chatbot import views
# URL routing (legacy Django patterns() API, pre-1.8 style).
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'chatbot.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    #url(r'^hello/', include('fb_chatbot.urls')),
    # Empty-prefix include: tried first, but resolution falls through to the
    # patterns below when nothing inside fb_chatbot.urls matches.
    url(r'', include('fb_chatbot.urls')),
    url(r'^new-ticket/$', views.new_ticket),
    url(r'^tickets/$', views.tickets),
    url(r'^customers/$', views.all_customers),
    url(r'^new-customer/$', views.new_customer),
    # Per-user ticket lookup; fbid may contain the listed punctuation/space chars.
    url(r'^tickets/(?P<fbid>[_.%&+0-9a-zA-Z ]+)/$', views.find_tickets),
)
|
23,023 | 716e27fc8baede10dda996f37a0b0f71479278d4 | #!/usr/bin/env python
#coding:utf-8
#使用OrderedDict函数实现一个先进先出的字典,当容量超出限制时,先删除最早添加的key
from collections import OrderedDict
class LastUpdatedOrderedDict(OrderedDict):
    """Capacity-bounded FIFO dict (Python 2).

    Once `capacity` keys are stored, inserting a new key evicts the oldest
    entry.  Re-setting an existing key deletes and re-appends it, so updates
    move a key to the back of the eviction queue.
    """
    def __init__(self,capacity):
        super(LastUpdatedOrderedDict,self).__init__()
        # Maximum number of keys kept before eviction kicks in.
        self._capacity = capacity
    def __setitem__(self,key,value):
        # 1 when this is an update (no net size increase), else 0.
        containsKey = 1 if key in self else 0
        if len(self) - containsKey >= self._capacity:
            # popitem(last=False) removes the oldest (first-inserted) entry.
            last = self.popitem(last=False)
            print 'remove:',last
        if containsKey:
            # Delete first so the re-insert below moves the key to the end.
            del self[key]
            print 'set:',(key,value)
        else:
            print 'add:',(key,value)
        OrderedDict.__setitem__(self,key,value)
# Demo: fill to capacity (5 keys), then one more insert evicts 'a'.
fifo_dic = LastUpdatedOrderedDict(5)
fifo_dic['a'] = 1
fifo_dic['b'] = 2
fifo_dic['c'] = 3
fifo_dic['d'] = 4
fifo_dic['e'] = 5
print fifo_dic
fifo_dic['f'] = 6  # exceeds capacity -> 'a' is removed
print fifo_dic
|
23,024 | b718f404b705ddc4beb81312fcc21fdb7557f55f | '''
Created on 28 Nov 2013
@author: jlucas, fsaracino
'''
import os
import sys
import logging
import time
#import re
import bz2
import MySQLdb as mydb
# =============================================================================
# DEFINE LOCAL PATHS
# =============================================================================
try:
os.environ['PL1TESTBENCH_ROOT_FOLDER']
except KeyError:
os.environ['PL1TESTBENCH_ROOT_FOLDER'] = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-4])
print ">> os.environ['PL1TESTBENCH_ROOT_FOLDER']=%s" % os.environ['PL1TESTBENCH_ROOT_FOLDER']
else:
pass
sys.path.append(os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:]+['common', 'report', 'csv']))
sys.path.append(os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:]+['common', 'config']))
sys.path.append(os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:]+['common', 'utils']))
sys.path.append(os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:]+['common', 'icera']))
# =============================================================================
# IMPORT USER DEFINED LIBRARY
# =============================================================================
from CfgError import CfgError
from icera_utils import parseModemInfo, getBranch, getPlatform, getVariant
# =============================================================================
# GLOBAL VARIABLES
# =============================================================================
# =============================================================================
# DATABASE API FUNCTIONS
# =============================================================================
def mySqlCheckPermission(host, dbname, uid, pwd):
    """Probe the access level of (uid, pwd) on `dbname` at `host`.

    Returns "READ_WRITE" when the account holds ALL PRIVILEGES (on the
    database or globally), "READ_ONLY" for SELECT-only grants, and None
    when the grants match neither pattern or the database is unreachable.

    Note: pwd is expected bz2-compressed (see DatabaseMySqlPl1Testbench).
    """
    logger=logging.getLogger('mySqlCheckPermission')
    logger.debug("db_params: name %s, uid %s, host %s" % (dbname, uid, host))
    try:
        # Get database connection object instance
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        # Open database connection
        db_h.database_connect()
        db_h.cursor.execute("SHOW GRANTS for current_user;")
        result=db_h.cursor.fetchall()
        grant_all_db     = "ALL PRIVILEGES ON `%s`" %dbname
        grant_all_all    = "ALL PRIVILEGES ON *.*"
        grant_select_db  = "SELECT ON `%s`" %dbname
        grant_select_all = "SELECT ON *.*"
        # Flatten all grant rows into one searchable string.
        grant = ""
        for res in result:
            grant = "%s %s" %(grant,res)
        # SELECT-only is tested first; an ALL-privileges account does not
        # match the SELECT patterns, so the ordering is safe.
        if ( (grant_select_db in grant) or (grant_select_all in grant)):
            res = "READ_ONLY"
        elif ( (grant_all_db in grant) or (grant_all_all in grant)):
            res = "READ_WRITE"
        else:
            res = None
        logger.debug("Available access to DB: %s@%s for User: %s is %s." %(dbname, host, uid, res))
        db_h.database_disconnect()
        return res
    except Exception:
        # BUGFIX: this was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt and hid the failure cause entirely.
        logger.debug("connection failure: %s" % repr(sys.exc_info()[1]))
        logger.warning("ACCESS to Database is not available.")
        return None
def mySqlGetVersion(host, dbname, uid, pwd):
    """Return the MySQL server version row (a tuple) or None on no result.

    Exits the process with ERRCODE_SYS_DATABASE_ERROR on MySQL errors.
    The connection is always closed via the finally block.
    """
    logger=logging.getLogger('mySqlGetVersion')
    logger.debug("db_params: name %s, uid %s, host %s" % (dbname, uid, host))
    db_h = None
    ver  = None
    try:
        # Get database connection object instance
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        db_h.database_connect()
        db_h.cursor = db_h.conn.cursor()
        db_h.cursor.execute("SELECT VERSION()")
        ver=db_h.cursor.fetchone()
        logger.info("Database version : %s " % ver)
    except mydb.Error, e:
        logger.error("Error %d: %s" % (e.args[0],e.args[1]))
        sys.exit(CfgError.ERRCODE_SYS_DATABASE_ERROR)
    finally:
        # Always release the connection, even on the sys.exit path above.
        if (not db_h is None):
            db_h.database_disconnect()
    return ver
# =============================================================================
# DATABASE STRUCTURE FOR PERFORMANCE MEASUREMENTS
# =============================================================================
class DatabaseMySqlPl1Testbench(object):
    """Wrapper around the MySQL 'pl1testbench' results database (Python 2).

    Owns a single MySQLdb connection/cursor and provides helpers to create
    and drop the schema tables, and to insert or look up platform, testinfo
    and testrun records.  The constructor expects the database to already
    exist; the pwd argument is bz2-compressed and decompressed on connect.
    """

    # Schema object names used throughout the class.
    DATABASE_NAME       = 'pl1testbench'
    TABLE_PLATFORMS     = 'platforms'
    TABLE_TESTINFOS     = 'testinfos'
    TABLE_TESTRUNS      = 'testruns'
    TABLE_LTE_PARAMS    = 'lte_params'
    TABLE_LTE_RESULTS   = 'lte_results'
    TABLE_WCDMA_PARAMS  = 'wcdma_params'
    TABLE_WCDMA_RESULTS = 'wcdma_results'
    TABLE_GSM_PARAMS    = 'gsm_params'
    TABLE_GSM_RESULTS   = 'gsm_results'

    # Connection state (set in __init__/database_connect).
    name      = None
    conn      = None
    cursor    = None
    host      = None
    uid       = None
    pwd       = None

    def __init__ (self, host, dbname, uid, pwd):
        # Connects immediately; exits the process if the database is missing.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.__init__')
        self.host   = host
        self.name   = dbname
        self.uid    = uid
        self.pwd    = pwd

        # Note: database MUST be created
        try:
            logger.debug("Checking MySQL database : (host=%s, dbname=%s, uid=%s)" % (host, dbname, uid))
            self.database_connect()
            self.cursor = self.conn.cursor()
        except :
            logger.error("Error database not found: %s" % self.name)
            sys.exit(CfgError.ERRCODE_SYS_DATABASE_ERROR)
        else:
            pass

    # =============================================================================
    # DATABASE ADMIN FUNCTIONS
    # =============================================================================
    def database_destroy(self):
        # Drops the whole database; exits the process on MySQL errors.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_destroy')
        try:
            # Get database connection object instance
            self.cursor.execute("DROP DATABASE %s;" % self.name)
            logger.info("Database deleted : %s " % self.name)
        except mydb.Error, e:
            logger.error("Error %d: %s" % (e.args[0],e.args[1]))
            print sys.exc_info()
            sys.exit(CfgError.ERRCODE_SYS_DATABASE_ERROR)
        finally:
            pass

    def database_tables_init(self):
        # Creates any missing tables (drop path is disabled via "if 0").
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_tables_init')
        if 0: self.database_tables_drop()
        self.database_tables_insert()
        logger.debug("initialised database : %s" % self.name)

    def database_tables_insert(self):
        # Creates every schema table that does not already exist, then commits.
        # Exits the process on any failure.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_tables_insert')
        try:
            if not self.table_exists(self.name, self.TABLE_PLATFORMS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (platform_id  INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       platform     TEXT NOT NULL,
                                                       aux_info     TEXT NULL);""" % self.TABLE_PLATFORMS)
                logger.debug("created table  : %s" % self.TABLE_PLATFORMS)

            if not self.table_exists(self.name, self.TABLE_TESTINFOS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (testinfo_id INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testid      INTEGER UNSIGNED NOT NULL,
                                                       rat         ENUM('LTE_FDD', 'LTE_FDD_CA', 'LTE_TDD', 'LTE_TDD_CA', 'WCDMA', 'GSM') NOT NULL,
                                                       testtype    TEXT NOT NULL,
                                                       descr       TEXT);""" % self.TABLE_TESTINFOS)
                logger.debug("created table  : %s" % self.TABLE_TESTINFOS)

            if not self.table_exists(self.name, self.TABLE_TESTRUNS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s ( testrun_id  INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                        timestamp   TIMESTAMP NOT NULL,
                                                        branch      TEXT NULL,
                                                        clnum       INTEGER UNSIGNED NULL,
                                                        mod_files   INTEGER UNSIGNED NULL,
                                                        p4webrev    TEXT NULL,
                                                        testerinfo  TEXT NULL,
                                                        modeminfo   TEXT NULL);""" % self.TABLE_TESTRUNS)
                logger.debug("created table  : %s" % self.TABLE_TESTRUNS)

            if not self.table_exists(self.name, self.TABLE_LTE_PARAMS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (param_id    INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testinfo_id INTEGER NOT NULL,
                                                       carrier     TEXT NOT NULL,
                                                       dmode       TEXT NOT NULL,
                                                       dlulconf    INTEGER NULL,
                                                       ssconf      INTEGER NULL,
                                                       bwmhz       REAL NOT NULL,
                                                       rfband      INTEGER NOT NULL,
                                                       earfcn      INTEGER NOT NULL,
                                                       cp          TEXT NOT NULL,
                                                       tm          INTEGER UNSIGNED NOT NULL,
                                                       txants      INTEGER UNSIGNED NOT NULL,
                                                       pmi         INTEGER UNSIGNED NULL,
                                                       rsepre      REAL NOT NULL,
                                                       pa          INTEGER NOT NULL,
                                                       pb          INTEGER NOT NULL,
                                                       chtype      TEXT NULL,
                                                       snr         REAL NULL,
                                                       doppler     REAL NULL,
                                                       schedtype   TEXT NOT NULL,
                                                       nhrtx       INTEGER UNSIGNED NULL,
                                                       riv         TEXT NULL,
                                                       dlmcs       INTEGER UNSIGNED NULL,
                                                       dlnprb      INTEGER UNSIGNED NULL,
                                                       dlrbstart   INTEGER UNSIGNED NULL,
                                                       ulmcs       INTEGER UNSIGNED NULL,
                                                       ulnprb      INTEGER UNSIGNED NULL,
                                                       ulrbstart   INTEGER UNSIGNED NULL,
                                                       FOREIGN KEY (testinfo_id) REFERENCES %s(testinfo_id));""" % (self.TABLE_LTE_PARAMS, self.TABLE_TESTINFOS))
                logger.debug("created table LTE: %s" % self.TABLE_LTE_PARAMS)

            if not self.table_exists(self.name, self.TABLE_LTE_RESULTS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (result_id       INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testrun_id      INTEGER NOT NULL,
                                                       platform_id     INTEGER NOT NULL,
                                                       param_id        INTEGER NOT NULL,
                                                       param_pcc_id    INTEGER NOT NULL,
                                                       valid           BOOL NOT NULL,
                                                       dlrely          INTEGER,
                                                       dlthr_Mbps      REAL,
                                                       dlthr_min_Mbps  REAL,
                                                       dlthr_max_Mbps  REAL,
                                                       dlbler          REAL,
                                                       cqi             INTEGER UNSIGNED,
                                                       ack             INTEGER UNSIGNED,
                                                       nack            INTEGER UNSIGNED,
                                                       dtx             INTEGER UNSIGNED,
                                                       sf_total        INTEGER UNSIGNED,
                                                       sf_scheduled    INTEGER UNSIGNED,
                                                       ulrely          INTEGER UNSIGNED NULL,
                                                       ulthr_Mbps      REAL NULL,
                                                       ulbler          REAL NULL,
                                                       crc_pass        INTEGER UNSIGNED NULL,
                                                       crc_fail        INTEGER UNSIGNED NULL,
                                                       best_dlthr_Mbps REAL NULL,
                                                       best_ulthr_Mbps REAL NULL,
                                                       tolerance       TEXT NULL,
                                                       verdict_dl      TEXT NULL,
                                                       verdict_ul      TEXT NULL,
                                                       voltage_V       REAL NULL,
                                                       current_mA      REAL NULL,
                                                       power_mW        REAL NULL,
                                                       rank            TEXT NULL,
                                                       dlthr_cw1       TEXT NULL,
                                                       dlthr_cw2       TEXT NULL,
                                                       cqi_cw1         TEXT NULL,
                                                       cqi_cw2         TEXT NULL,
                                                       pmi_ri1         TEXT NULL,
                                                       pmi_ri2         TEXT NULL,
                                                       harq_cw1        TEXT NULL,
                                                       harq_cw2        TEXT NULL,
                                                       FOREIGN KEY (testrun_id) REFERENCES %s(testrun_id),
                                                       FOREIGN KEY (param_id) REFERENCES %s(param_id),
                                                       FOREIGN KEY (platform_id) REFERENCES %s(platform_id));""" % (self.TABLE_LTE_RESULTS, self.TABLE_TESTRUNS, self.TABLE_LTE_PARAMS, self.TABLE_PLATFORMS))
                logger.debug("created table  : %s" % self.TABLE_LTE_RESULTS)

            if not self.table_exists(self.name, self.TABLE_WCDMA_PARAMS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (param_id    INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testinfo_id INTEGER NOT NULL,
                                                       rfband      INTEGER NOT NULL,
                                                       uarfcn      INTEGER NOT NULL,
                                                       chtype      TEXT NOT NULL,
                                                       datarate    TEXT NOT NULL,
                                                       snr         REAL NOT NULL,
                                                       power       REAL NOT NULL,
                                                       txant       INTEGER UNSIGNED NOT NULL,
                                                       sched_type  TEXT,
                                                       modulation  TEXT,
                                                       ki          INTEGER DEFAULT 0,
                                                       num_hsdsch_codes INTEGER DEFAULT 0,
                                                       cpich_power REAL DEFAULT 0.0,
                                                       hspdsch_power REAL DEFAULT 0.0,
                                                       snr_2       REAL DEFAULT 0.0,
                                                       power_2     REAL DEFAULT 0.0,
                                                       modulation_2 TEXT,
                                                       ki_2        INTEGER DEFAULT 0,
                                                       num_hsdsch_codes_2 INTEGER DEFAULT 0,
                                                       FOREIGN KEY (testinfo_id) REFERENCES %s(testinfo_id));""" % (self.TABLE_WCDMA_PARAMS, self.TABLE_TESTINFOS))
                logger.debug("created table  : %s" % self.TABLE_WCDMA_PARAMS)

            if not self.table_exists(self.name, self.TABLE_WCDMA_RESULTS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (result_id       INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testrun_id      INTEGER NOT NULL,
                                                       platform_id     INTEGER NOT NULL,
                                                       param_id        INTEGER NOT NULL,
                                                       dlrely          INTEGER DEFAULT 0,
                                                       dl_ber          REAL DEFAULT 0.0,
                                                       dl_bler         REAL DEFAULT 0.0,
                                                       lost_blocks     INTEGER DEFAULT 0,
                                                       pdn_discontinuity INTEGER DEFAULT 0,
                                                       num_sf          INTEGER DEFAULT 0,
                                                       dl_target_thput REAL DEFAULT 0.0,
                                                       dl_thput        REAL DEFAULT 0.0,
                                                       tol             REAL DEFAULT 0.0,
                                                       cqi             INTEGER UNSIGNED DEFAULT 0,
                                                       sent            REAL DEFAULT 0.0,
                                                       ack             REAL DEFAULT 0.0,
                                                       nack            REAL DEFAULT 0.0,
                                                       dtx             REAL DEFAULT 0.0,
                                                       dl_target_thput_2 REAL DEFAULT 0.0,
                                                       dl_thput_2      REAL DEFAULT 0.0,
                                                       dl_bler_2       REAL DEFAULT 0.0,
                                                       cqi_2           INTEGER UNSIGNED DEFAULT 0,
                                                       sent_2          REAL DEFAULT 0.0,
                                                       ack_2           REAL DEFAULT 0.0,
                                                       nack_2          REAL DEFAULT 0.0,
                                                       dtx_2           REAL DEFAULT 0.0,
                                                       dl_verdict      TEXT ,
                                                       i_min           REAL DEFAULT 0.0,
                                                       i_avg           REAL DEFAULT 0.0,
                                                       i_max           REAL DEFAULT 0.0,
                                                       i_deviation     TINYTEXT,
                                                       pwr_min         REAL DEFAULT 0.0,
                                                       pwr_avg         REAL DEFAULT 0.0,
                                                       pwr_max         REAL DEFAULT 0.0,
                                                       FOREIGN KEY (testrun_id) REFERENCES %s(testrun_id),
                                                       FOREIGN KEY (param_id) REFERENCES %s(param_id),
                                                       FOREIGN KEY (platform_id) REFERENCES %s(platform_id));""" % (self.TABLE_WCDMA_RESULTS, self.TABLE_TESTRUNS, self.TABLE_WCDMA_PARAMS, self.TABLE_PLATFORMS))
                logger.debug("created table  : %s" % self.TABLE_WCDMA_RESULTS)

            if not self.table_exists(self.name, self.TABLE_GSM_PARAMS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (param_id    INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testinfo_id INTEGER NOT NULL,
                                                       rfband      INTEGER NOT NULL,
                                                       bch_arfcn   INTEGER NOT NULL,
                                                       bch_pwr_dl  REAL NOT NULL,
                                                       bch_pcl     REAL NOT NULL,
                                                       tch_arfcn   INTEGER NOT NULL,
                                                       tch_pwr_dl  REAL NOT NULL,
                                                       tbfl        TEXT NOT NULL,
                                                       chtype      TEXT NOT NULL,
                                                       snr         REAL NULL,
                                                       dlmcs       TEXT NULL,
                                                       ulmcs       TEXT NULL,
                                                       FOREIGN KEY (testinfo_id) REFERENCES %s(testinfo_id));""" % (self.TABLE_GSM_PARAMS, self.TABLE_TESTINFOS))
                logger.debug("created table  : %s" % self.TABLE_GSM_PARAMS)

            if not self.table_exists(self.name, self.TABLE_GSM_RESULTS):
                self.cursor.execute("""CREATE TABLE IF NOT EXISTS %s (result_id       INTEGER PRIMARY KEY AUTO_INCREMENT,
                                                       testrun_id      INTEGER NOT NULL,
                                                       platform_id     INTEGER NOT NULL,
                                                       param_id        INTEGER NOT NULL,
                                                       valid           BOOL NOT NULL,
                                                       dlrely          INTEGER NOT NULL,
                                                       dlbler          REAL NOT NULL,
                                                       rlc_blocks      INTEGER UNSIGNED NOT NULL,
                                                       tot_datarate    REAL NOT NULL,
                                                       thr_ovrall      REAL NOT NULL,
                                                       thr_per_slot    REAL NOT NULL,
                                                       noise_sysbw     REAL NOT NULL,
                                                       noise_totbw     REAL NOT NULL,
                                                       s_plus_n_totbw  REAL NOT NULL,
                                                       dlthr_ref       REAL NOT NULL,
                                                       tolerance       REAL NOT NULL,
                                                       verdict_dl      TEXT NOT NULL,
                                                       curr            REAL NULL,
                                                       volt            REAL NULL,
                                                       pwr             REAL NULL,
                                                       FOREIGN KEY (testrun_id) REFERENCES %s(testrun_id),
                                                       FOREIGN KEY (param_id) REFERENCES %s(param_id),
                                                       FOREIGN KEY (platform_id) REFERENCES %s(platform_id));""" % (self.TABLE_GSM_RESULTS, self.TABLE_TESTRUNS, self.TABLE_GSM_PARAMS, self.TABLE_PLATFORMS))
                logger.debug("created table  : %s" % self.TABLE_GSM_RESULTS)
        except:
            logger.error("table insertion failure")
            print sys.exc_info()
            sys.exit(CfgError.ERRCODE_SYS_DATABASE_ERROR)
        else:
            self.conn.commit()

    def database_tables_drop(self):
        # Drops the LTE/platform/testinfo/testrun tables (results before
        # params before referenced tables, to respect foreign keys).
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_tables_drop')
        try:
            if self.table_exists(self.name, self.TABLE_LTE_RESULTS):
                self.cursor.execute("""DROP TABLE %s;""" % self.TABLE_LTE_RESULTS)
                logger.debug("dropped table  : %s" % self.TABLE_LTE_RESULTS)
            if self.table_exists(self.name, self.TABLE_LTE_PARAMS):
                self.cursor.execute("""DROP TABLE %s;""" % self.TABLE_LTE_PARAMS)
                logger.debug("dropped table  : %s" % self.TABLE_LTE_PARAMS)
            if self.table_exists(self.name, self.TABLE_PLATFORMS):
                self.cursor.execute("""DROP TABLE %s;""" % self.TABLE_PLATFORMS)
                logger.debug("dropped table  : %s" % self.TABLE_PLATFORMS)
            if self.table_exists(self.name, self.TABLE_TESTINFOS):
                self.cursor.execute("""DROP TABLE %s;""" % self.TABLE_TESTINFOS)
                logger.debug("dropped table  : %s" % self.TABLE_TESTINFOS)
            if self.table_exists(self.name, self.TABLE_TESTRUNS):
                self.cursor.execute("""DROP TABLE %s;""" % self.TABLE_TESTRUNS)
                logger.debug("dropped table  : %s" % self.TABLE_TESTRUNS)
        except:
            logger.error("table drop failure")
            print sys.exc_info()
            sys.exit(CfgError.ERRCODE_SYS_DATABASE_ERROR)
        else:
            self.conn.commit()

    # =============================================================================
    # DATABASE USER FUNCTIONS
    # =============================================================================
    def database_connect(self):
        # Opens the connection once; pwd is decompressed with bz2 here.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_connect')
        if self.conn is None:
            self.conn = mydb.connect(self.host, self.uid, bz2.decompress(self.pwd), self.name)
            logger.debug("Connected to database : %s" % self.name)
        else:
            logger.debug("Self.con is not None")

    def database_disconnect(self):
        # Closes the connection if open.
        # NOTE(review): self.conn is not reset to None, so a later
        # database_connect() call will not reconnect -- confirm intended.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.database_disconnect')
        if not self.conn is None:
            self.conn.close()
            logger.debug("Disconnected from database : %s" % self.name)

    # =============================================================================
    # TABLE SPECIFIC FUNCTIONS
    # =============================================================================
    def table_exists(self, dbname, tablename):
        # Returns True when tablename exists in dbname (information_schema query).
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.table_exists')
        try:
            if self.conn==None:
                self.database_connect()
            self.cursor.execute("SELECT count(*) FROM information_schema.tables WHERE table_schema = %s AND table_name = %s ;", (dbname, tablename))
            result=(self.cursor.fetchone()[0] > 0)
            if result:
                logger.debug("table=%s found in database=%s" % (tablename, dbname))
            else:
                logger.debug("table=%s not found in database=%s" % (tablename, dbname))
        except mydb.OperationalError:
            logger.error("TABLE %s.table_exists()" % tablename)
            raise mydb.OperationalError
            # NOTE(review): the two statements below are unreachable (after raise).
            print sys.exc_info()
            return False
        else:
            return result

    def table_view(self, dbname, tablename):
        # Logs every row of tablename at INFO level (no-op if table missing).
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.table_view')
        try:
            if self.table_exists(dbname, tablename):
                cursor_len=self.cursor.execute("SELECT * FROM %s;""" % (tablename))
                idx=0
                while (idx<cursor_len):
                    data = self.cursor.fetchone()
                    logger.info("%s[%s] : %s" % (tablename, idx, data))
                    idx += 1
                if not idx:
                    logger.warning('empty table : %s' % tablename)
            #else:
            #    logger.warning("table %s not found" % tablename)
        except mydb.OperationalError:
            logger.error("error on table : %s" % tablename)
            print sys.exc_info()
            raise mydb.OperationalError
        else:
            pass

    def table_describe(self, dbname, tablename):
        # Logs the DESCRIBE output of tablename at INFO level.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.table_describe')
        try:
            if self.table_exists(dbname, tablename):
                cursor_len=self.cursor.execute("DESCRIBE %s;""" % (tablename))
                idx=0
                while (idx<cursor_len):
                    data = self.cursor.fetchone()
                    logger.info("%s[%s] : %s" % (tablename, idx, data))
                    idx += 1
                if not idx:
                    logger.warning('empty table : %s' % tablename)
            #else:
            #    logger.warning("table %s not found" % tablename)
        except mydb.OperationalError:
            logger.error("error on table : %s" % tablename)
            print sys.exc_info()
            raise mydb.OperationalError
        else:
            pass

    # =============================================================================
    # TABLE ENTRY FUNCTIONS
    # =============================================================================
    def get_platform_id(self, platform):
        # Raises TypeError (fetchone() is None) when the platform is unknown;
        # add_platform relies on that to decide whether to insert.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.get_platform_id')
        self.cursor.execute("SELECT platform_id FROM platforms WHERE platform=%s", (platform,))
        result=self.cursor.fetchone()[0]
        logger.debug("Platform %s has ID %d",(platform),result)
        return result

    def add_platform(self, platform):
        # Get-or-create: returns the existing id, inserting the row on miss.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.add_platform')
        try:
            return self.get_platform_id(platform)
        except TypeError:
            self.cursor.execute("INSERT INTO platforms(platform) VALUES (%s)",(platform,))
            logger.debug("Created platform record for %s...",(platform))
            return self.get_platform_id(platform)

    def get_testinfo_id(self, testid):
        # Raises TypeError when testid is not in testinfos (see add_testinfo).
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.get_testinfo_id')
        self.cursor.execute("SELECT testinfo_id FROM testinfos WHERE testid=%s", (testid,))
        result=self.cursor.fetchone()[0]
        logger.debug("testid %s has ID %d", testid, result)
        return result

    def add_testinfo(self, testid, rat, testtype, descr):
        # Get-or-create for testinfo rows, keyed by testid.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.add_testinfo')
        try:
            return self.get_testinfo_id(testid)
        except TypeError:
            self.cursor.execute("INSERT INTO testinfos(testid, rat, testtype, descr) VALUES (%s,%s,%s,%s)", (testid, rat, testtype, descr))
            logger.debug("Created testinfo record for %s...",(testid, rat, testtype, descr))
            return self.get_testinfo_id(testid)

    def get_testrun_id(self, timestamp, branch, clnum, mod_files, p4webrev):
        # Builds the WHERE clause by hand so that None maps to "IS NULL"
        # (a parameterised "= NULL" would never match).
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.get_testrun_id')
        GET_QUERY  = "SELECT testrun_id FROM testruns WHERE timestamp='%s'" % timestamp
        GET_QUERY += (" AND branch='%s'" % branch) if not branch is None else " AND branch is NULL"
        GET_QUERY += (" AND clnum='%s'" % clnum) if not clnum is None else " AND clnum is NULL"
        GET_QUERY += (" AND mod_files='%s'" % mod_files) if not mod_files is None else " AND mod_files is NULL"
        GET_QUERY += (" AND p4webrev='%s'" % p4webrev) if not p4webrev is None else " AND p4webrev is NULL"
        GET_QUERY += (";")
        if 1: logger.debug(GET_QUERY)
        #self.cursor.execute("SELECT testrun_id FROM testruns WHERE timestamp=%s AND branch=%s AND clnum=%s AND mod_files=%s AND p4webrev=%s", (timestamp, branch, clnum, mod_files, p4webrev))
        self.cursor.execute(GET_QUERY)
        result=self.cursor.fetchone()[0]
        logger.debug("Test run %s has ID %d",(timestamp, branch, clnum, mod_files, p4webrev), result)
        return result

    def add_testrun(self, timestamp, branch, clnum, mod_files, p4webrev, testerinfo, modeminfo):
        # Get-or-create for testrun rows, keyed by the five lookup fields.
        logger=logging.getLogger('DatabaseMySqlPl1Testbench.add_testrun')
        try:
            return self.get_testrun_id(timestamp, branch, clnum, mod_files, p4webrev)
        except TypeError:
            logger.debug("Create testrun record for %s", (timestamp, branch, clnum, mod_files, p4webrev, testerinfo, modeminfo))
            self.cursor.execute("INSERT INTO testruns(timestamp, branch, clnum, mod_files, p4webrev, testerinfo, modeminfo) VALUES (%s,%s,%s,%s,%s,%s,%s)",(timestamp, branch, clnum, mod_files, p4webrev, testerinfo, modeminfo))
            return self.get_testrun_id(timestamp, branch, clnum, mod_files, p4webrev)

    def __str__(self):
        # Prints a summary to stdout and returns "" (py2-style debug helper).
        print "---------------------------------------"
        print " file_database     : %s" % self.name
        print " conn              : %s" % self.conn
        print " cursor            : %s" % self.cursor
        return ""
if __name__ == '__main__':
    # Self-test / maintenance script: connects to a local MySQL instance and
    # exercises permission checks, table creation, existence checks, viewing
    # and description of the pl1testbench schema.
    from cfg_multilogging import cfg_multilogging
    loglevel = 'DEBUG'
    logname  = logname= os.path.splitext(os.path.basename(__file__))[0]
    logfile  = logname + '.LOG'
    cfg_multilogging(loglevel, logfile)
    logger=logging.getLogger(logname)

    sys.path.append(os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:]+['common', 'struct']))
    from Struct import Struct

    t0=time.localtime()

    # Define folders hierarchy
    dir_root       =os.sep.join(os.environ['PL1TESTBENCH_ROOT_FOLDER'].split(os.sep)[:])
    dir_export     =os.sep.join(dir_root.split(os.sep)[:]+['common','report','mysql', 'database', 'export'])
    dir_import     =os.sep.join(dir_root.split(os.sep)[:]+['common','report','mysql', 'database', 'import'])

    logger.info("------------------------------------")
    logger.info("dir_root     : %s" % dir_root)
    # NOTE(review): label says dir_export but logs dir_import -- confirm.
    logger.info("dir_export   : %s" % dir_import)
    logger.info("dir_export   : %s" % dir_export)
    logger.info("------------------------------------")

    if 1:
        # local database acces
        host   = 'localhost'
        dbname = 'pl1testbench'
        uid    = 'root'
        # NOTE(review): bz2-compressed credential hard-coded in source --
        # consider moving to an external secret store / config.
        pwd    = 'BZh91AY&SY!-o\xae\x00\x00\x01\x01\x80\x00\x00\x94\x00 \x000\xcc\x0c\xf5\x06qw$S\x85\t\x02\x12\xd6\xfa\xe0'

    if 1:
        logger.debug("---------------------------------------")
        logger.debug(">> Checking access permissions:")
        logger.debug("---------------------------------------")
        mySqlCheckPermission(host, dbname, uid, pwd)
        mySqlGetVersion(host, dbname, uid, pwd)

    if 1:
        logger.debug("---------------------------------------")
        logger.debug(">> Init database tables:")
        logger.debug("---------------------------------------")
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        db_h.database_tables_init()
        db_h.database_disconnect()
        del db_h

    if 1:
        logger.debug("---------------------------------------")
        logger.debug(">> Checking table existence:")
        logger.debug("---------------------------------------")
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        db_h.table_exists(dbname, db_h.TABLE_PLATFORMS)
        db_h.table_exists(dbname, db_h.TABLE_TESTINFOS)
        db_h.table_exists(dbname, db_h.TABLE_TESTRUNS)
        db_h.table_exists(dbname, db_h.TABLE_LTE_PARAMS)
        db_h.table_exists(dbname, db_h.TABLE_LTE_RESULTS)
        #db_h.table_exists(dbname, db_h.TABLE_GSM_PARAMS)
        #db_h.table_exists(dbname, db_h.TABLE_GSM_RESULTS)
        db_h.database_disconnect()
        del db_h

    if 1:
        logger.debug("---------------------------------------")
        logger.debug(">> Checking table view:")
        logger.debug("---------------------------------------")
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        db_h.table_view(dbname, db_h.TABLE_PLATFORMS)
        db_h.table_view(dbname, db_h.TABLE_TESTINFOS)
        db_h.table_view(dbname, db_h.TABLE_TESTRUNS)
        db_h.table_view(dbname, db_h.TABLE_LTE_PARAMS)
        db_h.table_view(dbname, db_h.TABLE_LTE_RESULTS)
        #db_h.table_view(dbname, db_h.TABLE_GSM_PARAMS)
        #db_h.table_view(dbname, db_h.TABLE_GSM_RESULTS)
        db_h.database_disconnect()
        del db_h

    if 1:
        logger.debug("---------------------------------------")
        logger.debug(">> Retrieve table description:")
        logger.debug("---------------------------------------")
        db_h=DatabaseMySqlPl1Testbench(host, dbname, uid, pwd)
        db_h.table_describe(dbname, db_h.TABLE_PLATFORMS)
        db_h.table_describe(dbname, db_h.TABLE_TESTINFOS)
        db_h.table_describe(dbname, db_h.TABLE_TESTRUNS)
        db_h.table_describe(dbname, db_h.TABLE_LTE_PARAMS)
        db_h.table_describe(dbname, db_h.TABLE_LTE_RESULTS)
        del db_h

    t1=time.localtime()
    dt=time.mktime(t1)-time.mktime(t0)
    logger.info("Time duration %d[sec]" % dt)
|
23,025 | d9ffb610b906eff869269119152a196f8d95ff69 | print('%'*32)
# Converte uma temperatura de Celsius para Fahrenheit: F = (9*C)/5 + 32
print('========== TEMPERATURA =========')
# BUGFIX: user-facing typos fixed ("Digitte" -> "Digite",
# "coreespondente" -> "correspondente").
celsius = float(input('Digite a temperatura °C: '))
fahrenheit = ((9 * celsius) / 5) + 32
print('temperatura de {} °C \ncorrespondente a {} °F! '.format(celsius, fahrenheit))
print('='*32)
23,026 | 98437a45cab58233db9803aba1e2f62795ab44c6 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the ``Collection`` and
    ``CollectionObject`` tables for the ``collections`` app.

    Auto-generated style — do not hand-edit the ``models`` freeze below.
    """

    def forwards(self, orm):
        # Adding model 'Collection' (multi-table child of bricks.Brick)
        db.create_table('collections_collection', (
            ('brick_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['bricks.Brick'], unique=True, primary_key=True)),
            ('template_name', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
            ('order_by', self.gf('django.db.models.fields.CharField')(max_length=32)),
        ))
        db.send_create_signal('collections', ['Collection'])

        # Adding model 'CollectionObject' (generic-FK membership row)
        db.create_table('collections_collectionobject', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('collection', self.gf('django.db.models.fields.related.ForeignKey')(related_name='collection_objects', to=orm['collections.Collection'])),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('from_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, null=True, blank=True)),
            ('to_date', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
            ('order', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'])),
            ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')()),
        ))
        db.send_create_signal('collections', ['CollectionObject'])

    def backwards(self, orm):
        # Deleting model 'Collection'
        db.delete_table('collections_collection')

        # Deleting model 'CollectionObject'
        db.delete_table('collections_collectionobject')

    # Frozen ORM state used by South to reconstruct models at migration time.
    models = {
        'bricks.brick': {
            'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Brick'},
            'add_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
            'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'mod_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'picture': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['images.Image']", 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'collections.collection': {
            'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Collection', '_ormbases': ['bricks.Brick']},
            'brick_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['bricks.Brick']", 'unique': 'True', 'primary_key': 'True'}),
            'order_by': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'template_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'collections.collectionobject': {
            'Meta': {'object_name': 'CollectionObject'},
            'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'collection_objects'", 'to': "orm['collections.Collection']"}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'from_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'to_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'images.image': {
            'Meta': {'ordering': "('-pub_date',)", 'object_name': 'Image', '_ormbases': ['bricks.Brick']},
            'brick_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['bricks.Brick']", 'unique': 'True', 'primary_key': 'True'}),
            'height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'image': ('bricks.images.fields.CropImageField', [], {'size_field': "'size'", 'max_length': '256'}),
            'meta': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['collections']
23,027 | 2cf641990786f16d87ecbed01dbcf3c443686955 | import sys
import unittest
from re import escape
import pexpect
from readchar import key
from inquirer.themes import Default as Theme
@unittest.skipUnless(sys.platform.startswith("lin"), "Linux only")
class CheckTest(unittest.TestCase):
    """End-to-end tests of examples/checkbox.py driven through a pty.

    Each test sends key presses to the spawned example and asserts on the
    printed result dict.  The exact key sequence IS the behavior under test.
    """

    def setUp(self):
        # Spawn the example and wait until the last option is rendered.
        self.sut = pexpect.spawn("python examples/checkbox.py")
        self.sut.expect("History.*", timeout=1)

    def test_default_input(self):
        # Accepting without changes keeps the pre-checked defaults.
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books'\]}.*", timeout=1)  # noqa

    def test_select_the_third(self):
        self.sut.send(key.DOWN)
        self.sut.send(key.DOWN)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'Science'\]}.*", timeout=1)  # noqa

    def test_select_one_more(self):
        self.sut.send(key.DOWN)
        self.sut.send(key.DOWN)
        self.sut.send(key.SPACE)
        self.sut.send(key.DOWN)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'Science', 'Nature'\]}.*", timeout=1)  # noqa

    def test_unselect(self):
        # Toggling SPACE twice on the first option leaves it selected;
        # note the re-selection moves it to the end of the result order.
        self.sut.send(key.SPACE)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Books', 'Computers'\]}.*", timeout=1)  # noqa

    def test_select_with_arrows(self):
        # RIGHT selects (like SPACE on an unselected entry).
        self.sut.send(key.DOWN)
        self.sut.send(key.DOWN)
        self.sut.send(key.RIGHT)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'Science'\]}.*", timeout=1)  # noqa

    def test_unselect_with_arrows(self):
        # LEFT unselects the highlighted entry.
        self.sut.send(key.DOWN)
        self.sut.send(key.LEFT)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers'\]}.*", timeout=1)  # noqa

    def test_select_last(self):
        # Over-scrolling DOWN clamps at the last entry (no carousel here).
        for i in range(10):
            self.sut.send(key.DOWN)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'History'\]}.*", timeout=1)  # noqa

    def test_select_all_with_ctrl_a(self):
        self.sut.send(key.CTRL_A)
        self.sut.send(key.ENTER)
        self.sut.expect(
            r"{'interests': \['Computers', 'Books', 'Science', 'Nature', 'Fantasy', 'History'\]}.*", timeout=1
        )  # noqa

    def test_reset_with_ctrl_r(self):
        self.sut.send(key.CTRL_R)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \[\]}.*", timeout=1)  # noqa

    def test_default_invert_selection_with_ctrl_i(self):
        self.sut.send(key.CTRL_I)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Science', 'Nature', 'Fantasy', 'History'\]}.*", timeout=1)  # noqa
@unittest.skipUnless(sys.platform.startswith("lin"), "Linux only")
class CheckCarouselTest(unittest.TestCase):
    """Tests of examples/checkbox_carousel.py: cursor wraps around the ends."""

    def setUp(self):
        self.sut = pexpect.spawn("python examples/checkbox_carousel.py")
        self.sut.expect("Computers.*", timeout=1)

    def test_out_of_bounds_up(self):
        # UP from the first entry wraps to the last one ("History").
        self.sut.send(key.UP)
        self.sut.expect("History.*", timeout=1)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'History'\]}.*", timeout=1)  # noqa

    def test_out_of_bounds_down(self):
        # Six DOWNs from the top wrap past the end back onto an entry;
        # toggling it unselects "Computers", leaving only "Books".
        for i in range(6):
            self.sut.send(key.DOWN)
            # Not looking at what we expect along the way,
            # let the last "expect" check that we got the right result
            self.sut.expect(">.*", timeout=1)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Books'\]}.*", timeout=1)  # noqa
@unittest.skipUnless(sys.platform.startswith("lin"), "Linux only")
class CheckOtherTest(unittest.TestCase):
    """Tests of examples/checkbox_other.py: the free-text "Other" entry."""

    def setUp(self):
        self.theme = Theme()  # default theme, used for its checkbox icons
        self.sut = pexpect.spawn("python examples/checkbox_other.py")
        self.sut.expect("Computers.*", timeout=1)

    def test_other_input(self):
        # Selecting "+ Other" opens a text prompt; the typed value becomes
        # a new, selected checkbox entry.
        self.sut.send(key.UP)
        self.sut.expect(r"\+ Other.*", timeout=1)
        self.sut.send(key.SPACE)
        self.sut.expect(r": ", timeout=1)
        self.sut.send("Hello world")
        self.sut.expect(r"Hello world.*", timeout=1)
        self.sut.send(key.ENTER)
        self.sut.expect(rf"> {escape(self.theme.Checkbox.selected_icon)} Hello world[\s\S]*\+ Other.*", timeout=1)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books', 'Hello world'\]}", timeout=1)  # noqa

    def test_other_blank_input(self):
        # Submitting an empty "Other" value adds nothing.
        self.sut.send(key.UP)
        self.sut.expect(r"\+ Other.*", timeout=1)
        self.sut.send(key.SPACE)
        self.sut.expect(r": ", timeout=1)
        self.sut.send(key.ENTER)  # blank input
        self.sut.expect(r"> \+ Other.*", timeout=1)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Computers', 'Books'\]}", timeout=1)  # noqa

    def test_other_select_choice(self):
        # Regular toggling still works alongside the "Other" entry.
        self.sut.send(key.SPACE)
        self.sut.expect(rf"{escape(self.theme.Checkbox.unselected_icon)} Computers.*", timeout=1)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'interests': \['Books'\]}", timeout=1)  # noqa
@unittest.skipUnless(sys.platform.startswith("lin"), "Linux only")
class CheckWithTaggedValuesTest(unittest.TestCase):
    """Smoke test of examples/checkbox_tagged.py (tagged choice values)."""

    def setUp(self):
        self.sut = pexpect.spawn("python examples/checkbox_tagged.py")
        self.sut.expect("History.*", timeout=1)

    def test_default_selection(self):
        # Only checks that a result dict is printed at all; the tagged
        # values themselves are not asserted here.
        self.sut.send(key.ENTER)
        self.sut.expect("{'interests': ", timeout=1)
@unittest.skipUnless(sys.platform.startswith("lin"), "Linux only")
class CheckLockedTest(unittest.TestCase):
    """Tests of examples/checkbox_locked.py: a locked option cannot be
    toggled off by SPACE or LEFT, but other options remain selectable."""

    def setUp(self):
        self.sut = pexpect.spawn("python examples/checkbox_locked.py")
        self.sut.expect("DevOps.*", timeout=1)

    def test_default_selection(self):
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'courses': \['Programming fundamentals'\]}", timeout=1)

    def test_locked_option_space(self):
        # SPACE on the locked entry is a no-op.
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'courses': \['Programming fundamentals'\]}", timeout=1)

    def test_locked_option_left_key(self):
        # LEFT (unselect) on the locked entry is also a no-op.
        self.sut.send(key.LEFT)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'courses': \['Programming fundamentals'\]}", timeout=1)

    def test_locked_with_another_option(self):
        self.sut.send(key.DOWN)
        self.sut.send(key.DOWN)
        self.sut.send(key.SPACE)
        self.sut.send(key.ENTER)
        self.sut.expect(r"{'courses': \['Programming fundamentals', 'Data science'\]}", timeout=1)
|
23,028 | 95c765f28efb9fb928b7fb7204ebce808ea9f906 | import cola
class Dependencia:
    """A hospital department: a name plus a waiting room, which is a FIFO
    queue (cola.Cola) of patients to be attended in arrival order."""

    def __init__(self, nombre):
        self.nombre = nombre        # department name, used in the log message
        self.sala = cola.Cola()     # waiting room: queue of patient objects

    def atender_pacientes(self):
        """Attend (dequeue and announce) every patient in the waiting room.

        Iterative rewrite: the original recursed once per patient, which can
        raise RecursionError for long queues; a loop has identical output.
        """
        while not self.sala.es_vacia():
            paciente = self.sala.desencolar()
            print('Se ha atendido al paciente ' + paciente.nombre + ' en ' + self.nombre)

    def agregar_a_sala(self, paciente):
        """Enqueue *paciente* into this department's waiting room."""
        self.sala.encolar(paciente)
23,029 | 8c5cde82f0308aa47e714f6629743c2f423d7457 | from model import Model
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from dataloader import notMNIST
import matplotlib.pyplot as plt
from parameters import MODEL_NAME, N_EPOCHS, BATCH_SIZE
root = os.path.dirname(__file__)

# Instantiating the notMNIST dataset class we created
train_dataset = notMNIST(os.path.join(root, 'Dataset/Train'))
print("Loaded data")

# Creating a dataloader.
# Fix: use BATCH_SIZE from parameters.py instead of a hard-coded 256, so the
# per-epoch batch count computed in train() (len(train_dataset) // BATCH_SIZE)
# matches the loader's actual batching.
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True)

# Instantiating the model, loss function and optimizer
net = Model()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters())

loss_history = []  # one mean loss per epoch, appended by train()
def train(epoch):
    """Run one training epoch over ``train_loader``.

    Uses the module-level ``net``, ``criterion``, ``optimizer`` and appends
    the mean epoch loss to the module-level ``loss_history``.

    :param epoch: 1-based epoch number, used only for logging.
    """
    epoch_loss = 0
    n_batches = len(train_dataset) // BATCH_SIZE
    # NOTE(review): this assumes the DataLoader batch size equals BATCH_SIZE;
    # verify the two stay in sync.
    for step, data in enumerate(train_loader, 0):
        train_x, train_y = data
        # Fix: call the module itself instead of .forward() so that any
        # registered hooks run (recommended PyTorch usage).
        y_hat = net(train_x)
        train_y = torch.Tensor(np.array(train_y))
        # CrossEntropyLoss requires arg2 to be torch.LongTensor
        loss = criterion(y_hat, train_y.long())
        epoch_loss += loss.item()

        optimizer.zero_grad()
        # Backpropagation
        loss.backward()
        optimizer.step()

        # There are len(dataset)/BATCH_SIZE batches.
        # We print the epoch loss when we reach the last batch.
        # NOTE(review): with a partial final batch this fires at step ==
        # n_batches, not the true last step — confirm drop-last behavior.
        if step % n_batches == 0 and step != 0:
            epoch_loss = epoch_loss / n_batches
            loss_history.append(epoch_loss)
            print("Epoch {}, loss {}".format(epoch, epoch_loss))
            epoch_loss = 0
# Run the full training loop; train() appends one mean loss per epoch.
for epoch in range(1, N_EPOCHS + 1):
    train(epoch)

# Saving the model
# NOTE(review): this pickles the entire Module object; saving
# net.state_dict() is the usually-recommended portable form — confirm
# whatever loads this file expects a whole module.
torch.save(net, 'models/{}.pt'.format(MODEL_NAME))
print("Saved model...")

# Plotting loss vs number of epochs
# NOTE(review): assumes len(loss_history) == N_EPOCHS (exactly one append
# per epoch inside train()) — otherwise plot() raises a length mismatch.
plt.plot(np.array(range(1, N_EPOCHS + 1)), loss_history)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.show()
23,030 | 8b33a8437707eed3d6ac1b04b4a22df9b4cc0817 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from tests.utils import assert_equal_feature_processors, assert_equal_layers
from polyaxon_schemas.ml.processing.feature_processors import FeatureProcessorsConfig
from polyaxon_schemas.ml.processing.image import (
AdjustBrightnessConfig,
AdjustContrastConfig,
AdjustGammaConfig,
AdjustHueConfig,
AdjustSaturationConfig,
CentralCropConfig,
ConvertColorSpaceConfig,
ConvertImagesDtypeConfig,
DrawBoundingBoxesConfig,
ExtractGlimpseConfig,
FlipConfig,
RandomCropConfig,
ResizeConfig,
Rotate90Config,
StandardizationConfig,
ToBoundingBoxConfig,
TotalVariationConfig,
TransposeConfig
)
from polyaxon_schemas.ml.processing.pipelines import (
BasePipelineConfig,
ImageCaptioningPipelineConfig,
ParallelTextPipelineConfig,
TFRecordImagePipelineConfig,
TFRecordSequencePipelineConfig,
TFRecordSourceSequencePipelineConfig
)
class TestFeatureProcessorsConfigs(TestCase):
    """Round-trip test: FeatureProcessorsConfig.from_dict -> to_dict."""

    def test_feature_processors(self):
        # Two named processors sharing the same input; equality is checked
        # structurally by the shared helper, not by plain dict comparison.
        config_dict = {
            'image1': {
                'input_layers': ['image'],
                'output_layers': ['reshap_0'],
                'layers': [
                    {'Resize': {'height': 28, 'width': 28}},
                    {'Reshape': {'target_shape': [784]}}
                ]
            },
            'image2': {
                'input_layers': ['image'],
                'output_layers': ['reshap_0'],
                'layers': [
                    {'Standardization': {}},
                    {'Resize': {'height': 28, 'width': 28}},
                    {'Reshape': {'target_shape': [784]}}
                ]
            }
        }
        config = FeatureProcessorsConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert_equal_feature_processors(config_to_dict, config_dict)
class TestPipelinesConfigs(TestCase):
    """Round-trip (from_dict -> to_dict) tests for each pipeline config
    schema; each test asserts the fields it sets survive serialization."""

    def test_base_pipeline_config(self):
        config_dict = {
            'name': 'my_pipelne',
            'num_epochs': 10,
            'shuffle': True,
            'feature_processors': {
                'image': {
                    'input_layers': ['image'],
                    'output_layers': ['reshap_0'],
                    'layers': [
                        {'Resize': {'height': 28, 'width': 28}},
                        {'Reshape': {'target_shape': [784]}}
                    ]
                },
            }
        }
        config = BasePipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['name'] == config_dict['name']
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert_equal_feature_processors(config_to_dict['feature_processors'],
                                        config_dict['feature_processors'])

    def test_tf_record_image_pipeline_config(self):
        config_dict = {
            'batch_size': 64,
            'num_epochs': 10,
            'shuffle': True,
            'data_files': ['train_data_file'],
            'meta_data_file': 'meta_data_file',
            'feature_processors': {
                'image': {
                    'input_layers': ['image'],
                    'output_layers': ['reshap_0'],
                    'layers': [
                        {'Resize': {'height': 28, 'width': 28}},
                        {'Reshape': {'target_shape': [784]}}
                    ]
                },
            }
        }
        config = TFRecordImagePipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert config_to_dict['data_files'] == config_dict['data_files']
        assert config_to_dict['meta_data_file'] == config_dict['meta_data_file']
        assert_equal_feature_processors(config_to_dict['feature_processors'],
                                        config_dict['feature_processors'])

    def test_tf_record_sequence_pipeline_config(self):
        config_dict = {
            'batch_size': 64,
            'num_epochs': 10,
            'shuffle': True,
            'data_files': ['train_data_file'],
            'meta_data_file': 'meta_data_file',
        }
        config = TFRecordSequencePipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert config_to_dict['data_files'] == config_dict['data_files']
        assert config_to_dict['meta_data_file'] == config_dict['meta_data_file']

    def test_parallel_text_pipeline_config(self):
        config_dict = {
            'batch_size': 64,
            'num_epochs': 10,
            'shuffle': True,
            'source_files': ['source_data_file'],
            'target_files': ['target_data_file'],
        }
        config = ParallelTextPipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert config_to_dict['source_files'] == config_dict['source_files']
        assert config_to_dict['target_files'] == config_dict['target_files']
        # Delimiters are not set above; the schema defaults them to "".
        assert config_to_dict['source_delimiter'] == ""
        assert config_to_dict['target_delimiter'] == ""

    def test_tf_record_source_sequence_pipeline_config(self):
        config_dict = {
            'batch_size': 64,
            'num_epochs': 10,
            'shuffle': True,
            'files': ['source_data_file'],
            'source_field': 'source',
            'target_field': 'target'
        }
        config = TFRecordSourceSequencePipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert config_to_dict['files'] == config_dict['files']
        assert config_to_dict['source_field'] == config_dict['source_field']
        assert config_to_dict['target_field'] == config_dict['target_field']
        # Delimiters default to "" when omitted from the input dict.
        assert config_to_dict['source_delimiter'] == ""
        assert config_to_dict['target_delimiter'] == ""

    def test_image_captioning_pipeline_config(self):
        config_dict = {
            'batch_size': 64,
            'num_epochs': 10,
            'shuffle': True,
            'files': ['source_data_file'],
            'image_field': 'image/data',
            'image_format': 'jpg',
            'caption_ids_field': 'image/caption_ids',
            'caption_tokens_field': 'image/caption'
        }
        config = ImageCaptioningPipelineConfig.from_dict(config_dict)
        config_to_dict = config.to_dict()
        assert config_to_dict['num_epochs'] == config_dict['num_epochs']
        assert config_to_dict['shuffle'] == config_dict['shuffle']
        assert config_to_dict['files'] == config_dict['files']
        assert config_to_dict['image_field'] == config_dict['image_field']
        assert config_to_dict['image_format'] == config_dict['image_format']
        assert config_to_dict['caption_ids_field'] == config_dict['caption_ids_field']
        assert config_to_dict['caption_tokens_field'] == config_dict['caption_tokens_field']
class TestImageProcessingConfigs(TestCase):
    """Round-trip tests for every image-processing layer config: build from
    a dict and assert equality via the shared assert_equal_layers helper."""

    def test_resize_config(self):
        config_dict = {
            'height': 28,
            'width': 28,
            'method': 0,
            'align_corners': True,
            'name': 'Resize'
        }
        config = ResizeConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_central_crop_config(self):
        config_dict = {
            'central_fraction': 0.28,
            'name': 'CentralCrop'
        }
        config = CentralCropConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_random_crop_config(self):
        config_dict = {
            'height': 28,
            'width': 28,
            'name': 'RandomCrop'
        }
        config = RandomCropConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_extract_glimpse_config(self):
        config_dict = {
            'size': [1, 1],
            'offsets': [1, 1],
            'centered': True,
            'normalized': True,
            'uniform_noise': True,
            'name': 'ExtractGlimpse'
        }
        config = ExtractGlimpseConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_to_bounding_box_config(self):
        config_dict = {
            'offset_height': 1,
            'offset_width': 1,
            'target_height': 10,
            'target_width': 10,
            'method': 'crop',
            'name': 'ToBoundingBox'
        }
        config = ToBoundingBoxConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_flip_config(self):
        config_dict = {
            'axis': 0,
            'is_random': False,
            'seed': None,
            'name': 'Flip'
        }
        config = FlipConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_transpose_config(self):
        config_dict = {
            'name': 'Transpose'
        }
        config = TransposeConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_rotate_config(self):
        config_dict = {
            'k': 0,
            'is_random': False,
            'seed': None,
            'name': 'Rotate90'
        }
        config = Rotate90Config.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_convert_color_space_config(self):
        config_dict = {
            'from_space': 'rgb',
            'to_space': 'grayscale',
            'name': 'ConvertColorSpace'
        }
        config = ConvertColorSpaceConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_convert_image_dtype_config(self):
        config_dict = {
            'dtype': 'float32',
            'saturate': True,
            'name': 'ConvertImagesDtype'
        }
        config = ConvertImagesDtypeConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_adjust_brightness_config(self):
        config_dict = {
            'delta': 1.3,
            'is_random': True,
            'seed': 1000,
            'name': 'AdjustBrightness'
        }
        config = AdjustBrightnessConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_adjust_contrast_config(self):
        config_dict = {
            'contrast_factor': 1.3,
            'contrast_factor_max': None,
            'is_random': False,
            'seed': 1000,
            'name': 'AdjustContrast'
        }
        config = AdjustContrastConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_adjust_hue_config(self):
        config_dict = {
            'delta': 0.3,
            'is_random': True,
            'seed': 1000,
            'name': 'AdjustHue'
        }
        config = AdjustHueConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_adjust_saturation_config(self):
        config_dict = {
            'saturation_factor': 0.3,
            'saturation_factor_max': None,
            'is_random': True,
            'seed': 1000,
            'name': 'AdjustSaturation'
        }
        config = AdjustSaturationConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_adjust_gamma_config(self):
        config_dict = {
            'gamma': 0.3,
            'gain': 1,
            'name': 'AdjustGamma'
        }
        config = AdjustGammaConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_standardization_config(self):
        config_dict = {
            'name': 'Standardization'
        }
        config = StandardizationConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_draw_bounding_boxes_config(self):
        config_dict = {
            'boxes': [0, 3, 3],
            'name': 'DrawBoundingBoxes'
        }
        config = DrawBoundingBoxesConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)

    def test_total_variation_config(self):
        config_dict = {
            'name': 'TotalVariation'
        }
        config = TotalVariationConfig.from_dict(config_dict)
        assert_equal_layers(config, config_dict)
23,031 | d060f1206d1e64059239b349470152c9cfc1983d | #!/usr/bin/env python
"""
This example shows how to work with coordinate transformations, curvilinear
coordinates and a little bit with differential geometry.
It takes polar, cylindrical, spherical, rotating disk coordinates and others
and calculates all kinds of interesting properties, like Jacobian, metric
tensor, Laplace operator, ...
"""
from sympy import var, sin, cos, pprint, Matrix, eye, trigsimp, Eq, \
Function, simplify, sinh, cosh, expand, symbols
def laplace(f, g_inv, g_det, X):
    """
    Calculates Laplace(f), using the inverse metric g_inv, the determinant of
    the metric g_det, all in variables X.

    This is the Laplace-Beltrami operator expanded as
        g^{ij} d_i d_j f + (d_sigma det g) g^{sigma,alpha} (d_alpha f) / (2 det g);
    the second term equals the usual (1/sqrt(g)) d(sqrt(g) ...) form because
    (sqrt(g))'/sqrt(g) = g'/(2g).
    """
    r = 0
    # Second-derivative term: sum over both indices of g^{ij} f_{,ij}.
    for i in range(len(X)):
        for j in range(len(X)):
            r += g_inv[i, j]*f.diff(X[i]).diff(X[j])
    # First-derivative (metric-volume) correction term.
    for sigma in range(len(X)):
        for alpha in range(len(X)):
            r += g_det.diff(X[sigma]) * g_inv[sigma, alpha] * \
                f.diff(X[alpha]) / (2*g_det)
    return r
def transform(name, X, Y, *, g_correct=None, recursive=False):
    """
    Transforms from cartesian coordinates X to any curvilinear coordinates Y.

    It prints useful information, like the Jacobian, metric tensor,
    determinant of the metric, Laplace operator in the new coordinates, ...

    g_correct ... if not None, it will be taken as the metric --- this is
                  useful if sympy's trigsimp() is not powerful enough to
                  simplify the metric so that it is usable for later
                  calculation. Leave it as None, only if the metric that
                  transform() prints is not simplified, you can help it by
                  specifying the correct one.

    recursive ... apply recursive trigonometric simplification (use only when
                  needed, as it is an expensive operation)
                  NOTE(review): this flag is accepted but never read in the
                  body below — confirm whether it should feed trigsimp().
    """
    print("_"*80)
    print("Transformation:", name)
    for x, y in zip(X, Y):
        pprint(Eq(y, x))
    # Jacobian of the cartesian coordinates with respect to the new ones.
    J = X.jacobian(Y)
    print("Jacobian:")
    pprint(J)
    # Pull back the Euclidean metric (identity) through J: g = J^T J.
    g = J.T*eye(J.shape[0])*J
    g = g.applyfunc(expand)
    print("metric tensor g_{ij}:")
    pprint(g)
    if g_correct is not None:
        # Hand-supplied metric overrides the (possibly unsimplified) computed one.
        g = g_correct
        print("metric tensor g_{ij} specified by hand:")
        pprint(g)
    print("inverse metric tensor g^{ij}:")
    g_inv = g.inv(method="ADJ")
    g_inv = g_inv.applyfunc(simplify)
    pprint(g_inv)
    print("det g_{ij}:")
    g_det = g.det()
    pprint(g_det)
    # Generic scalar function of the new coordinates, for the Laplacian.
    f = Function("f")(*list(Y))
    print("Laplace:")
    pprint(laplace(f, g_inv, g_det, Y))
def main():
    """Run the transform() demo for several classic coordinate systems."""
    mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w = symbols(
        "mu, nu, rho, theta, phi, sigma, tau, a, t, x, y, z, w")

    transform("polar", Matrix([rho*cos(phi), rho*sin(phi)]), [rho, phi])

    transform("cylindrical", Matrix([rho*cos(phi), rho*sin(phi), z]),
              [rho, phi, z])

    transform("spherical",
              Matrix([rho*sin(theta)*cos(phi), rho*sin(theta)*sin(phi),
                      rho*cos(theta)]),
              [rho, theta, phi],
              recursive=True
              )

    # Galilean-style change into a frame rotating with angular velocity w.
    transform("rotating disk",
              Matrix([t,
                      x*cos(w*t) - y*sin(w*t),
                      x*sin(w*t) + y*cos(w*t),
                      z]),
              [t, x, y, z])

    transform("parabolic",
              Matrix([sigma*tau, (tau**2 - sigma**2) / 2]),
              [sigma, tau])

    transform("bipolar",
              Matrix([a*sinh(tau)/(cosh(tau)-cos(sigma)),
                      a*sin(sigma)/(cosh(tau)-cos(sigma))]),
              [sigma, tau]
              )

    transform("elliptic",
              Matrix([a*cosh(mu)*cos(nu), a*sinh(mu)*sin(nu)]),
              [mu, nu]
              )
|
23,032 | e9db496e5b427ba6e75373a2e98d627211315fad | '''
문제
월드전자는 노트북을 제조하고 판매하는 회사이다. 노트북 판매 대수에 상관없이 매년 임대료, 재산세,
보험료, 급여 등 A만원의 고정 비용이 들며, 한 대의 노트북을 생산하는 데에는 재료비와 인건비 등 총 B만원의 가변 비용이 든다고 한다.
예를 들어 A=1,000, B=70이라고 하자. 이 경우 노트북을 한 대 생산하는 데는 총 1,070만원이 들며, 열 대 생산하는 데는 총 1,700만원이 든다.
노트북 가격이 C만원으로 책정되었다고 한다. 일반적으로 생산 대수를 늘려 가다 보면 어느 순간 총 수입(판매비용)이 총
비용(=고정비용+가변비용)보다 많아지게 된다. 최초로 총 수입이 총 비용보다 많아져 이익이 발생하는 지점을 손익분기점(BREAK-EVEN POINT)이라고 한다.
A, B, C가 주어졌을 때, 손익분기점을 구하는 프로그램을 작성하시오.
입력
첫째 줄에 A, B, C가 빈 칸을 사이에 두고 순서대로 주어진다. A, B, C는 21억 이하의 자연수이다.
출력
첫 번째 줄에 손익분기점 즉 최초로 이익이 발생하는 판매량을 출력한다. 손익분기점이 존재하지 않으면 -1을 출력한다.
'''
'''
#1번코드 (맞았습니다)
A, B, C= map(int, input().split())
x=1
if C<=B:
print(-1)
else:
print(int(A/(C-B)+x))
'''
'''
#2번 코드 (제출 시, 시간초과 됨)
A, B, C= map(int, input().split())
x=1
if C<=B:
print(-1)
else:
while A>(C-B)*x:
x=x+1
print(int(x))
'''
|
23,033 | fbf4192389d7275c4ba75c9589350ca3f14e079b | from model.ActionType import ActionType
from model.Game import Game
from model.Move import Move
from model.Wizard import Wizard
from model.World import World
class MyStrategy:
    """Trivial baseline strategy: charge forward at full speed while
    strafing, turning at the maximum rate, and firing continuously."""

    def move(self, me, world, game, move):
        """
        @type me: Wizard
        @type world: World
        @type game: Game
        @type move: Move
        """
        # The four assignments are independent; each game tick we max out
        # every movement channel and queue a magic missile.
        move.action = ActionType.MAGIC_MISSILE
        move.turn = game.wizard_max_turn_angle
        move.strafe_speed = game.wizard_strafe_speed
        move.speed = game.wizard_forward_speed
|
23,034 | d6bce5b10e1702c98a2a7e58fdd0972a6787119c | import random
# Number-guessing game: optional custom bounds, otherwise EASY (1-10) or
# HARD (1-100); the player gets up to 5 guesses (initial + 4 retries).
# NOTE(review): `type` shadows the builtin of the same name.
type = input("Do you want to manually change what the bounds are?\n Answer with(y/n)").upper()
while type != "Y" and type != "N":
    type = input("Do you want to manually change what the bounds are?\n Answer with(y/n)").upper()
if type == "Y":
    # NOTE(review): no validation that low <= up; randint raises if inverted.
    up = int(input("What do you want the upper bound to be?"))
    low = int(input("What do you want the lower bound to be?"))
    num = random.randint(low, up)
    # NOTE(review): debug print — reveals the secret number to the player.
    print(num)
    guess = int(input("Guess a number between your ranges"))
    gum = 0  # count of wrong guesses
    for i in range(4):
        if guess < num:
            print("Guess higher")
            gum = gum + 1
            guess = int(input("Guess a number between your ranges"))
        elif guess > num:
            print("Guess lower")
            gum = gum + 1
            guess = int(input("Guess a number between your ranges"))
        # On a correct guess neither branch runs, so remaining iterations
        # are no-ops and gum stops increasing.
if type == "N":
    mode = input("Type EASY if you wish to play easy, or HARD if you wish to play hard")
    while mode != "HARD" and mode != "EASY":
        mode = input("Type EASY if you wish to play easy, or HARD if you wish to play hard")
    if mode == "EASY":
        num = random.randint(1, 10)
        # NOTE(review): debug print — reveals the secret number.
        print(num)
        guess = int(input("Guess a number from 1-10"))
        gum = 0
        for i in range (4):
            if guess < num:
                print("Guess higher")
                gum = gum + 1
                guess = int(input("Guess a number from 1-10"))
            elif guess > num:
                print("Guess lower")
                gum = gum + 1
                guess = int(input("Guess a number from 1-10"))
    if mode == "HARD":
        num = random.randint(1,100)
        # NOTE(review): debug print — reveals the secret number.
        print(num)
        guess = int(input("Guess a number from 1-100"))
        gum = 0
        for i in range (4):
            if guess < num:
                print("Guess higher")
                gum = gum + 1
                guess = int(input("Guess a number from 1-100"))
            elif guess > num:
                print("Guess lower")
                gum = gum + 1
                guess = int(input("Guess a number from 1-100"))
# NOTE(review): the 5th guess (entered inside the 4th iteration) is never
# evaluated, and a player whose 5th guess would be correct still loses.
if gum == 4:
    print("That's too many tries")
if gum < 4:
    print("You did it, good job!")
    print("It took you ",gum + 1,"tries to answers correct.")
23,035 | 1d2c7772206b124f1d889096b678f95c17ed208b | import msg, display
msg.msg_method()
display.display_method()
|
23,036 | 9d146c6077be1c56f80f56313b785fae2eadcce0 |
// Return the sum of the two integer arguments.
int soma(int a, int b)
{
    int resultado = a + b;
    return resultado;
}
// Increment the referenced integer in place (reference overload).
void aumenta(int &num)
{
    ++num;
}
// Increment the pointed-to integer in place (pointer overload).
// Fix: removed the leftover `print end` line, which is not valid C++
// and prevented this translation unit from compiling.
void aumenta(int *end)
{
    *end = *end + 1;
}
|
23,037 | a54d8acf5ba04c5a7ea58c71644129d44aed2931 | import time
# Simple stopwatch: wait 3 s, shout "go", then measure how long the user
# takes to press Enter and print the elapsed seconds.
print("Empieza")
time.sleep(3)
print("¡Ahora!")
start = time.time()
print("Presiona enter cuando quieras que termine")
input()
elapsed = time.time() - start
print(elapsed)
23,038 | 6defeca24cb5d27d5d2ef447f5bf85d56ae95a6a |
class CustomException(Exception):
    """Base application exception; raise with a CustomErrorMessages code."""
    pass
class CustomErrorMessages:
    """Machine-readable error codes used across the application.

    Each constant equals its own name so codes serialize unambiguously.
    """

    # Authentication / token errors
    INVALID_CREDENTIALS = 'INVALID_CREDENTIALS'
    TOKEN_EXPIRED = 'TOKEN_EXPIRED'
    TOKEN_INVALID = 'TOKEN_INVALID'
    TOKEN_MISSING = 'TOKEN_MISSING'

    # User / profile errors
    USER_NOT_FOUND = 'USER_NOT_FOUND'
    USER_ALREADY_EXISTS = 'USER_ALREADY_EXISTS'
    NOT_ALLOWED_FOR_PROFILE = 'NOT_ALLOWED_FOR_PROFILE'

    # Content / infrastructure errors
    ARTICLE_NOT_FOUND = 'ARTICLE_NOT_FOUND'
    UNEXPECTED_ERROR = 'UNEXPECTED_ERROR'
    SOLR_COMMUNICATION_FAILURE = 'SOLR_COMMUNICATION_FAILURE'
|
23,039 | ea3864b00d23d1ed31297dabf3c3333fdf6b6fdb | class Solution(object):
def summaryRanges(self, nums):
"""
:type nums: List[int]
:rtype: List[str]
"""
intervals = []
for num in nums:
if not intervals or num-intervals[-1][1]>1:
intervals.append([num,num])
else:
intervals[-1][1] = num
for i in range(len(intervals)):
if intervals[i][0] == intervals[i][1]:
intervals[i] = str(intervals[i][0])
else:
intervals[i] = str(intervals[i][0]) + "->" + str(intervals[i][1])
return intervals |
23,040 | 4c8e52f7263385bca89b9d702ab7fdac27a9c703 | import os
# Cap every BLAS/OpenMP backend to a single thread.  These variables are
# read at library-load time, so they must be set *before* numpy (and anything
# linking BLAS) is imported further down.
number_of_threads = 1
os.environ["OMP_NUM_THREADS"] = str(number_of_threads) # export OMP_NUM_THREADS=1
os.environ["OPENBLAS_NUM_THREADS"] = str(number_of_threads) # export OPENBLAS_NUM_THREADS=1
os.environ["MKL_NUM_THREADS"] = str(number_of_threads) # export MKL_NUM_THREADS=1
os.environ["VECLIB_MAXIMUM_THREADS"] = str(number_of_threads) # export VECLIB_MAXIMUM_THREADS=1
os.environ["NUMEXPR_NUM_THREADS"] = str(number_of_threads) # export NUMEXPR_NUM_THREADS=1
import h5py
from tqdm import tqdm
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
import pandas as pd
import tables
from datetime import datetime
from Widefield_Utils import widefield_utils
# Remove This Later
import warnings
warnings.filterwarnings("ignore")
def repackage_data_into_dataframe(pixel_activity, pixel_metadata):
    """Bundle one pixel's activity with its trial metadata into a DataFrame.

    pixel_activity  : 1-D values, stored as the "Data_Value" column.
    pixel_metadata  : 2-D array whose columns 0..3 are Group, Mouse,
                      Session, Condition (one row per trial).
    """
    frame = pd.DataFrame(dtype=np.float64)
    frame["Data_Value"] = pixel_activity
    # Metadata columns, in their fixed positional order.
    for column_index, column_name in enumerate(["Group", "Mouse", "Session", "Condition"]):
        frame[column_name] = pixel_metadata[:, column_index]
    return frame
def mixed_effects_random_slope_and_intercept(dataframe):
    """Fit Data_Value ~ Condition with a random slope and intercept per Mouse.

    dataframe: output of repackage_data_into_dataframe (columns Data_Value,
    Group, Mouse, Session, Condition).

    Returns (p_value, group_slope) for the Condition fixed effect.
    """
    model = sm.MixedLM.from_formula("Data_Value ~ Condition", dataframe, re_formula="Condition", groups=dataframe["Mouse"])
    model_fit = model.fit()
    # Fix: look the slope up by label instead of position.  `params` is a
    # labelled Series that also carries random-effect (co)variance terms, so
    # positional index 1 is fragile (and positional Series indexing is
    # deprecated); this now mirrors the labelled `pvalues` access below.
    group_slope = model_fit.params["Condition"]
    p_value = model_fit.pvalues["Condition"]
    return p_value, group_slope
def view_learning_raw_difference(tensor_directory, analysis_name, vmin=-0.05, vmax=0.05):
    """Interactively display per-timepoint mean activity for three conditions
    and the (condition 3 - condition 2) difference, then a static window
    average over timepoints 100-113.

    tensor_directory: folder holding "<analysis_name>_Trialwise_.h5".
    vmin/vmax: colour limits for the condition panels (difference uses half).
    """
    # Open Analysis Dataframe
    analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + "_Trialwise_.h5"), mode="r")
    activity_dataset = analysis_file.root["Data"]
    metadata_dataset = analysis_file.root["Trial_Details"]
    # Dataset layout: (trials, timepoints, pixels).
    number_of_trials, number_of_timepoints, number_of_pixels = np.shape(activity_dataset)
    # Load Mask
    indicies, image_height, image_width = widefield_utils.load_tight_mask()
    indicies, image_height, image_width = widefield_utils.downsample_mask_further(indicies, image_height, image_width)
    print("metadata_dataset", np.shape(metadata_dataset))
    print("activity_dataset", np.shape(activity_dataset))
    # Load AS Array (pulls the whole HDF5 dataset into RAM)
    print("Starting opening", datetime.now())
    activity_dataset = np.array(activity_dataset)
    print("Finished opening", datetime.now())
    # Split By Condition.  NOTE(review): column 2 is used as the condition
    # label here, whereas view_raw_difference uses column 3 — confirm which
    # metadata column encodes condition for this analysis.
    condition_details = metadata_dataset[:, 2]
    condition_1_indicies = np.where(condition_details == 0)[0]
    condition_2_indicies = np.where(condition_details == 1)[0]
    condition_3_indicies = np.where(condition_details == 2)[0]
    condition_1_data = activity_dataset[condition_1_indicies]
    condition_2_data = activity_dataset[condition_2_indicies]
    condition_3_data = activity_dataset[condition_3_indicies]
    print("Condition 1 data", np.shape(condition_1_data))
    print("condition 2 data", np.shape(condition_2_data))
    print("condition 3 data", np.shape(condition_3_data))
    # Get MEans across trials -> (timepoints, pixels) per condition
    condition_1_data = np.mean(condition_1_data, axis=0)
    condition_2_data = np.mean(condition_2_data, axis=0)
    condition_3_data = np.mean(condition_3_data, axis=0)
    # Load Colourmap
    colourmap = widefield_utils.get_musall_cmap()
    # Interactive per-timepoint animation (4 panels: 3 conditions + diff).
    plt.ion()
    figure_1 = plt.figure()
    for timepoint_index in tqdm(range(number_of_timepoints), position=0, desc="Timepoint"):
        condition_1_axis = figure_1.add_subplot(1, 4, 1)
        condition_2_axis = figure_1.add_subplot(1, 4, 2)
        condition_3_axis = figure_1.add_subplot(1, 4, 3)
        diff_axis = figure_1.add_subplot(1, 4, 4)
        # Recreate Images from masked pixel vectors
        condition_1_image = widefield_utils.create_image_from_data(condition_1_data[timepoint_index], indicies, image_height, image_width)
        condition_2_image = widefield_utils.create_image_from_data(condition_2_data[timepoint_index], indicies, image_height, image_width)
        condition_3_image = widefield_utils.create_image_from_data(condition_3_data[timepoint_index], indicies, image_height, image_width)
        diff_image = np.subtract(condition_3_image, condition_2_image)
        # Plot These
        condition_1_axis.imshow(condition_1_image, cmap=colourmap, vmin=vmin, vmax=vmax)
        condition_2_axis.imshow(condition_2_image, cmap=colourmap, vmin=vmin, vmax=vmax)
        condition_3_axis.imshow(condition_3_image, cmap=colourmap, vmin=vmin, vmax=vmax)
        diff_axis.imshow(diff_image, cmap=colourmap, vmin=vmin*0.5, vmax=vmax*0.5)
        plt.title(str(timepoint_index))
        plt.draw()
        plt.pause(0.1)
        plt.clf()
    plt.ioff()
    # Static summary over a fixed timepoint window (hard-coded 100..113).
    window = list(range(100,114))
    figure_1 = plt.figure()
    condition_1_axis = figure_1.add_subplot(1, 3, 1)
    condition_2_axis = figure_1.add_subplot(1, 3, 2)
    diff_axis = figure_1.add_subplot(1, 3, 3)
    # Get Average.  NOTE(review): despite the names, these average
    # condition_2_data and condition_3_data (conditions 2 and 3, not 1 and 2)
    # — confirm this is intentional.
    condition_1_average = np.mean(condition_2_data[window], axis=0)
    condition_2_average = np.mean(condition_3_data[window], axis=0)
    # Recreate Images
    condition_1_image = widefield_utils.create_image_from_data(condition_1_average, indicies, image_height, image_width)
    condition_2_image = widefield_utils.create_image_from_data(condition_2_average, indicies, image_height, image_width)
    diff_image = np.subtract(condition_2_image, condition_1_image)
    # Plot These (tighter colour limits for the averaged maps; shadows the
    # vmin/vmax parameters from here on)
    vmin=-0.02
    vmax=0.02
    condition_1_axis.imshow(condition_1_image, cmap=colourmap, vmin=vmin, vmax=vmax)
    condition_2_axis.imshow(condition_2_image, cmap=colourmap, vmin=vmin, vmax=vmax)
    diff_axis.imshow(diff_image, cmap=colourmap, vmin=vmin * 0.5, vmax=vmax * 0.5)
    plt.show()
def view_raw_difference(tensor_directory, analysis_name, vmin=-0.05, vmax=0.05):
    """Show, one figure per timepoint, the trial-mean activity maps for two
    conditions and their difference.

    tensor_directory: folder holding "<analysis_name>_Trialwise_.h5".
    vmin/vmax: colour limits for the condition panels (diff panel fixed ±0.02).
    Blocks on plt.show() at every timepoint.
    """
    # Open Analysis Dataframe
    analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + "_Trialwise_.h5"), mode="r")
    activity_dataset = analysis_file.root["Data"]
    metadata_dataset = analysis_file.root["Trial_Details"]
    # Dataset layout: (trials, timepoints, pixels).
    number_of_trials, number_of_timepoints, number_of_pixels = np.shape(activity_dataset)
    # Load Mask
    indicies, image_height, image_width = widefield_utils.load_tight_mask()
    indicies, image_height, image_width = widefield_utils.downsample_mask_further( indicies, image_height, image_width)
    print("metadata_dataset", np.shape(metadata_dataset))
    print("activity_dataset", np.shape(activity_dataset))
    # Load AS Array (pulls the whole HDF5 dataset into RAM)
    print("Starting opening", datetime.now())
    activity_dataset = np.array(activity_dataset)
    print("Finished opening", datetime.now())
    # Split By Condition (metadata column 3 holds the condition label here)
    condition_details = metadata_dataset[:, 3]
    condition_1_indicies = np.where(condition_details == 0)[0]
    condition_2_indicies = np.where(condition_details == 1)[0]
    condition_1_data = activity_dataset[condition_1_indicies]
    condition_2_data = activity_dataset[condition_2_indicies]
    print("Condition 1 data", np.shape(condition_1_data))
    print("condition 2 data", np.shape(condition_2_data))
    # Get MEans across trials -> (timepoints, pixels)
    condition_1_data = np.mean(condition_1_data, axis=0)
    condition_2_data = np.mean(condition_2_data, axis=0)
    # Load Colourmap
    colourmap = widefield_utils.get_musall_cmap()
    for timepoint_index in tqdm(range(number_of_timepoints), position=0, desc="Timepoint"):
        figure_1 = plt.figure()
        condition_1_axis = figure_1.add_subplot(1,3,1)
        condition_2_axis = figure_1.add_subplot(1, 3, 2)
        diff_axis = figure_1.add_subplot(1, 3, 3)
        # Recreate Images from masked pixel vectors
        condition_1_image = widefield_utils.create_image_from_data(condition_1_data[timepoint_index], indicies, image_height, image_width)
        condition_2_image = widefield_utils.create_image_from_data(condition_2_data[timepoint_index], indicies, image_height, image_width)
        # Plot These (difference shown as condition 1 - condition 2)
        condition_1_axis.imshow(condition_1_image, cmap=colourmap, vmin=vmin, vmax=vmax)
        condition_2_axis.imshow(condition_2_image, cmap=colourmap, vmin=vmin, vmax=vmax)
        diff_axis.imshow(np.subtract(condition_1_image, condition_2_image), cmap=colourmap, vmin=-0.02, vmax=0.02)
        plt.title(str(timepoint_index))
        plt.show()
def test_significance_individual_timepoints(tensor_directory, analysis_name):
    """Fit a mixed-effects model per (timepoint, pixel) and save tensors of
    p-values and fixed-effect slopes.

    This Test Is Run Pixelwise - All Brains Must Be In Same Pixel Space.

    Writes "<analysis_name>_p_value_tensor.npy" and
    "<analysis_name>_slope_tensor.npy" into tensor_directory.
    """
    # Open Analysis Dataframe
    analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + "_Trialwise_.h5"), mode="r")
    activity_dataset = analysis_file.root["Data"]
    metadata_dataset = analysis_file.root["Trial_Details"]

    # Bug fix: these shape variables were never assigned (they only appeared in
    # a commented-out block), so the loops below raised NameError.  The
    # indexing below (activity_dataset[timepoint] then [:, pixel]) implies a
    # (timepoints, trials, pixels) layout.
    number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)

    # Create P and Slope Tensors (p defaults to 1 = not significant)
    p_value_tensor = np.ones((number_of_timepoints, number_of_pixels))
    slope_tensor = np.zeros((number_of_timepoints, number_of_pixels))

    for timepoint_index in tqdm(range(number_of_timepoints), position=0, desc="Timepoint"):
        # Get Timepoint Data: (trials, pixels)
        timepoint_activity = activity_dataset[timepoint_index]
        for pixel_index in tqdm(range(number_of_pixels), position=1, desc="Pixel", leave=True):
            # Package Into Dataframe
            pixel_activity = timepoint_activity[:, pixel_index]
            pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)
            # Fit Mixed Effects Model
            p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)
            p_value_tensor[timepoint_index, pixel_index] = p_value
            slope_tensor[timepoint_index, pixel_index] = slope

    # Save These Tensors
    np.save(os.path.join(tensor_directory, analysis_name + "_p_value_tensor.npy"), p_value_tensor)
    np.save(os.path.join(tensor_directory, analysis_name + "_slope_tensor.npy"), slope_tensor)
def test_significance_window(tensor_directory, analysis_name, window):
    """
    Fit a mixed-effects model per pixel on activity averaged over the given
    timepoint window.

    This Test Is Run Pixelwise - All Brains Must Be In Same Pixel Space
    window: list of timepoint indices to average over.
    :return:
    (p_value_tensor, slope_tensor) — one p-value and slope per pixel.
    """
    """
    # Open Analysis Dataframe
    analysis_file = h5py.File(os.path.join(tensor_directory, analysis_name + ".hdf5"), "r")
    activity_dataset = analysis_file["Data"]
    metadata_dataset = analysis_file["metadata"]
    number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)
    print("metadata_dataset", np.shape(metadata_dataset))
    """
    # Open Analysis Dataframe
    analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + "_Trialwise_.h5"), mode="r")
    activity_dataset = analysis_file.root["Data"]
    metadata_dataset = analysis_file.root["Trial_Details"]
    # Materialise in RAM; NaNs are zeroed so the model fit does not choke.
    activity_dataset = np.array(activity_dataset)
    metadata_dataset = np.array(metadata_dataset)
    activity_dataset = np.nan_to_num(activity_dataset)
    # Layout here: (trials, timepoints, pixels).
    number_of_trials, number_of_timepoints, number_of_pixels = np.shape(activity_dataset)
    print("Number of timepoints", number_of_timepoints)
    print("number of pixels", number_of_pixels)
    print("number of trials", number_of_trials)
    # Create P and Slope Tensors (p defaults to 1 = not significant)
    p_value_tensor = np.ones(number_of_pixels)
    slope_tensor = np.zeros(number_of_pixels)
    # Get Timepoint Data: select the window then average over it -> (trials, pixels)
    timepoint_activity = activity_dataset[:, window]
    print("Timepoint activity shape", np.shape(timepoint_activity))
    timepoint_activity = np.mean(timepoint_activity, axis=1)
    for pixel_index in tqdm(range(number_of_pixels), position=1, desc="Pixel", leave=False):
        # Package Into Dataframe
        pixel_activity = timepoint_activity[:, pixel_index]
        pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)
        # Fit Mixed Effects Model
        p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)
        p_value_tensor[pixel_index] = p_value
        slope_tensor[pixel_index] = slope
    return p_value_tensor, slope_tensor
"""
# Load Analysis Details
analysis_name = "Unrewarded_Contextual_Modulation"
tensor_directory = r"/media/matthew/External_Harddrive_2/Control_Switching_Tensors_100"
window = list(range(10,14))
p_value_tensor, slope_tensor = test_significance_window(tensor_directory, analysis_name, window)
indicies, image_height, image_width = widefield_utils.load_tight_mask()
indicies, image_height, image_width = widefield_utils.downsample_mask_further(indicies, image_height, image_width)
slope_map = widefield_utils.create_image_from_data(slope_tensor, indicies, image_height, image_width)
plt.imshow(slope_map)
plt.show()
p_map = widefield_utils.create_image_from_data(p_value_tensor, indicies, image_height, image_width)
p_map = np.nan_to_num(p_map)
plt.imshow(p_map)
plt.show()
""" |
23,041 | d25dd0bb23503df15daad0b909f372873fa56930 | from struct import pack, unpack
from http import extract_http
import socket
# checksum functions needed for calculation checksum
"""
def carry_around_add(a, b):
c = a + b
return (c & 0xffff) + (c >> 16)
def checksum(msg):
s = 0
for i in range(0, len(msg), 2):
w = ord(msg[i]) + (ord(msg[i+1]) << 8)
s = carry_around_add(s, w)
# print 'i: '+ str(i)
return ~s & 0xffff
"""
def checksum(msg):
    """Return the 16-bit one's-complement Internet checksum of `msg`.

    `msg` is consumed two characters at a time as big-endian 16-bit words,
    so its length must be even (callers pad with a zero byte).
    """
    total = 0
    for word_start in range(0, len(msg), 2):
        word = (ord(msg[word_start]) << 8) + ord(msg[word_start + 1])
        total += word
    # Fold the accumulated carries back into the low 16 bits, then invert.
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return ~total & 0xffff
def CreateBatch(file):
    """Parse a pcap capture and return a list of InfluxDB-style point dicts,
    one per TCP packet on port 80, with HTTP request fields filled in where a
    request line is present.

    file: path to a pcap file (classic format, 24-byte global header).

    NOTE(review): this code indexes byte strings with ord() and searches bytes
    with str patterns, so it appears written for Python 2 — it will not run
    unmodified on Python 3.  `file` also shadows the Py2 builtin.
    """
    Array = []
    fpcap = open(file, 'rb')
    text = fpcap.read()
    index = 0
    count = 0
    # Field-name templates zipped against struct.unpack results below.
    pcap_header_fmt = ['gmt_time', 'micro_time', 'pcap_len', 'len']
    ip_header_fmt = ['version,ihl', 'tos', 'tot_len', 'id', 'frag_off', 'ttl', 'protocol', 'check', 'saddr', 'daddr']
    tcp_header_fmt = ['src_port', 'dst_port', 'seq_no', 'ack_no', 'tcp_offset_res', 'tcp_flags', 'window', 'cksum', 'urg_pt']
    # pcap file head (24-byte global header; values unused beyond skipping it)
    global_head = unpack('IHHIIII', text[index:24 + index])
    index += 24
    while index < len(text):
        # 16-byte per-packet record header.
        packet_head = unpack('IIII', text[index:16 + index])
        pcap_head_dict = dict(zip(pcap_header_fmt, packet_head))
        index += 16
        pcap_len = pcap_head_dict['pcap_len']
        # skb is all the packet data (Ethernet frame; IP starts at offset 14)
        skb = text[index: pcap_len + index]
        # ip head
        ip_head = unpack('!BBHHHBBHII', skb[14:34])
        ip_head_dict = dict(zip(ip_header_fmt, ip_head))
        # IHL is the low nibble, in 32-bit words.
        ip_head_length = (ip_head_dict['version,ihl'] & 0xF) * 4
        # filter tcp head: keep only traffic with either port == 80
        ports = unpack('!HH', skb[14+ip_head_length:14+ip_head_length+4])
        if ports[0] == 80 or ports[1] == 80:
            tcp_head = unpack('!HHLLBBHHH', skb[14+ip_head_length:14+ip_head_length+20])
            tcp_head_dict = dict(zip(tcp_header_fmt, tcp_head))
            # Data offset is the high nibble, in 32-bit words.
            offset = tcp_head_dict['tcp_offset_res']
            tcp_head_length = 4*(offset >> 4)
            # pseudo header fields for the TCP checksum
            placeholder = 0
            protocol = ip_head_dict['protocol']
            # NOTE(review): assumes a 20-byte IP header (14 + 20 = 34); with
            # IP options ip_head_length > 20 and this length is wrong.
            tcp_length = pcap_len-34
            psh = pack('!IIBBH', ip_head_dict['saddr'],
                       ip_head_dict['daddr'], placeholder, protocol, tcp_length)
            # skb[14+ip_head_length: 14+ip_head_length+16] + pack('!H', 0) + skb[14+ip_head_length+18:pcap_len]
            cksum_msg = psh + skb[14+ip_head_length: pcap_len]
            # Pad to an even length as checksum() consumes 16-bit words.
            if len(cksum_msg) % 2 == 1:
                cksum_msg += pack('!B', 0)
            # Checksum validation is intentionally disabled for now.
            #if checksum(cksum_msg) == 0: #FOR NOW
                #continue
            # InfluxDB line-protocol-like point; HTTP tags filled by extract_http.
            dict_field = {
                "measurement":"http",
                "tags":{
                    'sIP': socket.inet_ntoa(pack('!I', ip_head_dict['saddr'])),
                    'sPort': tcp_head_dict['src_port'],
                    'dIP': socket.inet_ntoa(pack('!I', ip_head_dict['daddr'])),
                    'dPort': tcp_head_dict['dst_port'],
                    'Domain': None,
                    'URL': None,
                    'user_agent': None,
                    'referer': None,
                    'result_code': None,
                    'action': None,
                    'bytes': pcap_len - 14 - ip_head_length - tcp_head_length,
                    'content-type': None
                },
                'time': None,
                "fields":{
                    "value":12.0
                }
            }
            # time now in micro seconds for accuracy
            dict_field['time'] = int(pcap_head_dict['gmt_time'])*1000000 + int(pcap_head_dict['micro_time'])
            # If there is a TCP payload, look for an HTTP request line.
            if pcap_len > 14+ip_head_length+tcp_head_length:
                data = skb[14+ip_head_length+tcp_head_length: pcap_len]
                checker = data.find('\r\n')
                if(checker != -1):
                    # ADJUSTED TO FIX LOGICAL ERROR THAT MADE DB WRITE ERRORS
                    # Only parse if "HTTP" appears on the first line.
                    checker2 = data.find('HTTP', 0, checker)
                    if checker2 != -1:
                        extract_http(data, dict_field)
            Array.append(dict_field)
            count+=1
        index += pcap_len
    return Array
|
23,042 | 3ecfa06d07ece4e4524823046c2d02a9a1bb42c6 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy import Spider
from scrapy.http import Request
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose, Join
from https_crawler.items import HttpsCrawlerItem
class ClimateAdaptEasySpider(CrawlSpider):
    """Crawl climate-adapt.eea.europa.eu and collect image URLs per page."""
    name = 'climate-adapt-easy'
    allowed_domains = ['climate-adapt.eea.europa.eu']
    start_urls = ['http://climate-adapt.eea.europa.eu/']

    rules = (
        # Follow every extracted link and scrape each visited page.
        Rule(LinkExtractor(),
             callback='parse_item', follow=True),
    )

    def parse_responses(self, responses):
        """Run parse_item over an iterable of responses (results discarded)."""
        for response in responses:
            self.parse_item(response)

    def parse_item(self, response):
        """Return {'img': [...]} with the page's <img src> URLs.

        Bug fix: the original branched on `len(images) > 1` and, for 0 or 1
        images, wrapped the already-list result again (`[images]`), yielding a
        nested list.  `extract()` always returns a flat list, so it is used
        directly, giving a consistent item shape.
        """
        i = dict()
        i['img'] = response.xpath('//img/@src').extract()
        return i
|
23,043 | bbc6a7700d63a44162deeb31593d1f9d15bb7f09 | # coding=utf-8
"""
This file is part of GeoRemindMe.
GeoRemindMe is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
GeoRemindMe is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GeoRemindMe. If not, see <http://www.gnu.org/licenses/>.
"""
from google.appengine.api import mail
from django.utils.translation import ugettext as _
from django.conf import settings
from tasks import EmailHandler
class GeoMail(mail.EmailMessage):
    """App Engine e-mail message with a fixed no-reply sender that can be
    pushed onto the asynchronous send queue."""

    def __init__(self, *args, **kwargs):
        self.sender = 'noreply@georemind.me'
        # Bug fix: super(self.__class__, self) recurses infinitely the moment
        # GeoMail is subclassed (self.__class__ is then the subclass); always
        # name the defining class explicitly.
        super(GeoMail, self).__init__(*args, **kwargs)

    def push(self):
        '''
        Add the e-mail to the send queue.
        '''
        EmailHandler().add(self)
def send_contact_email(org,msg,to=settings.CONTACT_EMAIL,):
    """Queue a contact-form e-mail: timestamp, sender name and their message."""
    import datetime
    contact_mail = GeoMail()
    contact_mail.sender = 'noreply@georemind.me'
    contact_mail.to = to
    contact_mail.subject = "[GeoRemindMe] Email de contacto"
    contact_mail.html = u"""%s<br/>%s dejó el mensaje:<br/>"%s" """ % (str(datetime.datetime.now()),org,msg)
    contact_mail.push()
def send_keepuptodate(org,msg,to=settings.CONTACT_EMAIL,):
    """Queue a 'keep up to date' notification e-mail: timestamp, org, message."""
    import datetime
    update_mail = GeoMail()
    update_mail.sender = 'noreply@georemind.me'
    update_mail.to = to
    update_mail.subject = "[GeoRemindMe] Keep up to date"
    # NOTE(review): the template ends with a stray '"' — kept as-is to
    # preserve the exact message body.
    update_mail.html = u"""%s<br/>%s<br/>%s" """ % (str(datetime.datetime.now()),org,msg)
    update_mail.push()
|
23,044 | 894c2e781594e8841ad39b003bddd47b193f064d | import subprocess
import flask
from flask import Flask, request, render_template, send_file
app = Flask(__name__)
@app.route("/")
def main():
return render_template('Natbag2020.html')
@app.route("/image")
def image():
return send_file('flight_image.jpg')
@app.route("/iframe")
def iframe():
return ""
@app.route("/search")
def search():
return subprocess.check_output(["java", "-classpath",
"/home/ben/eclipse-workspace/Natbag2020/bin", "NatbagMain",
request.args.get('outformat'), request.args.get('flighttype'),
request.args.get('airline'), request.args.get('country'),
request.args.get('city'), request.args.get('airport'),
request.args.get('startdate'), request.args.get('enddate'),
request.args.get('sunday'), request.args.get('monday'),
request.args.get('tuesday'), request.args.get('wednesday'),
request.args.get('thursday'), request.args.get('friday'),
request.args.get('saturday')]) |
23,045 | e0c5c671a9a0c1e5f00764164662fcdfeb0c543e | """
Track Fitting
"""
from scipy import optimize
import numpy as np
import math
def helix_fitter(x, y, z):
    """Fit a helix to 3-D hit positions and return track parameters.

    x, y, z: 1-D np.arrays of hit coordinates (same length).
    Returns (d0, z0, eta, phi, signed_pT_proxy):
      d0  - transverse distance of closest approach to the origin
      z0  - z at the closest-approach radius (linear r-z extrapolation)
      eta - pseudorapidity from the z-vs-|r3| slope
      phi - azimuth of the closest-approach point
      last entry is 0.6*sgn*R/1000, a pT proxy signed by the turning direction
    """
    # find the center of helix in x-y plane: least-squares circle fit,
    # minimising the spread of hit distances from a candidate centre.
    def calc_R(xc, yc):
        return np.sqrt((x-xc)**2 + (y-yc)**2)
    def fnc(c):
        Ri = calc_R(*c)
        return Ri - Ri.mean()
    # Polar angle from a linear fit of z against the 3-D radius.
    r3 = np.sqrt(x**2 + y**2 + z**2)
    p_zr0 = np.polyfit(r3, z, 1, full=True)
    # res0 = p_zr0[1][0]/x.shape[0]
    p_zr = p_zr0[0]
    theta = np.arccos(p_zr[0])
    # theta = np.arccos(z[0]/r3[0])
    eta = -np.log(np.tan(theta/2.))
    center_estimate = np.mean(x), np.mean(y)
    trans_center, ier = optimize.leastsq(fnc, center_estimate)
    x0, y0 = trans_center
    R = calc_R(*trans_center).mean()
    # d0, z0: impact parameter and z at that radius (linear r-z fit)
    d0 = abs(np.sqrt(x0**2 + y0**2) - R)
    r = np.sqrt(x**2 + y**2)
    p_rz = np.polyfit(r, z, 1)
    pp_rz = np.poly1d(p_rz)
    z0 = pp_rz(d0)
    def quadratic_formular(a, b, c):
        # Roots of a*x^2 + b*x + c; degenerates to the linear root when a == 0.
        if a == 0:
            return (-c/b, )
        x1 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
        x2 = (-b - np.sqrt(b**2 - 4*a*c)) / (2*a)
        return (x1, x2)
    # find the closest approaching point in x-y plane: intersect the fitted
    # circle with the line through the origin and the circle centre.
    int_a = 1 + y0**2/x0**2
    int_b = -2*(x0 + y0**2/x0)
    int_c = x0**2 + y0**2 - R**2
    int_x0, int_x1 = quadratic_formular(int_a, int_b, int_c)
    x1 = int_x0 if abs(int_x0) < abs(int_x1) else int_x1
    y1 = y0*x1/x0
    phi = np.arctan2(y1, x1)
    # track travels colockwise or anti-colockwise
    # positive for colckwise: decided from which side of the first-hit ray
    # the circle centre lies.  NOTE(review): breaks down if x0 == 0 (division
    # above) — confirm hits never produce a centre on the y axis.
    xs = x[0] if x[0] != 0 else 1e-1
    ys = y[0] if y[0] != 0 else 1e-1
    is_14 = xs > 0
    is_above = y0 > ys/xs*x0
    sgn = 1 if is_14^is_above else -1
    # last entry is pT*(charge sign)
    return (d0, z0, eta, phi, 0.6*sgn*R/1000)
def conformal_mapping(x, y, z):
"""
x, y, z: np.array([])
return:
"""
# ref. 10.1016/0168-9002(88)90722-X
r = x**2 + y**2
u = x/r
v = y/r
# assuming the imapact parameter is small
# the v = 1/(2b) - u x a/b - u^2 x epsilon x (R/b)^3
pp, vv = np.polyfit(u, v, 2, cov=True)
b = 0.5/pp[2]
a = -pp[1]*b
R = math.sqrt(a**2 + b**2)
e = -pp[0] / (R/b)**3 # approximately equals to d0
magnetic_field = 2.0
pT = 0.3*magnetic_field*R/1000 # in GeV
# print(a, b, R, e, pT)
p_rz = np.polyfit(np.sqrt(r), z, 2)
pp_rz = np.poly1d(p_rz)
z0 = pp_rz(abs(e))
r3 = np.sqrt(r + z**2)
p_zr = np.polyfit(r3, z, 2)
cos_val = p_zr[0]*z0 + p_zr[1]
theta = np.arccos(cos_val)
eta = -np.log(np.tan(theta/2.))
phi = math.atan2(b, a)
return e, z0, eta, phi, pT |
23,046 | 983ee2537077c3d5fbe86ba494c1776da9a764a0 | # In python define a schema
from pyspark.sql.types import *
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
# create a SparkSession
# main program
if __name__ == "__main__":
# create a SparkSession
spark = (SparkSession
.builder
.appName("Example-3_6")
.getOrCreate())
# Programmatic way to define a schema
fire_schema = StructType([StructField('CallNumber', IntegerType(), True),
StructField('UnitID', StringType(), True),
StructField('IncidentNumber', IntegerType(), True),
StructField('CallType', StringType(), True),
StructField('CallDate', StringType(), True),
StructField('WatchDate', StringType(), True),
StructField('CallFinalDisposition', StringType(), True),
StructField('AvailableDtTm', StringType(), True),
StructField('Address', StringType(), True),
StructField('City', StringType(), True),
StructField('Zipcode', IntegerType(), True),
StructField('Battalion', StringType(), True),
StructField('StationArea', StringType(), True),
StructField('Box', StringType(), True),
StructField('OriginalPriority', StringType(), True),
StructField('Priority', StringType(), True),
StructField('FinalPriority', IntegerType(), True),
StructField('ALSUnit', BooleanType(), True),
StructField('CallTypeGroup', StringType(), True),
StructField('NumAlarms', IntegerType(), True),
StructField('UnitType', StringType(), True),
StructField('UnitSequenceInCallDispatch', IntegerType(), True),
StructField('FirePreventionDistrict', StringType(), True),
StructField('SupervisorDistrict', StringType(), True),
StructField('Neighborhood', StringType(), True),
StructField('Location', StringType(), True),
StructField('RowID', StringType(), True),
StructField('Delay', FloatType(), True)])
sf_fire_file = "C:/Spark/data/sf-fire-calls.csv"
fire_df = spark.read.csv(sf_fire_file, header=True, schema=fire_schema)
few_fire_df = (fire_df
.select("IncidentNumber", "AvailableDtTm", "CallType")
.where(expr("CallType") != "Medical Incident"))
few_fire_df.show(5, truncate=False)
(fire_df
.select("CallType")
.where(expr("CallType").isNotNull())
.agg(countDistinct("CallType").alias("UniqZengler"))
.show())
(fire_df
.select("CallType")
.where(expr("CallType").isNotNull())
.distinct()
.show())
|
23,047 | d6f54178ff879ef3e3c1e568edcc8cff587e6b14 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 17 19:47:03 2018
@author: chuny
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Train/evaluate a linear SVM on the Iris dataset (expects Iris.csv with an
# 'Id' column, four feature columns, and the species label in column 5).
df = pd.read_csv('Iris.csv')
df = df.drop(['Id'], axis=1)
# Bug fix: the original `df.values[:,0:4][1,1]` indexed a single element,
# reducing the feature matrix to one scalar; keep the full (n_samples, 4)
# feature matrix instead.
X = df.values[:, 0:4]
y = df.values[:, 4]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4)
#
clf = SVC(kernel='linear')
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
23,048 | d13c11b2f962160aa11f82839debd45e103188af | #!/usr/bin/env python3
import glob
import json
import os
import argparse
import sys
import re
META_JSON_NAME = '.meta.json'
MITRE_ATTACK_BY_TID = {}
META_FILES = [META_JSON_NAME, '.gitignore', '.gitkeep']
ID_COUNT = 0
parser = argparse.ArgumentParser()
parser.add_argument("phr_root")
parser.add_argument('-f', '--fill-html-template', action='store_true')
parser.add_argument('-t', '--html-template-file', default='graph_template.html')
parser.add_argument('-u', '--url-base', default='https://github.com/JYVSECTEC/PHR-model/tree/master/')
parser.add_argument('-r', '--resolve-mitre-attack-names', action='store_true')
parser.add_argument('-o','--output', type=argparse.FileType('w'), default='-')
def get_meta(folder_path):
    """Load the folder's .meta.json as a dict; {} when the file is absent."""
    meta_path = os.path.join(folder_path, META_JSON_NAME)
    if not os.path.exists(meta_path):
        return {}
    with open(meta_path) as meta_file:
        return json.load(meta_file)
def get_name(folder_path, meta=None):
    """Display name for a folder: meta['name'] when set, else the basename.

    `meta` may be passed to avoid re-reading .meta.json; any falsy value
    triggers a fresh load (matching the original semantics).
    """
    meta = meta or get_meta(folder_path)
    return meta.get('name', os.path.basename(folder_path))
def sort_children(children, meta):
    """Order child entries for display.

    Children named in meta['child_order'] come first, in that order; the rest
    follow, sorted case-insensitively by display name.  Without a
    'child_order' key everything is sorted by name.
    """
    if 'child_order' not in meta:
        return sorted(children, key=lambda child: child['name'].lower())
    child_order = meta['child_order']
    listed = sorted(
        (child for child in children if child['folder_name'] in child_order),
        key=lambda child: child_order.index(child['folder_name']),
    )
    unlisted = sorted(
        (child for child in children if child['folder_name'] not in child_order),
        key=lambda child: child['name'].lower(),
    )
    return listed + unlisted
def make_url(relative_path, options):
    """Join the configured URL base with a repo-relative path."""
    return f'{options.url_base}{relative_path}'
def get_id():
    """Return the next sequential node id (mutates module-level ID_COUNT)."""
    global ID_COUNT
    ID_COUNT += 1
    return ID_COUNT
def import_folder(relative_path, options):
    """Recursively convert a PHR folder tree into a nested node dict.

    relative_path: path relative to options.phr_root ('' for the root).
    Returns a node dict (id, name, children, type, urls, ...) or None when
    the folder is skipped (underscore-prefixed or empty).
    """
    children = []
    # NOTE(review): get_meta/get_name receive the *relative* path, so this
    # only finds .meta.json when the CWD is phr_root — confirm intended.
    meta = get_meta(relative_path)
    name = get_name(relative_path)
    identifier = get_id()
    folder_name = os.path.basename(relative_path)
    full_path = os.path.join(options.phr_root, relative_path)
    # Underscore-prefixed folders are private and excluded from the graph.
    if folder_name.startswith('_'):
        return None
    folder_content_names = [os.path.basename(path) for path in glob.glob(os.path.join(full_path, '*'))]
    folder_content_names = [n for n in folder_content_names if n not in META_FILES]
    if not folder_content_names:
        # Not even a README.md in this folder -> skip
        print("Skip empty folder: %s" % relative_path, file=sys.stderr)
        return None
    # Recurse into sub-folders only; plain files are ignored.
    for sub_content_name in folder_content_names:
        sub_content_path = os.path.join(full_path, sub_content_name)
        if not os.path.isdir(sub_content_path):
            continue
        sub_folder_relative = os.path.join(relative_path, sub_content_name)
        r = import_folder(sub_folder_relative, options)
        if r:
            children.append(r)
    children = sort_children(children, meta)
    # Leaf folders are tools; folders with children are topics.
    folder_type = 'topic'
    if not children:
        folder_type = 'tool'
    # Folders named like MITRE ATT&CK technique IDs (T1234 / T1234.001) get
    # the technique name and link when --resolve-mitre-attack-names is on.
    attack_object = None
    if options.resolve_mitre_attack_names and re.search('^T\d\d\d\d(.\d+)?$', name):
        attack_object = MITRE_ATTACK_BY_TID.get(name)
        if attack_object:
            name = '%s (%s)' % (attack_object['name'], name)
    return {
        'id': identifier,
        'name': name,
        'folder_name': folder_name,
        'children': children,
        'type': folder_type,
        'attack_url': attack_object['url'] if attack_object else None,
        'url': make_url(relative_path, options),
        'relative_path': relative_path
    }
def preload_mitre_attack_enterprise():
    """Populate MITRE_ATTACK_BY_TID from a local enterprise-attack.json.

    Indexes every attack-pattern object by its mitre-attack external id
    (e.g. 'T1059'), keeping name, description and url.
    Raises Exception when the JSON bundle has not been downloaded.
    """
    if not os.path.exists('enterprise-attack.json'):
        raise Exception('Download enterprise-attack.json first.')
    with open('enterprise-attack.json') as in_f:
        enterprise_attack = json.load(in_f)
    for obj in enterprise_attack['objects']:
        if not obj['type'] == 'attack-pattern':
            continue
        # Only the canonical 'mitre-attack' reference carries the T-id.
        refs = [r for r in obj['external_references'] if r['source_name'] == 'mitre-attack']
        if not refs:
            continue
        ref = refs[0]
        MITRE_ATTACK_BY_TID[ref['external_id']] = {
            'name': obj.get('name', ''),
            'description': obj.get('description', ''),
            'url': ref.get('url', '')
        }
def run():
    """CLI entry point: build the folder graph and emit JSON or filled HTML."""
    options = parser.parse_args()
    if options.resolve_mitre_attack_names:
        preload_mitre_attack_enterprise()
    # '' = start at options.phr_root itself.
    result = import_folder('', options)
    if options.fill_html_template:
        # Inline the graph JSON into the HTML template's placeholder.
        with open(options.html_template_file) as in_f:
            template = in_f.read()
        template = template.replace('JSON_PLACEHOLDER', json.dumps(result))
        print(template, file=options.output)
    else:
        print(json.dumps(result, indent=4), file=options.output)

if __name__ == '__main__':
    run()
23,049 | 6bea705eb90084dab450d4b7332128d1b8389dec | from requests import get
import matplotlib.pyplot as plt
from dateutil import parser
# Fetch all measurements from a Raspberry Pi weather-station API endpoint and
# plot ambient temperature over time.
url = 'https://apex.oracle.com/pls/apex/raspberrypi/weatherstation/getallmeasurements/505307'
weather = get(url).json()
# Cleanup: the original had a dead `temps = []` that was immediately
# overwritten by this comprehension; it has been removed.
temps = [record['ambient_temp'] for record in weather['items']]
timestamps = [parser.parse(record['reading_timestamp']) for record in weather['items']]
plt.plot(timestamps, temps)
plt.show()
|
23,050 | 1c87f2c5fd796b2341f8976bf4e33861d2c1063a | from django_elasticsearch_dsl import Document, fields
from django_elasticsearch_dsl.registries import registry
from data.models import Task
@registry.register_document
class TaskDocument(Document):
    """Elasticsearch document mapping for the Task model."""

    # Related object rendered as a nested field; only its name is indexed.
    source = fields.ObjectField(properties={
        'name': fields.TextField(),
    })

    class Index:
        # Single shard, no replicas — dev-scale settings.
        name = 'tasks'
        settings = {'number_of_shards': 1,
                    'number_of_replicas': 0}

    class Django:
        # Source model and the directly-mapped model fields.
        model = Task
        fields = ['name', 'task_id']

    def get_queryset(self):
        # Join `source` up front to avoid N+1 queries during bulk indexing.
        return super(TaskDocument, self).get_queryset().select_related('source')
|
23,051 | bc2693858a73b7909641b15c57b4832d20544a69 | # -*- coding: utf-8 -*-
import cv2
import numpy as np
# Dicrease color
def k_mean_dic_color_step1(img, K=5):
if len(img.shape) > 2:
H, W, C = img.shape
else:
H, W = img.shape
C = 1
# reshape img into 2D
tmp_img = img.reshape(H * W, C)
# select one index randomly
i = np.random.choice(np.arange(H * W), K, replace=False)
color = tmp_img[i].copy()
print(color)
clss = np.zeros((H * W), dtype=int)
# each pixel
for i in range(H * W):
# get distance from base pixel
dis = np.sqrt(np.sum((color - tmp_img[i]) ** 2, axis=1))
# get argmin distance
clss[i] = np.argmin(dis)
# show
out = np.reshape(clss, (H, W)) * 50
out = out.astype(np.uint8)
return out
# Read image as float32 for the distance computation.
# NOTE(review): cv2.imread returns None for a missing file, so .astype would
# then raise AttributeError — consider checking before use.
img = cv2.imread("Jeanne.jpg").astype(np.float32)
# Process image
out = k_mean_dic_color_step1(img)
# Show and save image (resizable window, fixed 512x512 display size)
cv2.namedWindow("result", 0)
cv2.resizeWindow("result", 512, 512)
cv2.imwrite("Myresult/out91.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
23,052 | dd616f361193c2ecd636acef9dc8d7e8a6eec424 | # -*- coding:utf-8 -*-
import re
import time
import requests as rq
from bs4 import BeautifulSoup as bs
url = "https://zh.moegirl.org/index.php?search="
Header = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate",
"accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-US;q=0.7",
"cookie": "_ga=GA1.2.470584180.1586164666; __gads=ID=72a82448ce6715a9:T=1586164666:S=ALNI_MYCddajIL4xS4bvEPX7QVVqo_GBaA; _gid=GA1.2.769480504.1586754366; __cfduid=d62e5dccaba009d71e684f0784839dd3a1586754485; _gat=1",
"referer": "https://mzh.moegirl.org/index.php?search=asdasdas&title=Special:%E6%90%9C%E7%B4%A2&profile=default&fulltext=1&searchToken=ewtploc7i4le4sa8dwskw4avk",
"sec-fetch-dest": "document",
"sec-fetch-mode": "navigate",
"sec-fetch-site": "same-origin",
"sec-fetch-user": "?1",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (iPad; CPU OS 11_0 like Mac OS X) AppleWebKit/604.1.34 (KHTML, like Gecko) Version/11.0 Mobile/15A5341f Safari/604.1",}
class moeGirl:
    """Searches zh.moegirl.org and formats the top results as a text reply."""

    def __init__(self):
        super().__init__()

    def getSearchContent(self, keywords):
        """Return up to three search results for `keywords`, or an error /
        no-result message (in Chinese, as emitted to the end user).

        Bug fix: the original used `while status_code != 200` without ever
        re-issuing the request, so any non-200 status that was not > 400
        (e.g. 204) spun forever; `> 400` also let status 400 fall through
        into the loop.  A single status check replaces the loop.
        """
        res = rq.get(url + keywords, headers=Header)
        if int(res.status_code) != 200:
            return "爬虫错误"
        page = bs(res.text, "html.parser")
        # MediaWiki renders this paragraph when the search found nothing.
        if len(page.select('p[class="mw-search-nonefound"]')) > 0:
            return "没有结果"
        result = page.select('div[class="mw-search-result-heading"]')
        times = len(result)
        reply = "结果如下:\n"
        if times >= 3:
            times = 3
            reply = "结果如下(过多结果取前三):\n"
        for i in range(times):
            reply = reply + result[i].contents[0]["title"] + "\n" + "https://zh.moegirl.org" + result[i].contents[0]["href"] + "\n"
        return reply
|
23,053 | 7cf3bdbf1402aecd5533594343424eb6622fbdf9 | import pandas as pd
from datetime import datetime
from pathlib import Path
import pandas_ta as ta
from Pytrader_API_V1_06 import *
MT = Pytrader_API()
port = 1122 #FXCM MAIN 50k 1:100
list_symbols = ['AUDCAD', 'AUDCHF', 'AUDJPY', 'AUDNZD', 'AUDUSD', 'CADCHF', 'CADJPY', 'EURAUD', 'EURCAD', 'EURCHF', 'EURGBP', 'EURJPY', 'EURUSD', 'CHFJPY', 'GBPAUD', 'GBPCHF', 'GBPJPY', 'GBPUSD', 'NZDCAD', 'NZDJPY', 'NZDUSD', 'USDCAD', 'USDCHF', 'USDJPY']
symbols = {}
for pair in list_symbols:
symbols[pair] = pair
con = MT.Connect(server='127.0.0.1', port=port, instrument_lookup=symbols)
def get_signal(currency, tf='H1', x_bars=600):
    """Compute an EMA-trend / MACD signal for one symbol.

    Args:
        currency: symbol name known to the MT bridge (e.g. 'EURUSD').
        tf: timeframe string understood by ``MT.get_timeframe_value``.
        x_bars: number of most recent bars to request.

    Returns:
        Tuple ``(trend, macd_trend, prev_macd_trend)`` where each item is
        'buy', 'sell' or 'ignore'.
    """
    # BUG FIX: the `tf` argument was silently ignored — the timeframe was
    # hard-coded to 'H1'. Pass the caller's timeframe through instead
    # (default 'H1' keeps existing behavior for existing callers).
    bars = pd.DataFrame(MT.Get_last_x_bars_from_now(instrument=currency, timeframe=MT.get_timeframe_value(tf), nbrofbars=x_bars))
    current_price = bars['close'].loc[len(bars) - 1]
    ema_raw = ta.ema(bars['close'], length=200)
    ema = ema_raw[len(bars) - 1]  # last value of the 200-period EMA
    macd_raw = ta.macd(bars['close'])
    macd_final = pd.concat([bars, macd_raw], axis=1, join='inner')
    macd_curr = macd_final.loc[len(bars) - 1]['MACD_12_26_9']
    macd_signal_curr = macd_final.loc[len(bars) - 1]['MACDs_12_26_9']
    # Values from 5 bars back, used to detect a recent crossover.
    macd_prev = macd_final.loc[len(bars) - 6]['MACD_12_26_9']
    macd_signal_prev = macd_final.loc[len(bars) - 6]['MACDs_12_26_9']
    # Price relative to the long EMA gives the base trend direction.
    if current_price > ema:
        trend = 'buy'
    elif current_price < ema:
        trend = 'sell'
    else:
        trend = 'ignore'
    # MACD direction only counts while both lines are below zero.
    if macd_curr < 0 and macd_signal_curr < 0:
        if macd_curr > macd_signal_curr:
            macd_trend = 'buy'
        elif macd_curr < macd_signal_curr:
            macd_trend = 'sell'
        else:
            macd_trend = 'ignore'
    else:
        macd_trend = 'ignore'
    if macd_prev < 0 and macd_signal_prev < 0:
        if macd_prev > macd_signal_prev:
            prev_macd_trend = 'buy'
        elif macd_prev < macd_signal_prev:
            prev_macd_trend = 'sell'
        else:
            prev_macd_trend = 'ignore'
    else:
        prev_macd_trend = 'ignore'
    return trend, macd_trend, prev_macd_trend
23,054 | 97d71990db6960229083eaef607e96d7f8a7b81a | from tkinter import *
def calculate():
    """Convert the miles in the entry field to km and show the result."""
    # BUG FIX: 1 international mile is exactly 1.609344 km; the original
    # constant 1.609343502101154 was slightly off.
    km_converted['text'] = f'{round(float(entry.get()) * 1.609344)}'


window = Tk()
window.title('Miles to Km Converter')
window.config(padx=20, pady=20)

# Input field for the miles value, pre-filled with '0'.
entry = Entry(width=10)
entry.insert(END, string='0')
entry.grid(row=0, column=1)

mile_text = Label(text='Miles')
mile_text.grid(row=0, column=2)

equal_to_text = Label(text='is equal to')
equal_to_text.grid(row=1, column=0)

# Output label updated by calculate().
km_converted = Label(text='0')
km_converted.grid(row=1, column=1)

km_text = Label(text='Km')
km_text.grid(row=1, column=2)

calc_button = Button(text='Calculate', command=calculate)
calc_button.grid(row=2, column=1)

window.mainloop()
|
23,055 | d89adf72112d1d854aced7c04da686f8f003638b | # 给定一个只包括 '(',')','{','}','[',']' 的字符串,判断字符串是否有效。
# 有效字符串需满足:
# 左括号必须用相同类型的右括号闭合。
# 左括号必须以正确的顺序闭合。
# 注意空字符串可被认为是有效字符串。
# 示例 1:
# 输入: "()"
# 输出: true
# 示例 2:
# 输入: "()[]{}"
# 输出: true
# 示例 3:
# 输入: "(]"
# 输出: false
# 示例 4:
# 输入: "([)]"
# 输出: false
# 示例 5:
# 输入: "{[]}"
# 输出: true
class Solution(object):
    """Validity check for strings made only of '()', '[]' and '{}'.

    A string is valid when every opening bracket is closed by the same
    kind of bracket, in the correct order; the empty string is valid.
    """

    def isValidParentheses(self, s):
        # Map each closing bracket to the opener it must match.
        pairs = {')': '(', ']': '[', '}': '{'}
        pending = []
        for ch in s:
            if ch in pairs:
                # A closer must match the most recent unmatched opener.
                if not pending or pending.pop() != pairs[ch]:
                    return False
            else:
                pending.append(ch)
        # Valid only if every opener was matched.
        return not pending
23,056 | 1097933a5be24168f64275e0655b4c640de4ca91 | # manipulando arquivos
#arq1 = open('arquivo/sherlock.txt', 'r')
#arq1 = 'arquivo/sherlock.txt'
#print(arq1.read()) # mostra conteúdo do arquivo
#print(arq1.tell())
#arq1.seek(3,2)
#arq1.close()
# with open('arquivo01.txt', 'x') as f:
# f.write('Abrindo arquivos em python')
#with open(arq1, 'r+') as f:
# print(f.readlines())
# def soma(x, y):
# print(x+y)
# print(x-y)
# print(x*y)
# print(x/y)
# soma(10, 5)
# soma(2, 6)
# soma(4, 7)
# produtos = []
# def cadastraproduto(produto):
# produtos.append(produto)
# def listarproduto():
# for p in produtos:
# print(p)
# produto = ""
# i = 0
# while produto != "sair":
# produto = input('digite o produto:')
# cadastraproduto(produto)
# print('produto cadastrado')
# listarproduto
# i = i + 1
# print(i)
# print(produtos[0:i-1])
# def printa(*valores):
# print(valores)
# printa('nome', 'sobrenome', 'outro')
# def printa2(**valores):
# print(valores)
# printa2(v1='nome', v2='sobrenome', v3='outro')
# nome = 'Joao'
# def mudanome(novo_nome):
# nome = novo_nome
# return nome
# print(mudanome('Bruno'))
# Sample sentence used to exercise the helpers below.
texto = 'Eu sou um cérebro, Watson. O resto é mero apêndice.'


def split_texto(text):
    """Split *text* on single spaces and return the list of pieces."""
    separator = ' '
    return text.split(separator)


def uppertexto(text):
    """Return *text* converted to upper case."""
    upper_version = text.upper()
    return upper_version


arquivo_novo = split_texto(texto)
print(arquivo_novo)
print(uppertexto(texto))
|
23,057 | 827742d892d8917f63e72a2c12d094e423ebc073 | # 1.
# Вх: список строк, Возвр: кол-во строк
# где строка > 2 символов и первый символ == последнему
def me(words):
    """Count strings longer than two characters whose first character
    equals their last."""
    return sum(1 for w in words if len(w) > 2 and w[0] == w[-1])
# 2.
# Вх: список строк, Возвр: список со строками (упорядочено)
# за искл всех строк начинающихся с 'x', которые попадают в начало списка.
# ['tix', 'xyz', 'apple', 'xacadu', 'aabbbccc'] -> ['xacadu', 'xyz', 'aabbbccc', 'apple', 'tix']
def fx(words):
    """Sort *words* ascending, except strings starting with 'x' are
    sorted among themselves and placed before all the others.

    ['tix', 'xyz', 'apple', 'xacadu', 'aabbbccc']
      -> ['xacadu', 'xyz', 'aabbbccc', 'apple', 'tix']
    """
    starts_with_x = sorted(w for w in words if w[0] == 'x')
    the_rest = sorted(w for w in words if w[0] != 'x')
    return starts_with_x + the_rest
# 3.
# Вх: список непустых кортежей,
# Возвр: список сортир по возрастанию последнего элемента в каждом корт.
# [(1, 7), (1, 3), (3, 4, 5), (2, 2)] -> [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
def cort(numbers):
    """Sort *numbers* (a list of non-empty tuples) in place by each
    tuple's last element, and return the same list."""
    numbers.sort(key=lambda tup: tup[-1])
    return numbers
def test(res, expt):
print("Test result: " + str(expt == res))
print("Actual: " + str(res) + "; Expected: " + str(expt))
return res == expt
def main():
    """Run the ad-hoc test suite for me(), fx() and cort().

    NOTE(review): some cases repeat the same input with two different
    expected values (e.g. the two 'ssss42a' lines, the two identical fx
    and cort inputs), so one of each pair necessarily prints False —
    presumably to demonstrate a failing case; confirm before "fixing".
    """
    print("Test 'me:'")
    test(me(['a', 'bbb', 'ads', 'ssss', 'aveg']), 2)
    test(me(['a', 'bwb', 'dddddwd', 'ssss', 'aveg']), 3)
    test(me(['arr', '5532', '11', 'ssss42a', 'avega']), 0)
    test(me(['arr', '5532', '11', 'ssss42a', 'avega']), 1)
    print("Test 'fx:'")
    test(fx(['tix', 'xyz', 'apple', 'xacadu', 'aabbbccc']), ['xacadu', 'xyz', 'aabbbccc', 'apple', 'tix'])
    test(fx(['bba', 'aaa', 'xsa', 'bab', 'xas']), ['xas', 'xsa', 'aaa', 'bab', 'bba'])
    test(fx(['bba', 'aaa', 'xsa', 'bab', 'xas']), ['xas', 'xsa', 'bba', 'bab', 'aaa'])
    print("Test 'cort:'")
    test(cort([(1, 7), (1, 3), (3, 4, 5), (2, 2)]), [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
    test(cort([(1, 7), (1, 3), (3, 4, 5), (2, 2)]), [(2, 2), (1, 7), (3, 4, 5), (1, 3)])
    test(cort([(10, 7, 8), (1, 3), (3, 5), (2, 2)]), [(2, 2), (1, 3), (3, 5), (10, 7, 8)])
if __name__ == '__main__':
    main()
|
23,058 | 53849417616f9e30ce077734c65f04256236c70a | """Run model"""
from sqlalchemy.sql.expression import and_
from sqlalchemy import Table, Column, ForeignKey
from sqlalchemy.types import Integer, String, DateTime, Text, Unicode, Boolean
from sqlalchemy.orm import relationship, backref, relation
from sqlalchemy.schema import ForeignKeyConstraint
from pynformatics.model.meta import Base
from pynformatics.model import User, SimpleUser
import datetime
class Stars(Base):
    """SQLAlchemy model for a user's starred link (table moodle.mdl_stars)."""
    __tablename__ = "mdl_stars"
    __table_args__ = {'schema': 'moodle'}
    id = Column(Integer, primary_key=True)
    # Owning user; FK into the Moodle user table.
    user_id = Column(Integer, ForeignKey('moodle.mdl_user.id'))
    # user = relationship(SimpleUser, backref = backref('simpleuser1'), uselist=False, lazy=False, primaryjoin = user_id == SimpleUser.id)
    title = Column(Unicode)
    link = Column(Unicode)
    def __init__(self, user, title, link):
        """Create a star owned by *user* pointing at *link*, titled *title*."""
        self.user_id = user.id
        self.title = title
        self.link = link
    def __json__(self, request):
        """Serialize this row for a JSON response."""
        field_names = ('id', 'user_id', 'title', 'link')
        return {name: getattr(self, name) for name in field_names}
|
23,059 | ef5f9e8d90d2af3887d0a824a53fc4b5cdbf476f | """Tasks related to testing code"""
import logging
import json
import os
import re
from inspect import getfile
from pathlib import Path
from time import sleep
from typing import Callable, List, Optional, Union, Dict
from unittest import mock
from functools import wraps
from requests import Response, ConnectionError
from tamr_unify_client.operation import Operation
from tamr_toolbox import utils
# Module-level logger for these API-mocking utilities.
LOGGER = logging.getLogger(__name__)
# Characters that are illegal in Windows file names, mapped to the token
# used to replace them when building response-log file names.
WINDOWS_RESERVED_CHARACTER_MAP = {
    "<": "lt",
    ">": "gt",
    ":": "colon",
    '"': "dquote",
    "/": "fslash",
    "\\": "bslash",
    "|": "pipe",
    "?": "qmark",
    "*": "asterisk",
}
def _response_to_json(resp: Response, ip_dict: Dict[str, int]) -> str:
    """Serialize a Tamr API response into the json format consumed by the
    `responses` mocking library.

    Args:
        resp: Response from a Tamr API call
        ip_dict: Mapping of previously encountered IP addresses to their
            anonymization number

    Returns:
        The response represented as a json string
    """
    # Make sure the body can be decoded even when no charset was declared.
    if resp.encoding is None:
        resp.encoding = "utf-8"
    record = dict(
        method=resp.request.method,
        url=_anonymize_url(resp.request.url, ip_dict),
        status=resp.status_code,
        content_type=resp.headers.get("Content-Type"),
        body=resp.text,
    )
    return json.dumps(record, ensure_ascii=False)
def _anonymize_url(url: str, ip_dict: Dict[str, int]) -> str:
"""Returns a anonymized url. Updates the dictionary inplace if a new ip is encountered
Args:
url: A URL
ip_dict: Previously encountered IP addresses and an assigned numeric value
Returns:
URL with the IP address anonymized
"""
regex_match = re.match(r"(?i)(^https?://)(.*?)([/:].*$)", url)
ip = regex_match.group(2)
try:
num = ip_dict[ip]
except KeyError:
ip_dict[ip] = len(ip_dict.values()) + 1
num = ip_dict[ip]
return f"{regex_match.group(1)}ip-{num:05d}{regex_match.group(3)}"
def _collect_operation_calls(
    *, response: Response, poll_interval_seconds: int = 3
) -> List[Response]:
    """If the provided response is an Operation, wait for the operation to complete and
    return responses related to that operation.

    Args:
        response: A previous Response generated from the same Tamr client
        poll_interval_seconds: Time interval (in seconds) between subsequent polls

    Returns:
        Responses related to polling the operation
    """
    client = utils.client._from_response(response)
    op = Operation.from_response(client, response)
    LOGGER.info(f"Waiting for operation to complete: {op}")
    # Snapshot of the operation endpoint taken before polling begins.
    request_while_pending = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
    while op.state == "PENDING":
        op = op.poll()
        sleep(poll_interval_seconds)
    # NOTE(review): this GET happens only after the PENDING loop exits, so
    # the operation may already be finished — "while running" is
    # best-effort; confirm that is the intent.
    request_while_running = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
    op.wait()
    request_when_complete = client.get(endpoint=f"/api/versioned/v1/operations/{op.resource_id}")
    return [request_while_pending, request_while_running, request_when_complete]
def _log_response(*, log_path: Path, ip_dict: Dict[str, int], response: Response) -> None:
    """Append *response* to *log_path* as ndjson.

    When the response is a GET on a Tamr operation endpoint, the
    operation is polled to completion and those follow-up responses are
    appended as well.

    Args:
        log_path: File to write the response to
        ip_dict: Mapping of previously encountered IP addresses to their
            anonymization number
        response: The response to log
    """
    LOGGER.info(f"logged request: {response.url}")
    with log_path.open(mode="a", encoding="utf-8") as f:
        to_log = [response]
        # Poll and wait for operations, if applicable.
        hits_operation_endpoint = bool(
            re.match(".*/api/versioned/v1/operations/.*", response.url)
        )
        if response.request.method == "GET" and hits_operation_endpoint:
            to_log.extend(_collect_operation_calls(response=response))
        lines = [f"{_response_to_json(r, ip_dict)}\n" for r in to_log]
        f.writelines(lines)
def _build_response_log_path(
    *, test_func: Callable, response_logs_dir: Optional[Union[str, Path]], **kwargs,
) -> Path:
    """Build the ndjson file path for a test's recorded API responses.

    Args:
        test_func: The test function
        response_logs_dir: Directory for the log file; when None, a
            directory named after the test module inside `response_logs`
            next to the test file is used.
        **kwargs: Arguments to the test function

    Returns:
        File path for the API response logs
    """
    # Encode the test arguments into the file name.
    test_params = "_".join(f"{name}={value}" for name, value in kwargs.items())
    # Strip characters that are not legal in Windows file names.
    for char, replacement in WINDOWS_RESERVED_CHARACTER_MAP.items():
        test_params = test_params.replace(char, replacement)
    if test_params:
        test_params = "__" + test_params
    if response_logs_dir is None:
        # Default: <dir of test file>/response_logs/<test module name>
        dir_matcher = re.match(r"(.*)(?:\\|/)(.*).py", str(Path(getfile(test_func))))
        response_logs_dir = f"{dir_matcher.group(1)}/response_logs/{dir_matcher.group(2)}"
    return Path(f"{response_logs_dir}/{test_func.__name__}{test_params}.ndjson")
def mock_api(
    *, response_logs_dir: Optional[Union[str, Path]] = None, enforce_online_test=False
) -> Callable:
    """Decorator for `pytest` tests that replays pre-recorded API responses.

    When no recording exists (or *enforce_online_test* is set) the test
    runs against a live Tamr instance and the responses are recorded for
    future offline runs.

    Args:
        response_logs_dir: Directory to read/write response logs
        enforce_online_test: Whether an online test should be run, even if
            a response log already exists

    Returns:
        Decorated function
    """

    def wrap(test_function: Callable):
        @wraps(test_function)
        def wrapped(**kwargs):
            log_file = _build_response_log_path(
                test_func=test_function, response_logs_dir=response_logs_dir, **kwargs,
            )
            # Deleting the recording forces a fresh online run below.
            if enforce_online_test and log_file.exists():
                log_file.unlink()
            if not log_file.exists():
                _run_online_test(
                    response_log_path=log_file, test_function=test_function, **kwargs
                )
                return
            try:
                LOGGER.info(f"Running offline test based on file at {log_file}")
                _run_offline_test(
                    response_log_path=log_file, test_function=test_function, **kwargs,
                )
            except ConnectionError as e:
                msg = (
                    f"A required API call was missing from response logs file for this "
                    f"offline test ({log_file}). The response log file must be "
                    f"regenerated. Delete the existing file to automatically regenerate a "
                    f"new one. Caused by: {e}"
                )
                LOGGER.error(msg)
                raise ConnectionError(e)

        return wrapped

    return wrap
# Handle ModuleNotFoundError to allow tamr_toolbox to be used when the optional dependency
# `responses` is not installed
try:
    import responses
    # Original responses.RequestsMock._find_match (the previous comment
    # incorrectly said "_real_send function of requests").
    _BASE_FIND_MATCH = responses.RequestsMock._find_match
    # Stores the original _real_send function of responses
    _BASE_SEND_REAL = responses._real_send
    @responses.activate
    @mock.patch.object(Operation.wait, "__defaults__", (0, None))  # sets operation wait time to 0
    def _run_offline_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
        """Runs a test function against saved API responses located in a file

        Args:
            response_log_path: Location of saved API responses
            test_function: The function to test
            **kwargs: Keyword arguments for the test function
        """
        # Register every recorded response with the `responses` mock.
        with response_log_path.open(encoding="utf-8") as f:
            for line in f:
                response = json.loads(line)
                responses.add(**response)
        ip_lookup = {}
        def _find_anonymized_match(self, request):
            """Allows responses library to match requests for an ip address to match to an
            anonymized ip address
            """
            # Outgoing URLs are anonymized the same way the logs were written.
            request.url = _anonymize_url(request.url, ip_lookup)
            return _BASE_FIND_MATCH(self, request)
        with mock.patch("responses.RequestsMock._find_match", new=_find_anonymized_match):
            test_function(**kwargs)
    @responses.activate
    def _run_online_test(response_log_path: Path, test_function: Callable, **kwargs) -> None:
        """Runs a test function against a Tamr instance and saves the API responses to a file

        Args:
            response_log_path: Location to save API responses
            test_function: The function to test
            **kwargs: Keyword arguments for the test function
        """
        LOGGER.info(
            f"Online test running against Tamr instance. "
            f"Creating new file at {response_log_path}. This may take a while ..."
        )
        os.makedirs(response_log_path.parent, exist_ok=True)
        response_log_path.touch()
        # Each time an API call is made, allow it to pass through responses and make a real call
        # Each time a real call is made, log the response in the response file
        responses.add_passthru(re.compile(".*"))
        ip_lookup = {}
        def _send_real_with_log(*args, **kwargs) -> Response:
            """Logs the response from BASE_SEND_REAL

            Args:
                *args: The positional arguments for BASE_SEND_REAL
                **kwargs: The keyword arguments for BASE_SEND_REAL

            Returns:
                The response from the call
            """
            response = _BASE_SEND_REAL(*args, **kwargs)
            # Prevent recursion
            with mock.patch("responses._real_send", new=_BASE_SEND_REAL):
                _log_response(log_path=response_log_path, response=response, ip_dict=ip_lookup)
            return response
        with mock.patch("responses._real_send", new=_send_real_with_log):
            test_function(**kwargs)
        # Setting the passthru above permanently changes state for online testing
        # Reset passthru to default
        responses.mock.passthru_prefixes = ()
        responses._default_mock.passthru_prefixes = ()
except ModuleNotFoundError as err:
    # Ensure exception is due to responses package being missing
    if err.msg != "No module named 'responses'":
        raise err
    def _run_offline_test(*args, **kwargs):
        """Dummy function to raise the appropriate exception if the function is called without the
        necessary package installed
        """
        import responses  # noqa: F401
    def _run_online_test(*args, **kwargs):
        """Dummy function to raise the appropriate exception if the function is called without the
        necessary package installed
        """
        import responses  # noqa: F401
|
23,060 | 2c1fde66e99972513237f54674b67c7e4f563011 | #!/usr/bin/env python
from itertools import cycle
import numpy as np
from vispy import app, scene
import vispy.io as vispy_file
from vispy.visuals.transforms import STTransform
from vispy.gloo.util import _screenshot
def main(seq=0):
    """Render configuration-space volume *seq* with vispy and show it.

    Loads `config_space/{seq}_space.npy`, `{seq}_space_1.npy` and
    `{seq}_path.npy`, draws an iso-surface of the combined volume plus
    start/goal cubes, and installs an 'S' key handler that saves a
    screenshot. NOTE(review): the path arrays are indexed as (z, y, x)
    when building translations — confirm against how they were saved.
    """
    # Read volume
    vol1 = np.load('config_space/{}_space.npy'.format(seq))
    vol2 = np.load('config_space/{}_space_1.npy'.format(seq))
    # Weighted sum so the two occupancy sources get distinct iso levels.
    vol = vol1*5 + vol2 * 10
    path = np.load('config_space/{}_path.npy'.format(seq))
    # Prepare canvas
    canvas = scene.SceneCanvas(keys='interactive', size=(1920, 1080), show=True)
    canvas.measure_fps()
    # Set up a viewbox to display the image with interactive pan/zoom
    view = canvas.central_widget.add_view()
    # Set whether we are emulating a 3D texture
    volume1 = scene.visuals.Volume(
        vol, clim=[0., 10.], threshold=0.225,
        emulate_texture=False, relative_step_size=1.5,
        method='iso', parent=view.scene)
    # Red cube at the first path point, green cube at the last.
    cube1 = scene.visuals.Cube(size=5, color='red', edge_color='black', parent=view.scene)
    cube1.transform = scene.transforms.STTransform(translate=(path[0][2], path[0][1], path[0][0]))
    cube2 = scene.visuals.Cube(size=5, color='green', edge_color='black', parent=view.scene)
    cube2.transform = scene.transforms.STTransform(translate=(path[-1][2], path[-1][1], path[-1][0]))
    # Create camera Arcball
    view.camera = scene.cameras.ArcballCamera(parent=view.scene, fov=0., name='Arcball')
    # Create an XYZAxis visual
    # axis = scene.visuals.XYZAxis(parent=view)
    # s = STTransform(translate=(50, 50), scale=(50, 50, 50, 1))
    # affine = s.as_matrix()
    # axis.transform = affine
    @canvas.connect
    def on_key_press(ev):
        # Save a screenshot when 'S' is pressed.
        # NOTE(review): `ev.key.name in 'S'` is a substring test; an empty
        # key name would also match — confirm this is intentional.
        print(ev.key.name)
        if ev.key.name in 'S':
            print("Saving...")
            res = _screenshot()
            vispy_file.write_png('config_space/{}_shot.png'.format(seq), res)
            print("Done")
if __name__ == '__main__':
    # Render each recorded sequence; app.run() blocks until the window closes.
    for no in [3520, 4960, 8320, 12320]:
        main(seq=no)
        app.run()
|
23,061 | 9427e1b158ae9249b95268f8f9562b6569f48b5b | import itertools
def get_data():
    """Read data/data_09.txt and return its lines as a list of ints."""
    with open("data/data_09.txt") as data_file:
        raw = data_file.read()
    return [int(line) for line in raw.split("\n")]
def fetch_preamble(start, length):
    """Return *length* right-stripped lines from the data file starting
    at line index *start*."""
    with open("data/data_09.txt") as f:
        window = itertools.islice(f, start, start + length)
        return [line.rstrip() for line in window]
def check_sums(preamble, number):
    """Return True if some pair of DISTINCT entries in *preamble* sums
    to *number*.

    Entries may be strings (as produced by fetch_preamble) or ints.

    BUG FIX: the original iterated `preamble[i:]`, pairing each value
    with itself, so `number == 2 * value` was wrongly accepted; it also
    kept scanning after a match instead of returning immediately.
    """
    values = [int(v) for v in preamble]
    for i, first in enumerate(values):
        # Start at i + 1 so a value is never paired with itself.
        for second in values[i + 1:]:
            if first + second == number:
                return True
    return False
def find_contiguous(data, number):
    """Return the first contiguous run of *data* (scanning start
    positions left to right) whose sum equals *number*, or [] if none.

    Keeps a running total instead of re-summing the slice after every
    append, turning the original O(n^3) scan into O(n^2) with identical
    results.
    """
    for start in range(len(data)):
        contig = []
        running_total = 0
        for value in data[start:]:
            contig.append(value)
            running_total += value
            if running_total == number:
                return contig
    return []
def calculate_answer(data):
    """Return the sum of the smallest and largest values in *data*.

    FIX (robustness): the original sorted *data* in place, silently
    reordering the caller's list as a side effect; min()/max() give the
    same result without mutating the input.
    """
    return min(data) + max(data)
# Driver: find the first value not expressible as a sum of two of the 25
# preceding values, then the contiguous run summing to it (AoC day 9).
data_set = get_data()
total = len(data_set)
preamble_length = 25
remaining_data = data_set[preamble_length:]
for counter, value in enumerate(remaining_data):
    preamble = fetch_preamble(counter, preamble_length)
    result = check_sums(preamble, value)
    if not result:
        # NOTE(review): if every value passes, invalid_number is never
        # bound and the line below raises NameError; also only the LAST
        # failing value is kept — confirm that is intended.
        invalid_number = value
        print(f"Value {value} has failed")
weak_set = find_contiguous(data_set, invalid_number)
answer = calculate_answer(weak_set)
print(answer)
23,062 | c96012365db03715354e0382357f48093939793b | import pytest
import torch
from torch.testing import assert_allclose
from torch.autograd import gradcheck
from kornia.feature import HardNet
import kornia.testing as utils # test utils
class TestHardNet:
    """Smoke tests for the HardNet local feature descriptor."""

    def test_shape(self, device):
        single_patch = torch.ones(1, 1, 32, 32, device=device)
        descriptor_net = HardNet().to(device)
        descriptor_net.eval()  # batchnorm with size 1 is not allowed in train mode
        descriptors = descriptor_net(single_patch)
        assert descriptors.shape == (1, 128)

    def test_shape_batch(self, device):
        patch_batch = torch.ones(16, 1, 32, 32, device=device)
        descriptor_net = HardNet().to(device)
        descriptors = descriptor_net(patch_batch)
        assert descriptors.shape == (16, 128)

    @pytest.mark.skip("jacobian not well computed")
    def test_gradcheck(self, device):
        patches = torch.rand(2, 1, 32, 32, device=device)
        patches = utils.tensor_to_gradcheck_var(patches)  # to var
        net = HardNet().to(patches.device, patches.dtype)
        assert gradcheck(net, (patches,), eps=1e-4, atol=1e-4,
                         raise_exception=True, )
|
23,063 | 92ab2372383b6535e72d2471cad2c32102f9a23a | # -*- coding: utf-8 -*-
"""Gradient Descent vs Simple Genetic Algorithm.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sZ4WcFcckQmL7kWB7Z8AHsAzP5nV-KJq
# Gradient Descent vs Simple Genetic Algorithm
## Gradient Descent implementation
"""
import numpy as np
coeff = np.array([2.0, -2.0, 3.0, 1.0, -1.0, 8.0])  # gradient of the linear objective sum(coeff * x)
def grad_descent(co, x, lr, num_iters):
    """Take *num_iters* gradient steps on the linear objective sum(co * x).

    *x* is updated in place (ndarray `-=`); each step prints the current
    candidate and objective value.
    """
    for step in range(num_iters):
        x -= lr * co
        candidate_txt = 'candidate:' + str(x)
        value_txt = ' value:' + str(np.sum(co * x))
        print(candidate_txt, ' ', value_txt)
# Run gradient descent from the origin: 12 steps, learning rate 0.1.
x = np.zeros(6)
grad_descent(coeff, x, 0.1, 12)
"""## Simple Genetic Algorithm implementation"""
import numpy as np
import numpy.random as npr
coeff = np.array([2.0, -2.0, 3.0, 1.0, -1.0, 8.0])  # objective coefficients (redefined for the GA section)
coeff_size = coeff.size # size of chromosome for each candidate
candidates = 8 # 8 candidates per generations
pop_size = (candidates, coeff_size)
# Initial population: genes drawn uniformly from [-3, 3).
curr_pop = npr.uniform(low = -3.0, high = 3.0, size = pop_size) # generate random intial population
num_generations = 50
num_mating = 4  # number of parents kept each generation
def mating_pool(pop, fitness, num_mat):
    """Select the *num_mat* fittest rows of *pop* as parents.

    Args:
        pop: (n_candidates, chromosome_len) population array.
        fitness: fitness per candidate; MUTATED in place — selected
            entries are overwritten with a large negative sentinel.
        num_mat: number of parents to select.

    Returns:
        (num_mat, chromosome_len) array holding the best candidates.

    BUG FIX: the original located the best row with
    ``np.where(fitness == np.max(fitness))``, which returns ALL indices
    on a tie and made the row assignment fail with a shape mismatch.
    ``np.argmax`` deterministically picks the first maximum instead.
    """
    parents = np.empty((num_mat, pop.shape[1]))
    for slot in range(num_mat):
        best = np.argmax(fitness)
        parents[slot, :] = pop[best, :]
        # Knock the winner out so the next-best is selected next.
        fitness[best] = -1000000000000000
    return parents
def crossover(parents, num_children):
    """Produce offspring via single-point crossover at gene index 4.

    Child i takes genes [0, 4) from parent i (mod #parents) and the
    remaining genes from parent i+1 (mod #parents).
    """
    children = np.empty(num_children)
    cut = 4
    n_parents = parents.shape[0]
    for child_idx in range(num_children[0]):
        first = child_idx % n_parents
        second = (child_idx + 1) % n_parents
        children[child_idx, :cut] = parents[first, :cut]
        children[child_idx, cut:] = parents[second, cut:]
    return children
def mutate(children_crossover):
    """Add uniform(-1, 1) noise to one randomly chosen gene of every
    child. The array is modified in place and also returned."""
    n_children, n_genes = children_crossover.shape
    for child in range(n_children):
        delta = npr.uniform(-1.0, 1.0, 1)
        gene = npr.randint(n_genes)
        children_crossover[child, gene] += delta
    return children_crossover
for generation in range(num_generations):
    # NOTE(review): lastgen is a reference to curr_pop, not a copy, so it
    # always reflects the latest population — confirm intent.
    lastgen = curr_pop
    # Fitness is the negated objective: maximizing it minimizes sum(coeff*x).
    fitness = -1 * (np.sum(curr_pop * coeff, axis = 1)) # calculate fitness of each candidate
    print('best value of generation: '+str(-1*np.max(fitness)))
    parents = mating_pool(curr_pop, fitness, num_mating)
    children_crossover = crossover(parents, num_children = (candidates-num_mating, coeff_size))
    children_mutation = mutate(children_crossover)
    curr_pop[0:num_mating,:] = parents # carry the best parents through to next generation
    curr_pop[num_mating:,:] = children_mutation # add the offspring to new population
print(lastgen)
print(np.sum(curr_pop * coeff, axis = 1)) # objective values (NOT negated fitness) for the final population
23,064 | 93b0e249040a7744bb5f40c39f80c6bcf72c4c80 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 14:56:33 2019
@author: adrian.perez
"""
# Hierarchical clustering
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset (columns 3 and 4: annual income and spending score)
dataset = pd.read_csv("Mall_customers.csv")
X=dataset.iloc[:,[3,4]].values
# Use the dendrogram method to find the optimal number of clusters
import scipy.cluster.hierarchy as sch
dendograma=sch.dendrogram(sch.linkage(X,method="ward"))
plt.title("Dendograma")
plt.xlabel("Clientes")
plt.ylabel("Distancia Euclidea")
plt.show()
# Fit hierarchical (agglomerative) clustering to the data
from sklearn.cluster import AgglomerativeClustering
hc=AgglomerativeClustering(n_clusters=5,affinity="euclidean",linkage="ward")
y_hc=hc.fit_predict(X)
# Plot the clusters, one scatter per cluster label
plt.scatter(X[y_hc==0,0],X[y_hc==0,1],s=100,c="red",label="Cluster 1")
plt.scatter(X[y_hc==1,0],X[y_hc==1,1],s=100,c="green",label="Cluster 2")
plt.scatter(X[y_hc==2,0],X[y_hc==2,1],s=100,c="blue",label="Cluster 3")
plt.scatter(X[y_hc==3,0],X[y_hc==3,1],s=100,c="cyan",label="Cluster 4")
plt.scatter(X[y_hc==4,0],X[y_hc==4,1],s=100,c="magenta",label="Cluster 5")
#plt.scatter(hc.cluster_centers_[:,0],hc.cluster_centers_[:,1],s=300,c="yellow",label="Baricentros")
plt.title("Cluster de clientes")
plt.xlabel("Ingresos Anuales $")
plt.ylabel("Puntuación de Gastos")
plt.legend()
plt.show()
23,065 | e1ad7e6ab8e54e7f1d56bfbcfeec3dec3fe91dd1 | # coding:utf-8
from .BaseMsg import BaseMsg, FILE_CONTROL_COMMAND
# Sub-command / status codes carried inside file-control messages.
FILE_SEND_DATA = 1
FILE_DELETE = 2
FILE_LIST = 3
FILE_RET_OK = 4
FILE_RET_FAILED = 5
class FileMsg(BaseMsg):
    """File-control message: a BaseMsg whose cmd is FILE_CONTROL_COMMAND."""
    def __init__(self):
        super(FileMsg, self).__init__()
        # Tag this message with the file-control command code.
        self.cmd = FILE_CONTROL_COMMAND
|
23,066 | 3f46756cc600ebbc0b432cd5c877b87962fc9e57 | #!/usr/bin/env python
import commands
# Env var name consumed by the package-check harness.
# NOTE(review): purpose inferred from the name only — confirm against callers.
CheckEnvVariable = 'TEST_PACKAGE_CHECK_EVAL_STRING'
def rev_parse(folder, revision):
    """Return the commit hash of *revision* in the git repo at *folder*.

    Python 2 only (`commands` module). NOTE(review): *folder* and
    *revision* are interpolated into a shell command without quoting —
    shell-injection risk if either value comes from untrusted input.
    """
    return commands.getstatusoutput('git -C %s rev-parse %s' % (folder, revision))[1].strip();
|
23,067 | 7184668a9fa3e602115428833bf899c09fa115f7 | import pytest
from tests.lib import trove_tester, InvalidClassifier
@pytest.mark.parametrize(
    "classifiers, deprecated_classifiers",
    [
        # Valid hierarchy: the parent of the nested classifier is present.
        (
            {
                "Foo :: Bar",
                "Foo :: Bar :: Baz",
            },
            {},
        ),
        # Deprecated classifier whose replacement is a valid classifier.
        ({"Foo :: Bar"}, {"Biz :: Baz": ["Foo :: Bar"]}),
    ],
)
def test_success(classifiers, deprecated_classifiers):
    """Valid classifier sets pass trove validation without raising."""
    trove_tester(classifiers, deprecated_classifiers)
@pytest.mark.parametrize(
    "classifiers, deprecated_classifiers, expected",
    [
        # Missing / malformed hierarchy cases.
        (
            {"Foo", "Foo :: Bar"},
            {},
            "Top-level classifier 'Foo' is invalid",
        ),
        ({"Foo :: Bar :: Baz"}, {}, "Classifier 'Foo :: Bar' is missing"),
        (
            {
                "Foo :: Bar",
            },
            {"Biz :: Baz": ["Bing :: Bang"]},
            "Classifier 'Bing :: Bang' does not exist",
        ),
        (
            {
                "Foo :: Bar",
            },
            {"Foo :: Bar": []},
            "Classifier 'Foo :: Bar' in both valid and deprecated classifiers",
        ),
        # 'Private' prefix is rejected case-insensitively, at any level.
        ({"Private :: Foo"}, {}, "Classifiers starting with 'Private' are invalid"),
        ({"private :: Foo"}, {}, "Classifiers starting with 'Private' are invalid"),
        ({"Foo :: Private"}, {}, "Classifiers starting with 'Private' are invalid"),
        ({"Foo :: private"}, {}, "Classifiers starting with 'Private' are invalid"),
        # Surrounding whitespace and stray colons are rejected.
        (
            {" Foo :: Bar"},
            {},
            "Classifiers starting or ending with whitespace are invalid",
        ),
        (
            {"Foo :: Bar "},
            {},
            "Classifiers starting or ending with whitespace are invalid",
        ),
        (
            {"Foo: :: Bar"},
            {},
            "Classifiers containing ':' are invalid",
        ),
        (
            {"Foo :: B:ar"},
            {},
            "Classifiers containing ':' are invalid",
        ),
        (
            {"Foo :: Bar: Baz"},
            {},
            "Classifiers containing ':' are invalid",
        ),
    ],
)
def test_failure(classifiers, deprecated_classifiers, expected):
    """Invalid classifier sets raise InvalidClassifier with the exact message."""
    with pytest.raises(InvalidClassifier) as excinfo:
        trove_tester(classifiers, deprecated_classifiers)
    assert excinfo.value.args == (expected,)
|
23,068 | 428831c2c4a17d007598febbcc6888fe2cef44d0 | import numpy as np
import irm
import models
import glob
import pandas
def compute_prob_matrix(tgt_latent, tgt_data, model_name='LogisticDistance'):
    """
    Compute the probability of a connection at EVERY LOCATION in the matrix

    Does not depend on the actual observed values of data
    """
    suffstats = tgt_latent['relations']['R1']['ss']
    assignment = tgt_latent['domains']['d1']['assignment']
    hps = tgt_latent['relations']['R1']['hps']
    data_conn = tgt_data['relations']['R1']['data']
    N = data_conn.shape[0]
    pred = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            # Component parameters for this pair's (cluster_i, cluster_j).
            comp = suffstats[(assignment[i], assignment[j])]
            if model_name == "LogisticDistance":
                # Distance-modulated probability, rescaled into [p_min, p_max].
                dist = data_conn['distance'][i, j]
                p = irm.util.logistic(dist, comp['mu'], comp['lambda'])
                p = p * (hps['p_max'] - hps['p_min']) + hps['p_min']
            elif model_name == "BetaBernoulliNonConj":
                p = comp['p']
            else:
                raise NotImplementedError()
            pred[i, j] = p
    return pred
|
23,069 | 42b629c521988d6b15cda057a255c80cda314a7a | import datetime
import pandas as pd
import numpy as np
from util import log, timeit
def parse_time(xtime: pd.Series):
    """Expand a unix-seconds Series into calendar feature columns.

    Returns a DataFrame holding the original epoch seconds plus
    categorical year/month/day/weekday/hour/dayofyear/quarter columns,
    all named after the input series.
    """
    stamps = pd.to_datetime(xtime, unit='s')
    columns = {
        f'{xtime.name}': stamps.astype('int64') // 10**9,  # back to epoch seconds
        f'{xtime.name}_year': stamps.dt.year.astype('category'),
        f'{xtime.name}_month': stamps.dt.month.astype('category'),
        f'{xtime.name}_day': stamps.dt.day.astype('category'),
        f'{xtime.name}_weekday': stamps.dt.weekday.astype('category'),
        f'{xtime.name}_hour': stamps.dt.hour.astype('category'),
        f'{xtime.name}_dayofyear': stamps.dt.dayofyear.astype('category'),
        f'{xtime.name}_quarter': stamps.dt.quarter.astype('category'),
    }
    return pd.DataFrame(columns)
class TypeAdapter:
    """Converts object columns to categoricals and derives per-key
    time-lag features for an AutoML time-series pipeline."""
    def __init__(self, primitive_cat, primary_timestamp, y, label, info):
        # Columns to treat as categorical (copied so the caller's list
        # is not mutated when object columns are appended later).
        self.adapt_cols = primitive_cat.copy()
        self.time = primary_timestamp
        self.y = y
        self.label= label
        self.info = info
    @timeit
    def fit_transform(self, X):
        """Fill NAs, cast categorical columns, and add per-key lag features.

        X is MUTATED in place and also returned.
        """
        cols_dtype = dict(zip(X.columns, X.dtypes))
        self.fill_na(X)
        for key, dtype in cols_dtype.items():
            # Any object-dtype column is treated as categorical from now on.
            if dtype == np.dtype('object'):
                self.adapt_cols.append(key)
            if key in self.adapt_cols:
                X[key] = X[key].astype('category')
                #X[key+'hash'] = X[key].apply(hash_m)
                # NOTE(review): X is copied and re-sorted once per
                # categorical column — expensive for wide frames.
                X_copy = X.copy()
                X_copy[self.time] = pd.to_datetime(X_copy[self.time], unit='s')
                X_copy =pd.concat([X_copy, self.y], axis=1)
                # Days since the previous row with the same key value.
                # NOTE(review): 'timediff' is overwritten by each key in
                # turn, so only the last categorical column's diff survives.
                X['timediff'] = X_copy.sort_values([key, self.time]).groupby(key)[self.time].diff().dt.days
                #for i in X[key].unique():
                # NOTE(review): this is a single scalar (global unique
                # count), not a per-value count — confirm intent.
                X[key + 'Unique_Count'] = len(X[key].unique())
                # Label lagged by 1/2/7/30 rows within each key group.
                X[key+'t-1'] = X_copy.sort_values(self.time).groupby([key])[self.label].shift(1)
                X[key+'t-2'] = X_copy.sort_values(self.time).groupby([key])[self.label].shift(2)
                X[key+'t-7'] = X_copy.sort_values(self.time).groupby([key])[self.label].shift(7)
                X[key+'t-30'] = X_copy.sort_values(self.time).groupby([key])[self.label].shift(30)
                #X['timediff']= pd.to_datetime(X['timediff'],unit='s')
                # NOTE(review): fillna result is discarded (no inplace=True
                # and no assignment) — this line has no effect as written.
                X['timediff'].fillna(datetime.datetime(1970, 1, 1))
                #X['timediff'].dt.days.astype('str')
        return X
    @timeit
    def transpose_matrix(self, X):
        """Pivot repeated (ID, col) rows into numbered wide columns.

        NOTE(review): the result is bound to the local X and never
        returned, so callers receive None — likely a missing `return X`.
        """
        c = (X.groupby(['ID', 'col']).cumcount() + 1).astype(str)
        # remove col2
        X = X.set_index(['ID', 'col', c]).unstack()
        # flatten Multiindex
        X.columns = X.columns.map('_'.join)
        X = X.reset_index()
    @timeit
    def transform(self, X):
        """Cast the previously learned categorical columns; returns X."""
        for key in X.columns:
            if key in self.adapt_cols:
                X[key] = X[key].astype('category')
                #X[key+'hash'] = X[key].apply(hash_m)
        return X
    @timeit
    def fill_na(self, df):
        """Fill missing values per schema type (num: ffill within string
        groups; str: 'Unknown'; timestamp: ffill). Mutates df."""
        schema = self.info['schema']
        num_cols = [col for col, types in schema.items() if types == 'num']
        num_cols.remove(self.label)
        # cat_cols = [c for c in df if c.startswith(CONSTANT.CATEGORY_PREFIX)]
        m_cat_cols = [col for col, types in schema.items() if types == 'str']
        time_cols = [col for col, types in schema.items() if types == 'timestamp']
        # NOTE(review): `for c in [num_cols]` iterates once with the whole
        # list, and groupby(...).fillna(inplace=True) operates on a copy in
        # modern pandas — verify df is actually modified here.
        for c in [num_cols]:
            df.groupby(m_cat_cols)[c].fillna(method='ffill', inplace=True)
        for c in [m_cat_cols]:
            df[c].fillna("Unknown", inplace=True)
        for c in [time_cols]:
            df[c].fillna(method='ffill', inplace=True)
def hash_m(x):
    """Map *x* into [0, 1048575) via Python's built-in hash."""
    modulus = 1048575
    return hash(x) % modulus
|
23,070 | 586c341c74968e05da853022c56b1a46c56da330 | # -*- python -*-
from mi.client.utils import _
from mi.client.utils import magicstep
class MIStep_network (magicstep.magicstep):
    """Installer wizard step backed by the network.xml step definition."""
    def __init__(self, rootobj):
        magicstep.magicstep.__init__(self, rootobj, 'network.xml')
    def get_label(self):
        # Localized label shown for this step in the UI.
        return _("Network")
    def check_ready(self):
        # Always reports ready (returns 1 unconditionally).
        return 1
|
23,071 | fe858b8393767badf8da1938158fcac380b41d3c | # Generated by Django 2.2.19 on 2021-07-02 19:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations (Django 2.2); avoid hand-editing.

    dependencies = [
        ("search", "0036_auto_20210701_1028"),
    ]

    operations = [
        # managed=False: Django records the model state only and will not
        # create or drop the backing table (typically a DB view or an
        # externally managed table).
        migrations.CreateModel(
            name="QuerySource",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("cls", models.CharField(blank=True, default="", max_length=255)),
                ("object_id", models.CharField(blank=True, default="", max_length=255)),
            ],
            options={"managed": False,},
        ),
    ]
|
23,072 | f7662f22bed9b4b05d33163da9c519b100c4826e | #!/usr/bin/env python
# _*_coding:utf-8 _*_
# @Time :2021/6/19 15:58
# @Author :Jiawei Lian
# @FileName: defect_detector
# @Software: PyCharm
from copy import deepcopy
import cv2
import ensemble_boxes
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
from PIL import Image
from torch.hub import load_state_dict_from_url
# from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
from faster_rcnn import FastRCNNPredictor
from torchvision.transforms import functional as F, transforms
from torchvision.transforms import transforms as T
import faster_rcnn
class BaseWheatTTA:
    """Interface for test-time-augmentation transforms (author: @shonenkov).

    Subclasses provide a forward augmentation for single images and for
    batches, plus the inverse mapping that takes predicted boxes back to
    the original image's coordinate frame.
    """

    # Side length assumed for augmented images.
    image_size = 512

    def augment(self, image):
        """Apply the augmentation to one image tensor."""
        raise NotImplementedError

    def batch_augment(self, images):
        """Apply the augmentation to a batch of image tensors."""
        raise NotImplementedError

    def deaugment_boxes(self, boxes, image):
        """Map predicted boxes from augmented space back to the original image."""
        raise NotImplementedError
def get_object_detector(num_classes):
    """Build a Faster R-CNN ResNet-50 FPN detector with a head for `num_classes`.

    Uses the local ``faster_rcnn`` module (not torchvision's). NOTE(review):
    by torchvision convention ``num_classes`` includes the background class —
    confirm this fork follows the same convention.
    """
    # Detection model; pretrained=False, so backbone weights are random here
    # (the checkpoint loaded by the caller supplies trained weights).
    model = faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=False)
    # Width of the classifier input, needed to size the replacement head.
    in_features = model.roi_heads.box_predictor.cls_score.in_features
    # Swap in a freshly initialised predictor for our class count.
    model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
    return model
class TTAHorizontalFlip(BaseWheatTTA):
    """Horizontal-flip TTA transform (author: @shonenkov)."""

    def augment(self, image):
        # Mirror a single HWC tensor left-right (flip along dim 1 = width).
        return image.flip(1)

    def batch_augment(self, images):
        # Mirror a batch of NHWC tensors left-right (dim 2 = width).
        return images.flip(2)

    def deaugment_boxes(self, boxes, image):
        # Mirror x-coordinates back; writing (x2, x1) from (x1, x2) keeps
        # each box's corners well-ordered after the reflection.
        mirrored_x = image.width - boxes[:, [0, 2]]
        boxes[:, [2, 0]] = mirrored_x
        return boxes
class TTAVerticalFlip(BaseWheatTTA):
    """Vertical-flip TTA transform (author: @shonenkov)."""

    def augment(self, image):
        # NOTE(review): returns the image unchanged — the calling script
        # pre-flips the image with PIL before passing it in; confirm this
        # asymmetry with batch_augment is intentional.
        return image

    def batch_augment(self, images):
        # Flip a batch of NHWC tensors top-to-bottom (dim 3 in this layout).
        return images.flip(3)

    def deaugment_boxes(self, boxes, image):
        # Mirror y-coordinates back; writing (y2, y1) from (y1, y2) keeps
        # each box's corners well-ordered after the reflection.
        mirrored_y = image.height - boxes[:, [1, 3]]
        boxes[:, [3, 1]] = mirrored_y
        return boxes
class TTACompose(BaseWheatTTA):
    """Compose several TTA transforms into one (author: @shonenkov)."""

    def __init__(self, transforms):
        self.transforms = transforms

    def augment(self, image):
        """Apply every transform, in order, to a single image."""
        for transform in self.transforms:
            image = transform.augment(image)
        return image

    def batch_augment(self, images):
        """Apply every transform, in order, to a batch of images."""
        for transform in self.transforms:
            images = transform.batch_augment(images)
        return images

    def prepare_boxes(self, boxes):
        """Normalise each box to (x_min, y_min, x_max, y_max) order.

        Flips may have swapped x1/x2 or y1/y2. All four columns are computed
        before writing back because the assignments mutate ``boxes`` in
        place. (The original wrote the same x-min into column 0 four times,
        leaving columns 1-3 un-normalised.)
        """
        x_min, _ = boxes[:, [0, 2]].min(1)
        y_min, _ = boxes[:, [1, 3]].min(1)
        x_max, _ = boxes[:, [0, 2]].max(1)
        y_max, _ = boxes[:, [1, 3]].max(1)
        boxes[:, 0] = x_min
        boxes[:, 1] = y_min
        boxes[:, 2] = x_max
        boxes[:, 3] = y_max
        return boxes

    def deaugment_boxes(self, boxes, image):
        """Undo the transforms in reverse order, then normalise corner order."""
        for transform in self.transforms[::-1]:
            boxes = transform.deaugment_boxes(boxes, image)
        return self.prepare_boxes(boxes)
def tensor_to_PIL(tensor):
    """Convert a (possibly batched-by-one) image tensor to a PIL image."""
    # Work on a CPU copy so the caller's tensor and device are untouched;
    # squeeze(0) drops a leading batch dimension of size 1 if present.
    img = tensor.cpu().clone().squeeze(0)
    return transforms.ToPILImage()(img)
def del_tensor_ele(arr, index):
    """Return a copy of tensor `arr` with the row at `index` removed."""
    before, after = arr[0:index], arr[index + 1:]
    return torch.cat((before, after), dim=0)
def del_under_threshold(result, threshold=0.):
idxes = []
for idx in range(len(result[0]['scores'])):
if result[0]['scores'][idx] < threshold:
idxes.append(idx)
for i in idxes:
result[0]['scores'] = del_tensor_ele(result[0]['scores'], len(result[0]['scores']) - 1)
result[0]['labels'] = del_tensor_ele(result[0]['labels'], len(result[0]['labels']) - 1)
result[0]['boxes'] = del_tensor_ele(result[0]['boxes'], len(result[0]['boxes']) - 1)
return result
def del_fusion_under_threshold(boxes, labels, scores, threshold=0.):
    """Filter fused detections, keeping entries with score >= `threshold`.

    Returns the filtered (boxes, labels, scores) triple. As with
    ``del_under_threshold``, the original popped elements off the tail,
    which is only correct for score-sorted input; a boolean mask is
    correct for any ordering.
    """
    keep = scores >= threshold
    return boxes[keep], labels[keep], scores[keep]
def py_cpu_nms(boxes, scores, thresh=0.55):
    """Pure Python NMS baseline."""
    # Unpack x1, y1, x2, y2 corner coordinates (boxes is a torch tensor;
    # the geometry below runs in numpy while `order` stays a torch tensor).
    boxes = boxes.detach().numpy()
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    scores = scores
    # Area of every detection box (+1: inclusive pixel coordinates).
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # Sort detection indices by confidence, highest first.
    # order = scores.argsort()[::-1]
    all_scores, order = scores.sort(descending=True)
    keep = []  # indices of the boxes that survive suppression
    # print(order)
    while int(len(order.detach().numpy())) > 0:
        i = order[0]
        keep.append(i.numpy())  # keep the highest-scoring remaining box
        # Intersection rectangle between box i and every later box.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        # Intersection area; clamped to 0 when the boxes do not overlap.
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = overlap / (area1 + area2 - overlap).
        ovr = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes whose IoU with box i is below the threshold.
        inds = np.where(ovr <= thresh)[0]
        order = order[inds + 1]  # +1 because `ovr` is one shorter than `order`
    return keep
def soft_nms(bboxes, scores, Nt=0.3, sigma2=0.5, score_thresh=0.001, method=2):
    """Soft-NMS: decay overlapping boxes' scores instead of removing them.

    method: 1 = linear decay, 2 = Gaussian decay, otherwise hard NMS.
    Returns the kept boxes' original indices as an int array.
    """
    # Append each box's original index [0, 1, 2, ...] as a 5th column so the
    # in-place swaps below do not lose track of box identity.
    # res_bboxes = deepcopy(bboxes)
    N = bboxes.shape[0]  # total number of boxes
    indexes = np.array([np.arange(N)])  # indices 0, 1, 2, ..., n-1
    bboxes = bboxes.detach().numpy()
    bboxes = np.concatenate((bboxes, indexes.T), axis=1)  # local copy: mutations don't affect the caller
    # Per-box areas (inclusive pixel coordinates, hence the +1).
    x1 = bboxes[:, 0]
    y1 = bboxes[:, 1]
    x2 = bboxes[:, 2]
    y2 = bboxes[:, 3]
    scores = scores
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    scores, order = scores.sort(descending=True)
    scores = scores.detach().numpy()
    for i in range(N):
        # Find the maximum remaining score (and its offset) after position i.
        pos = i + 1
        if i != N - 1:
            maxscore = np.max(scores[pos:], axis=0)
            maxpos = np.argmax(scores[pos:], axis=0)
        else:
            maxscore = scores[-1]
            maxpos = 0
        # Selection-sort step: if a later box outscores box i (scores decay
        # as the loop runs), swap so position i holds the current maximum.
        if scores[i] < maxscore:
            bboxes[[i, maxpos + i + 1]] = bboxes[[maxpos + i + 1, i]]
            scores[[i, maxpos + i + 1]] = scores[[maxpos + i + 1, i]]
            areas[[i, maxpos + i + 1]] = areas[[maxpos + i + 1, i]]
        # IoU calculate
        xx1 = np.maximum(bboxes[i, 0], bboxes[pos:, 0])
        yy1 = np.maximum(bboxes[i, 1], bboxes[pos:, 1])
        xx2 = np.minimum(bboxes[i, 2], bboxes[pos:, 2])
        yy2 = np.minimum(bboxes[i, 3], bboxes[pos:, 3])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        intersection = w * h
        iou = intersection / (areas[i] + areas[pos:] - intersection)
        # Three methods: 1.linear 2.gaussian 3.original NMS
        if method == 1:  # linear
            weight = np.ones(iou.shape)
            weight[iou > Nt] = weight[iou > Nt] - iou[iou > Nt]
        elif method == 2:  # gaussian
            weight = np.exp(-(iou * iou) / sigma2)
        else:  # original NMS
            weight = np.ones(iou.shape)
            weight[iou > Nt] = 0
        scores[pos:] = weight * scores[pos:]
    # select the boxes and keep the corresponding indexes
    inds = bboxes[:, 4][scores > score_thresh]
    keep = inds.astype(int)
    return keep
# image_path = './data/Images/2020-01-11_21_43_14_145.jpg'
# image_path = './data/Images/2020-03-07_08_34_30_467.jpg'
# image_path = './data/Images/2020-01-11_21_41_15_002.jpg'
image_path = './data/Images/2020-01-11_21_36_02_642.jpg'
# image_path = './data/Images/2020-03-10_16_18_20_688.jpg'
# image_path = './data/Images/2021-05-29-18-44-02.jpg'
# image_path = './data/Images/2021-05-16-18-51-54.jpg'
# image_path = './data/Images/2021-05-16-14-58-28.jpg'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5959/model_23_5959_5288.pth'
# model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/model_0.pth'
model_path = '/home/user/Public/Jiawei_Lian/HW_defect_detection/ckpt/0.5932/model_8_5932.pth'
results = []
predictions = []
# you can try own combinations:
transform1 = TTACompose([
TTAHorizontalFlip(),
# TTAVerticalFlip()
])
transform2 = TTACompose([
# TTAHorizontalFlip(),
TTAVerticalFlip()
])
fig, ax = plt.subplots(3, 2, figsize=(16, 10))
image1 = Image.open(image_path).convert("RGB")
image1_vf = F.vflip(image1)
image_tensor = torch.from_numpy(np.array(image1))
image_tensor_vf = torch.from_numpy(np.array(image1_vf))
# image_tensor = image_tensor.permute(0, 1, 2)
image_numpy_vf = image_tensor_vf.cpu().numpy().copy()
image_numpy = image_tensor.cpu().numpy().copy()
image_numpy1 = image_tensor.cpu().numpy().copy()
image_numpy2 = image_tensor.cpu().numpy().copy()
image_numpy3 = image_tensor.cpu().numpy().copy()
# ax[0, 0].imshow(image)
# ax[0, 0].set_title('original')
tta_image1 = transform1.augment(image_tensor)
tta_image2 = transform2.augment(image_tensor_vf)
tta_image1_numpy = tta_image1.numpy().copy()
tta_image2_numpy = image_tensor_vf.numpy().copy()
tta_image1 = Image.fromarray(tta_image1_numpy)
tta_image2 = Image.fromarray(tta_image2_numpy)
########################################################################
# tta_image1 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes3 = result[0]['boxes']
scores3 = result[0]['scores']
labels3 = result[0]['labels']
for box in boxes3:
cv2.rectangle(tta_image1_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 0].imshow(tta_image1_numpy)
ax[0, 0].set_title('Augment1')
###################################################################
# deaugmentation prediction
boxes3 = transform1.deaugment_boxes(boxes3, image1)
results.append({
'boxes': boxes3,
'scores': scores3,
'labels': labels3,
})
for box in boxes3:
cv2.rectangle(image_numpy1, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[0, 1].imshow(image_numpy1)
ax[0, 1].set_title('Deaugment1')
#########################################################
########################################################################
# tta_image2 prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(tta_image2), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result = model(preprocessed_image)
result = del_under_threshold(result)
print('tta_image prediction:', result)
boxes4 = result[0]['boxes']
scores4 = result[0]['scores']
labels4 = result[0]['labels']
for box in boxes4:
cv2.rectangle(tta_image2_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 0].imshow(tta_image2_numpy)
ax[1, 0].set_title('Augment2')
###################################################################
# deaugmentation prediction
boxes4 = transform2.deaugment_boxes(boxes4, image1_vf)
results.append({
'boxes': boxes4,
'scores': scores4,
'labels': labels4,
})
for box in boxes4:
cv2.rectangle(image_numpy3, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[1, 1].imshow(image_numpy3)
ax[1, 1].set_title('Deaugment2')
#########################################################
# original_image prediction
preprocessed_image = torch.unsqueeze(F.to_tensor(image1), dim=0)
model = get_object_detector(11)
model.load_state_dict(
torch.load(model_path, map_location='cpu')[
'model'])
model.eval()
result_original_image = model(preprocessed_image)
result_original_image = del_under_threshold(result_original_image)
print('original image prediction:', result_original_image)
boxes2 = result_original_image[0]['boxes']
scores2 = result_original_image[0]['scores']
labels2 = result_original_image[0]['labels']
results.append({
'boxes': boxes2,
'scores': scores2,
'labels': labels2,
})
for box in boxes2:
cv2.rectangle(image_numpy, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
ax[2, 0].imshow(image_numpy)
ax[2, 0].set_title('Original')
#######################################################
# # weghted boxes fusion
# predictions.append(results)
# boxes1, scores1, labels1 = run_wbf(predictions)
temp_all_boxes = torch.cat((boxes3, boxes2, boxes4), 0)
all_labels = torch.cat((labels3, labels2, labels4), 0)
all_scores = torch.cat((scores3, scores2, scores4), 0)
_, indices = all_scores.sort(descending=True)
all_labels = all_labels.gather(dim=0, index=indices)
all_scores = all_scores.gather(dim=0, index=indices)
all_boxes = torch.empty(len(indices), 4)
for i in range(len(indices)):
all_boxes[i] = temp_all_boxes[indices[i]]
all_boxes, all_labels, all_scores = del_fusion_under_threshold(all_boxes, all_labels, all_scores)
keep = py_cpu_nms(all_boxes, all_scores)
# keep = soft_nms(all_boxes, all_scores)
# scores1 = torch.from_numpy(scores1)
# boxes1 = torch.from_numpy(boxes1)
# labels1 = torch.from_numpy(labels1)
# temp_all_boxes = torch.cat((boxes2, boxes1), 0)
# all_labels = torch.cat((labels2, labels1), 0)
# all_scores = torch.cat((scores2, scores1), 0)
# print(boxes1, scores1, labels1)
#
# for box in boxes1:
# cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
#
# ax[1, 1].imshow(image_numpy2)
# ax[1, 1].set_title('predictions fusion')
# all_scores, indices = all_scores.sort(descending=True)
# all_labels = all_labels.gather(dim=0, index=indices)
# all_boxes = torch.empty(len(indices), 4)
all_scores1 = all_scores[:len(keep)]
all_labels1 = all_labels[:len(keep)]
all_boxes1 = all_boxes[:len(keep)]
for i in range(len(keep)):
all_scores1[i] = all_scores[keep[i]]
all_labels1[i] = all_labels[keep[i]]
all_boxes1[i] = all_boxes[keep[i]]
labels = ["",
"connection_edge_defect",
"right_angle_edge_defect",
"cavity_defect",
"burr_defect",
"huahen",
"mosun",
"yanse",
'basi',
'jianju',
'chuizhidu', ]
i = 0
for box in all_boxes1:
cv2.rectangle(image_numpy2, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
# add label
# if box[1] > 10:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] - 6)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# else:
# cv2.putText(image_numpy2, labels[all_labels1[i]], (int(box[0]), int(box[1] + 15)),
# cv2.FONT_HERSHEY_COMPLEX_SMALL, 5,
# (255, 255, 0))
# i += 1
ax[2, 1].imshow(image_numpy2)
ax[2, 1].set_title('Fusion')
# Image._show(Image.fromarray(image_numpy2))
# Image.fromarray(image_numpy2).save('prediction.jpg')
print('fusion prediction:')
print(all_labels1)
print(all_scores1)
print(all_boxes1)
|
23,073 | f2c590b0a836a903a59b3132e83e8f716a05269e | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from six.moves import cPickle
from six.moves import urllib
import tensorflow as tf
from kernels import core
REMOTE_URL = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
LOCAL_DIR = os.path.join("data/cifar100/")
ARCHIVE_NAME = "cifar-100-python.tar.gz"
DATA_DIR = "cifar-100-python/"
TRAIN_BATCHES = ["train"]
TEST_BATCHES = ["test"]
IMAGE_SIZE = 32
NUM_SHUFFLE_BATCHES = 10
NUM_THREADS = 8
@core.register_std_kernel
class CIFARDataset(core.Kernel):
    """Kernel that downloads CIFAR-100 and serves shuffled, batched tensors.

    Uses the TF1-era ``tf.contrib.data`` pipeline; outputs `images`
    (batch, 32, 32, 3 float32) and `labels` (fine labels).
    """
    @staticmethod
    def get_config():
        """Declare the kernel's ports and user-tunable attributes."""
        config = core.Config("CIFAR100 Dataset", "cifar100_dataset")
        config.add_output(core.Port(name="images"))
        config.add_output(core.Port(name="labels"))
        config.add_attribute(core.Attribute(name="split", type="string", value="train"))
        config.add_attribute(core.Attribute(name="batch_size", type="int", value="128"))
        config.add_attribute(core.Attribute(name="random_crop", type="bool", value="false"))
        config.add_attribute(core.Attribute(name="standardize", type="bool", value="false"))
        return config
    def __init__(self):
        # Download and extract the CIFAR-100 archive on first use (the steps
        # are idempotent: each is skipped if its output already exists).
        if not os.path.exists(LOCAL_DIR):
            os.makedirs(LOCAL_DIR)
        if not os.path.exists(LOCAL_DIR + ARCHIVE_NAME):
            print("Downloading...")
            urllib.request.urlretrieve(REMOTE_URL, LOCAL_DIR + ARCHIVE_NAME)
        if not os.path.exists(LOCAL_DIR + DATA_DIR):
            print("Extracting files...")
            tar = tarfile.open(LOCAL_DIR + ARCHIVE_NAME)
            tar.extractall(LOCAL_DIR)
            tar.close()
    def call(self):
        """Build the input pipeline and return one batch's image/label tensors."""
        batches = {
            "train": TRAIN_BATCHES,
            "test": TEST_BATCHES
        }[self.split]
        all_images = []
        all_labels = []
        for batch in batches:
            # Each pickle holds flat CHW uint8 rows plus fine-grained labels.
            with open("%s%s%s" % (LOCAL_DIR, DATA_DIR, batch), "rb") as fo:
                blob = cPickle.load(fo)
                images = np.array(blob["data"])
                labels = np.array(blob["fine_labels"])
                num = images.shape[0]
                # Reshape flat rows to CHW, then transpose to HWC for TF.
                images = np.reshape(images, [num, 3, IMAGE_SIZE, IMAGE_SIZE])
                images = np.transpose(images, [0, 2, 3, 1])
                print("Loaded %d examples." % num)
                all_images.append(images)
                all_labels.append(labels)
        all_images = np.concatenate(all_images)
        all_labels = np.concatenate(all_labels)
        def _parse(image, label):
            # Per-example preprocessing: optional pad-and-crop augmentation
            # with horizontal flips, and optional per-image standardization.
            image = tf.to_float(image)
            image = tf.reshape(image, [IMAGE_SIZE, IMAGE_SIZE, 3])
            if self.random_crop:
                image = tf.image.resize_image_with_crop_or_pad(
                    image, IMAGE_SIZE + 4, IMAGE_SIZE + 4)
                image = tf.random_crop(image, [IMAGE_SIZE, IMAGE_SIZE, 3])
                image = tf.image.random_flip_left_right(image)
            if self.standardize:
                image = tf.image.per_image_standardization(image)
            return image, label
        d = tf.contrib.data.Dataset.from_tensor_slices(
            (all_images, all_labels))
        d = d.cache()
        d = d.repeat()
        # Shuffle only for training; the buffer spans several batches.
        if self.split == "train":
            d = d.shuffle(self.batch_size * NUM_SHUFFLE_BATCHES)
        d = d.map(_parse, num_threads=NUM_THREADS)
        d = d.batch(self.batch_size)
        it = d.make_one_shot_iterator()
        images, labels = it.get_next()
        return dict(images=images, labels=labels)
|
23,074 | 9ed85e17b4dd97524f8cc32c5f1ebce688325b29 | #!/usr/bin/env python
import binascii
import sys
def compute_crc32(filename):
    """Return the CRC-32 of the file's contents as 8 lowercase hex digits.

    The checksum is masked to 32 bits so the result is identical on
    Python 2 and 3 (where crc32's signedness differs).
    """
    # Use a context manager so the handle is closed promptly — the original
    # left the file open until garbage collection.
    with open(filename, 'rb') as f:
        checksum = binascii.crc32(f.read()) & 0xFFFFFFFF
    return "%08x" % checksum
# CLI: checksum the file named by the first argument and print the hex digest.
fn = sys.argv[1]
crc32v = compute_crc32(fn)
# print() with a single argument behaves identically on Python 2 and 3;
# the original bare `print crc32v` statement is a SyntaxError on Python 3.
print(crc32v)
|
23,075 | e9625f9868e6dcb455a8042ad8efc0d3b637b3b4 | from pydantic import Field, BaseModel, validator, ValidationError
from typing import List, Optional
class UserModel(BaseModel):
    """User record with a minimum-length password constraint."""

    name: str
    password: str

    @validator('password')
    def password_validation(cls, v):
        """Reject passwords shorter than 6 characters.

        Pydantic validators must raise ValueError/TypeError/AssertionError;
        the framework collects them into a ValidationError itself. Raising
        ValidationError directly (as the original did) fails at runtime
        because its constructor does not accept a plain message string.
        """
        if len(v) < 6:
            raise ValueError("Password should contain at least 6 symbols")
        return v
|
23,076 | c2be3a1fc51eaaeae8830c7e4c3e27822d10a0cb | import os
from time import sleep
from dotenv import load_dotenv
from spotipy.oauth2 import SpotifyClientCredentials, SpotifyOAuth
import spotipy
import sys
import webbrowser
import pyautogui
from time import sleep
import json
bandera = False
load_dotenv()
ID_SPOTIFY = os.getenv("ID_SPOTIFY")
ID_SPOSECRET = os.getenv("ID_SPOSECRET")
SPOTIFY_URI = os.getenv("SPOTIFY_URI")
#esto funciona con una sola cancion
#autor = ''
#cancion_b = 'bad guy'.lower()
#esto funciona con una sola cancion
#if len(autor) > 0:
# sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(ID_SPOTIFY, ID_SPOSECRET))
# resultado = sp.search(autor)
# for i in range(0, len(resultado['tracks']['items'])):
# cancion = resultado['tracks']['items'][i]['name'].lower()
# if cancion in cancion_b:
# bandera = True
# webbrowser.open(resultado['tracks']['items'][i]['uri'])
#
#if bandera == False:
# cancion_b = cancion_b.replace(" ", "%20")
# webbrowser.open(f'spotify:search:{cancion_b}')
# sleep(5)
# for i in range(30):
# pyautogui.press("tab")
# #sleep(1)
# pyautogui.press("enter")
#
#tratando de que funcione con playlists
#Funciona pero solo si se spotify pasa a ser la app usada, para poder poner los comandos. Ya que pyautogui solo emula las teclas.
client_credentials_manager = SpotifyClientCredentials(ID_SPOTIFY, ID_SPOSECRET)
sp = spotipy.Spotify(client_credentials_manager=SpotifyClientCredentials(ID_SPOTIFY, ID_SPOSECRET))
#print(resultado)
resultado = sp.playlist("53zjvfwOfcktTLwbW9wvID")
#for i in range(0, len(resultado['items'])):
# cancion = resultado['items'][i]['track']['uri']
# duracion = resultado['items'][i]['track']['duration_ms']
#
#
# cancion = ''
# duracion = 0
# print(cancion)
# print(duracion/1000)
webbrowser.open(resultado['uri'])
sleep(5)
b = True
if b == True:
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('enter')
b = False
|
23,077 | bca8ebec49215ef9d347f85781d43b2971ce87e9 | import argparse
import binascii
import random
import sensor_pb2_pb2
import socket
import sys
import time
from collections import namedtuple
Reading = namedtuple("Reading", "temperature humidity")
last_reading = Reading(27, 50)
def parse_args():
    """Parse and return the command-line options for the test client."""
    parser = argparse.ArgumentParser(description='Protobuf Test Client')
    parser.add_argument("--dev-id", required=True, type=int,
                        help="Numeric device ID.")
    parser.add_argument("--dev-name",
                        help="Device name.")
    parser.add_argument("--host", default="localhost",
                        help="The IP address of the controller.")
    parser.add_argument("--port", default=48003, type=int,
                        help="The port that the controller is listening on.")
    return parser.parse_args()
def send_packet(sock, host, port, payload):
    ''' Sends header and payload to controller. '''
    # Wire format: a single length byte followed by the protobuf payload.
    payload_len = len(payload)
    header = bytearray(1)
    header[0] = payload_len  # raises ValueError if the payload exceeds 255 bytes
    assert(payload_len == header[0])  # Make sure one byte is enough for len!
    #print(binascii.hexlify(msg_bytes))
    sent = sock.sendto(header + payload, (host, port))
    print("Sent {} bytes".format(sent))
    return sent
def send_connect(sock, host, port, dev_id):
    ''' Sends a Connect message to the controller '''
    # Wrap the device ID in a Connect message, then in the Msg envelope.
    connect = sensor_pb2_pb2.Connect()
    connect.dev_id.CopyFrom(dev_id)
    envelope = sensor_pb2_pb2.Msg()
    envelope.msg_type = sensor_pb2_pb2.Msg.CONNECT
    envelope.connect_msg.CopyFrom(connect)
    return send_packet(sock, host, port, envelope.SerializeToString())
def send_report(sock, host, port, dev_id, reading):
    ''' Sends a Report message to the controller '''
    report = sensor_pb2_pb2.Report()
    report.dev_id.CopyFrom(dev_id)
    # The wire format carries fixed-point values with one decimal place.
    report.data.temperature = int(reading.temperature * 10)
    report.data.humidity = int(reading.humidity * 10)
    envelope = sensor_pb2_pb2.Msg()
    envelope.msg_type = sensor_pb2_pb2.Msg.REPORT
    envelope.report_msg.CopyFrom(report)
    return send_packet(sock, host, port, envelope.SerializeToString())
def handle_command(sock, host, port, cmd_msg, dev_id):
    ''' Process Command messages coming from the controller. '''
    global last_reading
    # REPORT_DATA is the only command we understand; everything else is logged.
    if cmd_msg.cmd_type != sensor_pb2_pb2.Command.REPORT_DATA:
        print("Unknown command.")
        return
    print("Sending latest sensor reading.")
    send_report(sock, host, port, dev_id, last_reading)
def take_reading():
    ''' Simulate reading data from somewhere. '''
    global last_reading
    # Random-walk both channels around the previous sample so consecutive
    # readings drift gently, like a real sensor.
    next_temp = last_reading.temperature + random.uniform(-0.25, 0.25)
    next_hum = last_reading.humidity + random.uniform(-1, 1)
    last_reading = Reading(next_temp, next_hum)
    return last_reading
def main():
    """Entry point: register with the controller, then poll for commands and
    refresh the simulated sensor reading roughly every 5 seconds."""
    args = parse_args()
    # Create our ID object
    my_id = sensor_pb2_pb2.DeviceIdentification()
    my_id.id = args.dev_id
    if args.dev_name:
        my_id.name = args.dev_name
    # Set up UDP socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setblocking(0)
    # Tell the controller we exist
    send_connect(sock, args.host, args.port, my_id)
    time_to_next_reading = 5
    while True:
        # See if we've gotten anything from the controller.
        try:
            packet = sock.recv(1024)
            # First byte is the length header; the protobuf payload follows.
            msg = sensor_pb2_pb2.Msg()
            msg.ParseFromString(packet[1:])
            if msg.msg_type == sensor_pb2_pb2.Msg.COMMAND:
                handle_command(sock, args.host, args.port, msg.command_msg, my_id)
            else:
                print("Unexpected message type rec'd")
        except Exception as ex:
            # The non-blocking socket raises when no packet is waiting, so
            # this branch doubles as the 1-second poll interval.
            # NOTE(review): it also silently swallows parse errors.
            #template = "An exception of type {0} occurred. Arguments:\n{1!r}"
            #message = template.format(type(ex).__name__, ex.args)
            #print message
            time.sleep(1)
        # Take another sensor reading?
        time_to_next_reading -= 1
        if time_to_next_reading <= 0:
            take_reading()
            time_to_next_reading = 5
    sys.exit(0)
if __name__ == "__main__":
main()
|
23,078 | cbcefadb34eea188b25b35c6b3e7e071ffd28498 | #from pymol import cmd, stored
import re
import sys
import os
suffix = "_TopDes_"
def create_output_states(name, folder, suffix, num_des, res_list = []):
    """Merge numbered design models into one multi-state PyMOL object and save a session.

    NOTE(review): the `from pymol import cmd` import at the top of the file
    is commented out, so `cmd` resolves only when this script runs inside
    PyMOL itself — confirm the intended execution environment.
    NOTE(review): the mutable default `res_list=[]` is harmless here only
    because the list is never mutated.
    """
    num_des = int(num_des)
    res_list = [str(x) for x in res_list]
    for x in range(1, num_des+1):
        # Zero-pad the design index to 4 digits (7 -> "0007") to match the
        # Rosetta output naming scheme used in the object names.
        x = (4-len(str(x)))*"0" + str(x)
        cmd.create("%s"%name, "*%s%s*"%(suffix, x), 1, x)
    cmd.delete("*%s*"%suffix)
    cmd.show_as("cartoon", "%s"%name)
    cmd.show("spheres", "het")
    cmd.show("sticks", "resn CRO")
    # Highlight the caller-specified important residues as sticks.
    cmd.select("Imp_Res", "resi %s"%("+".join(res_list)))
    cmd.show("sticks", "Imp_Res")
    cmd.save("%s/%s.pse"%(folder, name), "%s"%name, 0, "pse")
for root, dirs, files in os.walk("GFP/DomInsertion/ShortDomain/RosRemodelTop3/Top3Relax/FinalRelax/D229L239_Best", topdown=False):
print(root)
name = root.split("/")
#name[0] is name for create_output fxn
#print(name)
for value in files:
if suffix in value:
#print(value[:-4])
print(value)
cmd.load("%s/%s"%(root, value))
pdb_num = int(value.split("_")[-1][:-4])
if pdb_num > 1:
#print(value, value[:-8])
cmd.align("%s"%value[:-4], "%s_0001"%value[:-9])
create_output_states(name[-1], "/".join(name[:-1]), suffix, 300, [9, 20, 22, 40, 42, 44, 51, 144])
#cmd.delete("all")
|
23,079 | 9aff5650d9b3a44f82d08b6dfc7003c6bb2340d9 | '''
objective: quick test to see how to display a line with the marker type
'''
import rospy
import time
import numpy as np
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
from geometry_msgs.msg import Point
rospy.init_node('thing',anonymous=True)
pub = rospy.Publisher('markers',MarkerArray,queue_size=10)
p_arr=[]
for i in np.linspace(0,6,10):
p = Point()
p.x=i
p.y=np.sin(i)
p_arr.append(p)
counter=0
test = Marker()
test.header.frame_id = 'map'
test.id=0
test.scale.x=0.5
test.lifetime.secs=1
test.color.r=1.0
test.color.b=1.0
test.color.a=1.0
test.type=4
test.points=p_arr
# ###################################
# Green cylinder (Marker.CYLINDER == 3) marking the start of the path.
start = Marker()
start.header.frame_id = 'map'
start.id = 1
start.pose.position.x = 0
start.pose.position.y = 0  # was `.x` assigned twice (copy-paste slip); y defaults to 0, so rendering is unchanged
start.scale.x = 0.5
start.scale.y = 0.5
start.scale.z = 0.5
start.lifetime.secs = 1
start.color.g = 1.0
start.color.a = 1.0
start.type = 3
# ###############################
together = MarkerArray()
together.markers.append(test)
together.markers.append(start)
while (not rospy.is_shutdown()):
# pub.publish(allmarkers)
pub.publish(together)
time.sleep(1)
# while
# eof
|
23,080 | 685d675956830da4993ba47070c1e21fedd05093 | # --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv(path)
loan_status = data['Loan_Status'].value_counts()
plt.figure()
plt.title('Loan Status')
plt.xlabel('Count')
plt.ylabel('loan status')
loan_status.plot(kind = 'bar')
plt.show()
#Code starts here
# --------------
#Code starts here
property_and_loan = data.groupby(['Property_Area','Loan_Status']).size().unstack()
property_and_loan.plot(kind='bar', stacked=False, figsize=(15,10))
plt.xlabel('Property Area')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
education_and_loan = data.groupby(['Education','Loan_Status']).size().unstack()
education_and_loan.plot(kind='bar', stacked=True, figsize=(10,8))
plt.xlabel('Education Status')
plt.ylabel('Loan Status')
plt.xticks(rotation=45)
plt.show()
# --------------
#Code starts here
graduate = data[data['Education']=='Graduate']
#print(graduate)
not_graduate = data[data['Education']=='Not Graduate']
graduate.plot(kind='density',label='Graduate')
not_graduate.plot(kind='density',label='Not Graduate')
#Code ends here
#For automatic legend display
plt.legend()
# --------------
#Code starts here
# Three stacked scatter plots of loan amount against applicant,
# co-applicant and total income.
fig, (ax_1,ax_2,ax_3) = plt.subplots(nrows = 3 , ncols = 1)

res1 = data.groupby(['ApplicantIncome','LoanAmount'])
res1.plot.scatter(x='ApplicantIncome',y='LoanAmount', ax=ax_1)
ax_1.set_title('Applicant Income ')

res2 = data.groupby(['CoapplicantIncome','LoanAmount'])
res2.plot.scatter(x='CoapplicantIncome',y='LoanAmount', ax=ax_2)
ax_2.set_title('Coapplicant Income')  # fixed: was set on ax_1

# (Removed a dead `res2 = res1.fillna(0)` rebinding whose result was unused.)
data['TotalIncome'] = data['ApplicantIncome'] + data['CoapplicantIncome']
res3 = data.groupby(['TotalIncome','LoanAmount'])
res3.plot.scatter(x='TotalIncome',y='LoanAmount', ax=ax_3)
ax_3.set_title('TotalIncome')  # fixed: was set on ax_1
23,081 | 3222130be91c298611922209a5d9fd698554d5da | # from distutils.core import setup
# setup(name='压缩包的名字',version='1.0',author='李彦达',py_modules=['my_package.module1'])
# 导入这个模块要把这个模块放到python shell相同目录下 才会找到
# 在python exe环境导入一个模块:python解释器寻找:
# 1.当前目录 2.如果不在那么去PYTHONPATH下的每个目录去寻找 3.如果还不在就去查看默认路径(每个系统不一样)
# def say_hi():
# print("这是我的第一个模块")
# __version__='0.1'
import os

# Print each (dirpath, dirnames, filenames) triple under d:\a.
# Loop variable renamed: the original used `dir`, shadowing the builtin.
for entry in os.walk('d:\\a'):
    print(entry)
23,082 | e86a8fb174e988bb55bc5617821c6f414ab9f139 | class Solution:
def getPermutation(self, n: int, k: int) -> str:
fact=1
number=[]
for i in range(1,n):
fact=fact*i
number.append(i)
# print(fact)
number.append(n)
k=k-1
ans=""
while True:
ans+=str(number[int(k//fact)])
# print(ans)
number.remove(number[int(k//fact)])
# print(number)
if len(number)==0:
break
k%=fact
fact/=len(number)
# print(fact)
return ans
|
23,083 | 21deee4d8bbb1d05b0be768aa7a4b52cc3b8885c | #python 3.6
# -*- coding:utf-8 -*-
__author__ = 'ZYH'
import json
import requests
from tqdm import tqdm
# headers={'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36'}
# url = 'https://api.github.com/repos/elastic/elasticsearch/pulls/comments?per_page=100&page={}'
# total_page = 1053
# total_data = []
#
# for page in tqdm(range(1,total_page)):
# try:
# r = requests.get(url.format(page), headers=headers)
# print("Status Code:", r.status_code)
# s = json.loads(r.content)
# total_data.extend(s)
# except Exception as e:
# print(e)
# filename = 'pr_comment.json'
# with open(filename, 'w') as file_obj:
# json.dump(total_data, file_obj)
# Index PR review comments by commenter login. Each entry currently stores
# only the login itself; extend the inner dict when more fields are needed.
pulls_comment_dict = dict()
with open("pr_comment.json", "r") as fh:  # close promptly (original leaked the handle)
    pulls_comments = json.load(fh)
for pulls_comment in pulls_comments:
    # Some scraped pages contain non-dict error payloads — skip them.
    if not isinstance(pulls_comment, dict):
        continue
    login = pulls_comment['user']['login']
    pulls_comment_dict[login] = {'name': login}
filename = 'pr_comments.json'
with open(filename, 'w') as file_obj:
    json.dump(pulls_comment_dict, file_obj)
23,084 | 81849d153ba7fd08a89eed870a2a7b030ef950bf | import os
import time
def reset():
    """Truncate both data files (list.txt, log.txt) and announce the reset.

    NOTE(review): the trailing bare name `ghg` is undefined and raises
    NameError; the bottom-level menu loop's broad try/except appears to
    rely on this to restart — confirm before "fixing" it.
    """
    # Opening with mode 'w' truncates each file to zero length.
    a=open('list.txt','w')
    a.close()
    a=open('log.txt','w')
    a.close()
    l='all reset'
    pri1(l)
    input('Press Enter to go back')
    ghg
def pri(a):
    """Fast typewriter effect: clear the console and reprint a growing
    prefix of `a`, one character at a time, with a negligible delay."""
    shown = ''
    for ch in a:
        os.system('cls')  # Windows console clear; fails harmlessly elsewhere
        shown = shown + ch
        print(shown)
        time.sleep(0.0000001)
def pri1(a):
    """Slow typewriter effect: clear the console and reprint a growing
    prefix of `a`, one character every 0.25 seconds."""
    shown = ''
    for ch in a:
        os.system('cls')  # Windows console clear; fails harmlessly elsewhere
        shown = shown + ch
        print(shown)
        time.sleep(0.25)
q=''
ans=''
ss=['enter root','Enter starting zone','Enter ending zone','Enter all stands in roots','Enter total time','Enter total distance']
class don:
    """Interactive menu actions over the flat record file log.txt.

    Each root occupies 7 consecutive lines in log.txt (name + 6 fields).
    Several methods end with a deliberately undefined name (ghg/huh/gh):
    the resulting NameError is presumably caught by the bare except in the
    module-level menu loop to restart the menu — TODO confirm.
    """
    def find(self):
        # Look up a root by name and animate its 6 record fields.
        os.system('cls')
        wee=open('log.txt','r')
        we=input('?Enter root')
        w=wee.readlines()
        s=0
        qx=''
        for i in w:
            if i==str(we)+'\n':
                # Found the root name line: the next 6 lines are its fields.
                se=0
                for j in range(6):
                    qwe=str(ss[se]+':-'+w[s])
                    qx=qx+qwe+'\n'
                    se=se+1
                    s=s+1
                pri(qx)
            else:
                s=s+1
        input(' SERCH IS STOP --Press Enter to main menu')
        ghg
    def creat(self,A):
        """Create (option 1) or delete (anything else) a root record; A is the user name."""
        f=open('log.txt','a+')
        xc='WELCOM \n'+str(A)
        pri1(xc)
        input('Press Enter to start')
        print('''1|Creating
               2|Deleting''')
        ans1=int(input('enter your option'))
        if ans1==1:
            os.system('cls')
            root=''
            input(':---press enter to start creating list')
            root=(input('enter root'))
            # Append the new root name to the index file list.txt.
            ab=open('list.txt','r')
            aa=ab.readlines()
            ab.close()
            aa.append(root)
            ab=open('list.txt','w')
            for i in aa:
                ab.write(i)
            ab.write('\n')
            ab.close()
            # Write the 7-line record (root name + 6 fields) to log.txt.
            f.write(str(root))
            f.write('\n')
            q1=input('Enter starting zone')
            f.write(str(q1))
            f.write('\n')
            q2=input('Enter ending zone')
            f.write(str(q2))
            f.write('\n')
            q3=input('Enter all stands in roots ')
            f.write(str(q3))
            f.write('\n')
            q4=input('Enter total time ')
            f.write(str(q4))
            f.write('\n')
            q5=input('Enter total distance ')
            f.write(str(q5))
            f.write('\n')
            f.close()
            input('Press Enter to go back')
            huh
        else:
            # Delete branch.
            # NOTE(review): log.txt is opened 'a+', so the file position is at
            # EOF and readlines() returns [] — the matching loop likely never
            # finds anything; confirm intended.
            os.system('cls')
            oo=[]
            x=input('enter root')
            d=open('log.txt','a+')
            z=0
            c=d.readlines()
            for i in c:
                if x==i:
                    for l in range(7):
                        c.pop(z)
                else:
                    # NOTE(review): `s` is never initialised in this method, so
                    # this line raises NameError on the first non-matching
                    # record; the caller's bare except would swallow it.
                    s=s+1
                    oo.append(i)
            d.close()
            h=open('log.txt','w')
            for i in c:
                h.writelines(i)
            h.write('\n')
            h.close()
            gh
    def up(self):
        # Login loop: '123' opens creat(); 'clsall' wipes via reset(); else retry.
        while True:
            ax=input('Enter your name:--')
            e=input('enter login password or reset password:--')
            if e=='123':
                self.creat(ax)
                break
            elif e=='clsall':
                reset()
            else:
                continue
    def goo(self):
        # Animate the full index of known roots from list.txt.
        print ('ALL ROOTS RECORDS PRESENT OR NOT PRESENT')
        A=open('list.txt','r')
        aq=A.readlines()
        asd=''
        for i in aq:
            asd=asd+str(i)
        A.close()
        pri1(asd)
        input('Press Enter to go back')
def home():
    """Clear the screen, animate the DTC banner, and print the menu plus the
    current error banner held in the module-level `q`."""
    os.system('cls')
    a=('''
 DDDDDDDDDDDDDDD TTTTTTTTTTTTTTTT CCCCCCCCCCCCCCCC
 DDDDDDDDDDDDDDD TTTTTTTTTTTTTTTT CCCCCCCCCCCCCCCC
 DD DD TTTT CCC
 DD DD TTTT CCC
 DD DD TTTT CCC
 DD DD TTTT CCC
 DDDDDDDDDDDDDDD TTTT CCCCCCCCCCCCCCCC
 DDDDDDDDDDDDDDD TTTT CCCCCCCCCCCCCCCC''')
    pri(a)
    print('''
    1|Find By root
    2|User login
    3|About all recodes''')
    print(q)
# Top-level driver: show the menu and dispatch on the user's numeric choice.
# Invalid input (non-int, or the deliberate NameErrors raised inside the
# handlers) lands in the bare except, which sets the error banner and redraws.
aa= don()
home()
while True:
    try:
        ans=int(input('enter your option'))
        if ans in (1,2,3):
            if ans==1:
                aa.find()
            elif ans==3:
                aa.goo()
            else:
                aa.up()
        else:
            # Out-of-range number: set the banner and redraw the menu.
            q=('''"ENTER ONE OF THIS "''')
            home()
            continue
        break
    except:
        # Non-integer input or a handler bail-out (NameError trick).
        q=('''"ENTER ONLY INT "''')
        home()
        continue
|
23,085 | 0cd72ce3fd6a911ba5489e33282526d97388f702 | # system & 3rd party package
from flask import Flask, request, session, redirect, url_for, escape, abort
import json, urlparse, os.path, time, string, random
app = Flask(__name__)
# Debug mode is switched on globally; per the original note this is done to
# enable jsonp for all operations.
app.debug = True
# Static files live under ./static and are served by Flask directly, without
# going through any route defined below.
@app.before_request
def preProcess():
    # Runs before every request; currently a no-op placeholder hook.
    return
@app.after_request
def postProcess(resp):
    # Pre-compressed assets: any URL containing ".gz" is assumed to be a gzip
    # file, so advertise the encoding to the client.
    # NOTE(review): the substring test also matches URLs like "/foo.gz.bak" —
    # confirm this is intended.
    if '.gz' in request.url:
        resp.headers["Content-Encoding"] = "gzip"
    return resp
# Run the development server when executed directly (not via a WSGI host).
if __name__ == '__main__':
    app.run(debug=True)
23,086 | 39219175711141904526b8529f88bb624af3a5b3 | #!usr/bin/env python
# -*- coding: utf-8 -*-
import json
class MqttSubscribeObject:
    """Pairs an MQTT topic with a user callback that receives decoded JSON payloads."""

    # Class-level defaults; overwritten per instance in __init__.
    topic = ""
    messageCallBack = None

    def __init__(self, topic, messageCallBack):
        self.topic = topic
        self.messageCallBack = messageCallBack

    def callBack(self, topic, payload, **kwargs):
        """Decode `payload` as JSON and forward the result to the registered callback.

        `topic` and extra keyword arguments from the MQTT client are accepted
        but ignored here.
        """
        decoded = json.loads(payload)
        self.messageCallBack(decoded)
|
23,087 | acca0f49c6a205b50963a39cfb518b8f221e2990 |
#! /usr/bin/env python
def woman(str_arg):
    """Echo `str_arg` via have_number(), then print the 'bad_group' marker."""
    have_number(str_arg)
    print('bad_group')
def have_number(str_arg):
    """Print the given value to stdout unchanged."""
    print(str_arg)
# Demo entry point: prints 'old_life' then 'bad_group'.
if __name__ == '__main__':
    woman('old_life')
|
23,088 | 1f1430fd4d1715418582fbc09c3354b0dbf26b7c | import sys
from time import time
from ._core import blue, bold, pprint, red, width, wrap_text
def banner():
    """Return a one-line banner naming the limix and Python versions in use."""
    from limix import __version__
    pyver = sys.version.split("\n")[0].strip()
    return "Running Limix {} using Python {}.".format(__version__, pyver)
def add_title_header(title, df):
    """Prepend a dashed header line, with `title` centered, above repr(df).

    The rule is sized to the first line of the repr; when the repr fits on one
    line (no newline) the dashes collapse and only the padded title remains.
    """
    body = repr(df)
    # Dashes available once the title and its two padding spaces are deducted
    # from the width of the repr's first line.
    dashes = body.find("\n") - len(title) - 2
    header = "{} {} {}".format("-" * (dashes // 2), title,
                               "-" * (dashes // 2 + dashes % 2))
    return header + "\n" + body
class session_line(object):
    """
    Print the elapsed time after the execution of a block of code.

    Use as a context manager: `desc` is written when the block starts and the
    line is completed with "done (...)" or a bold red "failed (...)" on exit.
    `disable` suppresses all output. After exit, `elapsed` holds the wall-clock
    duration in seconds.
    """
    def __init__(self, desc="Running... ", disable=False):
        self._disable = disable
        self._tstart = None  # wall-clock start, set on __enter__
        self._desc = desc
        self.elapsed = None  # seconds; set on __exit__
    def __enter__(self):
        self._tstart = time()
        if not self._disable:
            sys.stdout.write(self._desc)
            sys.stdout.flush()
        return self
    def __exit__(self, exception_type, exception_value, traceback):
        from humanfriendly import format_timespan
        from limix.__config__ import get_info
        self.elapsed = time() - self._tstart
        fail = exception_type is not None  # an exception escaped the with-block
        if not self._disable:
            if get_info("rich_text") and not get_info("building_doc"):
                # New line, get back to previous line, and advance cursor to the end
                # of the line. This allows us to always get back to the right cursor
                # position, as long as the cursor is still in the correct line.
                print("\n\033[1A\033[{}C".format(len(self._desc)), end="")
            if fail:
                msg = bold(red("failed"))
                msg += " ({}).".format(format_timespan(self.elapsed))
                pprint(msg)
            else:
                print("done (%s)." % format_timespan(self.elapsed))
            sys.stdout.flush()
class session_block(object):
    """Print session block: session start and session end.

    Context manager that prints a colored, width-wrapped banner when the named
    session starts and another ("ends"/"fails" with the elapsed seconds) when
    it finishes. `disable` suppresses all output.
    """
    def __init__(self, session_name, disable=False):
        self._session_name = session_name
        self._start = None  # wall-clock start, set on __enter__
        self._disable = disable
    def __enter__(self):
        self._start = time()
        msg = " {} session starts ".format(self._session_name)
        if not self._disable:
            msg = wrap_text(msg, width())
            pprint(bold(blue(msg)))
    def __exit__(self, exception_type, exception_value, traceback):
        elapsed = time() - self._start
        fail = exception_type is not None  # did an exception escape the block?
        if fail:
            msg = " {} session fails in {:.2f} seconds "
            color = red
        else:
            msg = " {} session ends in {:.2f} seconds "
            color = blue
        msg = msg.format(self._session_name, elapsed)
        if not self._disable:
            msg = wrap_text(msg, width())
            pprint(bold(color(msg)))
def indent(txt, size=2):
    """Return `txt` with every line (including empty ones) prefixed by `size` spaces."""
    pad = " " * size
    return "\n".join(pad + line for line in txt.split("\n"))
|
23,089 | 6e9fccf778b29d1cff42cd4c680807a383da5e15 | import unittest
import client_encryption.jwe_encryption as to_test
from client_encryption.jwe_encryption_config import JweEncryptionConfig
from tests import get_mastercard_config_for_test
class JweEncryptionTest(unittest.TestCase):
    """Round-trip and fixture-based tests for client_encryption.jwe_encryption.

    setUp configures a JweEncryptionConfig so that the whole payload ("$") is
    encrypted into / decrypted from the field "encryptedValue". The decrypt-only
    tests feed pre-built JWE compact serializations (one per AES content-
    encryption algorithm) produced with the same test key material.
    """
    def setUp(self):
        self._config = JweEncryptionConfig(get_mastercard_config_for_test())
        # Encrypt the entire document; decrypt expects it under "encryptedValue".
        self._config._paths["$"]._to_encrypt = {"$": "$"}
        self._config._paths["$"]._to_decrypt = {"encryptedValue": "$"}
    def test_encrypt_payload_should_be_able_to_be_decrypted(self):
        # encrypt -> decrypt must reproduce the original dict exactly.
        payload = {
            "data": {
                "field1": "value1",
                "field2": "value2"
            }
        }
        encrypted_payload = to_test.encrypt_payload(payload, self._config)
        decrypted_payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertDictEqual(payload, decrypted_payload)
    def test_encrypt_payload_should_be_able_to_decrypt_empty_json(self):
        # Edge case: an empty object must survive the round trip.
        payload = {}
        encrypted_payload = to_test.encrypt_payload(payload, self._config)
        decrypted_payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertDictEqual(payload, decrypted_payload)
    def test_encrypt_payload_should_be_able_to_decrypt_root_arrays(self):
        # Edge case: a JSON array at the document root.
        payload = [
            {
                'field1': 'field2'
            }
        ]
        encrypted_payload = to_test.encrypt_payload(payload, self._config)
        decrypted_payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertListEqual(payload, decrypted_payload)
    def test_encrypt_payload_with_multiple_encryption_paths(self):
        # Two independent subtrees encrypted into two separate fields.
        self._config._paths["$"]._to_encrypt = {"data1": "encryptedData1", "data2": "encryptedData2"}
        self._config._paths["$"]._to_decrypt = {"encryptedData1": "data1", "encryptedData2": "data2"}
        payload = {
            "data1": {
                "field1": "value1",
                "field2": "value2"
            },
            "data2": {
                "field3": "value3",
                "field4": "value4"
            }
        }
        encrypted_payload = to_test.encrypt_payload(payload, self._config)
        self.assertNotIn("data1", encrypted_payload)
        self.assertNotIn("data2", encrypted_payload)
        decrypted_payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertDictEqual(payload, decrypted_payload)
    def test_decrypt_payload_should_decrypt_aes128gcm_payload(self):
        # Fixture built with "enc":"A128GCM", "alg":"RSA-OAEP-256".
        encrypted_payload = {
            "encryptedValue": "eyJlbmMiOiJBMTI4R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.WtvYljbsjdEv-Ttxx1p6PgyIrOsLpj1FMF9NQNhJUAHlKchAo5QImgEgIdgJE7HC2KfpNcHiQVqKKZq_y201FVzpicDkNzlPJr5kIH4Lq-oC5iP0agWeou9yK5vIxFRP__F_B8HSuojBJ3gDYT_KdYffUIHkm_UysNj4PW2RIRlafJ6RKYanVzk74EoKZRG7MIr3pTU6LIkeQUW41qYG8hz6DbGBOh79Nkmq7Oceg0ZwCn1_MruerP-b15SGFkuvOshStT5JJp7OOq82gNAOkMl4fylEj2-vADjP7VSK8GlqrA7u9Tn-a4Q28oy0GOKr1Z-HJgn_CElknwkUTYsWbg.PKl6_kvZ4_4MjmjW.AH6pGFkn7J49hBQcwg.zdyD73TcuveImOy4CRnVpw"
        }
        decrypted_payload = {"foo": "bar"}
        payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertNotIn("encryptedValue", payload)
        self.assertDictEqual(decrypted_payload, payload)
    def test_decrypt_payload_should_decrypt_aes192gcm_payload(self):
        # Fixture built with "enc":"A192GCM".
        encrypted_payload = {
            "encryptedValue": "eyJlbmMiOiJBMTkyR0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.FWC8PVaZoR2TRKwKO4syhSJReezVIvtkxU_yKh4qODNvlVr8t8ttvySJ-AjM8xdI6vNyIg9jBMWASG4cE49jT9FYuQ72fP4R-Td4vX8wpB8GonQj40yLqZyfRLDrMgPR20RcQDW2ThzLXsgI55B5l5fpwQ9Nhmx8irGifrFWOcJ_k1dUSBdlsHsYxkjRKMENu5x4H6h12gGZ21aZSPtwAj9msMYnKLdiUbdGmGG_P8a6gPzc9ih20McxZk8fHzXKujjukr_1p5OO4o1N4d3qa-YI8Sns2fPtf7xPHnwi1wipmCC6ThFLU80r3173RXcpyZkF8Y3UacOS9y1f8eUfVQ.JRE7kZLN4Im1Rtdb.eW_lJ-U330n0QHqZnQ._r5xYVvMCrvICwLz4chjdw"
        }
        decrypted_payload = {"foo": "bar"}
        payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertNotIn("encryptedValue", payload)
        self.assertDictEqual(decrypted_payload, payload)
    def test_decrypt_payload_should_decrypt_aes256gcm_payload(self):
        # Fixture built with "enc":"A256GCM" (header also carries kid/cty).
        encrypted_payload = {
            "encryptedValue": "eyJraWQiOiI3NjFiMDAzYzFlYWRlM2E1NDkwZTUwMDBkMzc4ODdiYWE1ZTZlYzBlMjI2YzA3NzA2ZTU5OTQ1MWZjMDMyYTc5IiwiY3R5IjoiYXBwbGljYXRpb25cL2pzb24iLCJlbmMiOiJBMjU2R0NNIiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.8c6vxeZOUBS8A9SXYUSrRnfl1ht9xxciB7TAEv84etZhQQ2civQKso-htpa2DWFBSUm-UYlxb6XtXNXZxuWu-A0WXjwi1K5ZAACc8KUoYnqPldEtC9Q2bhbQgc_qZF_GxeKrOZfuXc9oi45xfVysF_db4RZ6VkLvY2YpPeDGEMX_nLEjzqKaDz_2m0Ae_nknr0p_Nu0m5UJgMzZGR4Sk1DJWa9x-WJLEyo4w_nRDThOjHJshOHaOU6qR5rdEAZr_dwqnTHrjX9Qm9N9gflPGMaJNVa4mvpsjz6LJzjaW3nJ2yCoirbaeJyCrful6cCiwMWMaDMuiBDPKa2ovVTy0Sw.w0Nkjxl0T9HHNu4R.suRZaYu6Ui05Z3-vsw.akknMr3Dl4L0VVTGPUszcA"
        }
        decrypted_payload = {"foo": "bar"}
        payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertNotIn("encryptedValue", payload)
        self.assertDictEqual(decrypted_payload, payload)
    def test_decrypt_payload_should_decrypt_cbc_payload(self):
        # Fixture built with "enc":"A128CBC-HS256" (CBC + HMAC instead of GCM).
        encrypted_payload = {
            "encryptedValue": "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiUlNBLU9BRVAtMjU2In0.2GzZlB3scifhqlzIV2Rxk1TwiWL35e0AtcI9MFusG9jv9zGrJ8BapJx73PlFu69S0IAR7hXpqwzD7-UzmHUdrxB7izbMm9TNDpznHIuTaJWSRngD5Zui_rUXETL0GJG8dERx7IngqTltfzZanhDnjDNfKaowD6pFSEVN-Ff-pTeJqLMPs5504DtnYGD_uhQjvFmREIBgQTGEINzT88PXwLTAVBbWbAad_I-4Q12YwW_Y4yqmARCMTRWP-ixMrlSWCJlh6hz-biEotWNwGvp2pdhdiEP2VSvvUKHd7IngMWcMozOcoZQ1n18kWiFvt90fzNXSmzTjyGYSWUsa_mVouA.aX5mOSiXtilwYPFeTUFN_A.ZyAY79BAjG-QMQIhesj9bQ.TPZ2VYWdTLopCNkvMqUyuQ"
        }
        decrypted_payload = {"foo": "bar"}
        payload = to_test.decrypt_payload(encrypted_payload, self._config)
        self.assertNotIn("encryptedValue", payload)
        self.assertDictEqual(decrypted_payload, payload)
23,090 | bd1ebecbe92903ef813019a19c532a152196afaf | import numpy as np
from datetime import datetime
from datetime import timedelta
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from tools_BAIU import get_lonlat, get_var, get_prapiroon, read_etrack
# quick=True shows figures interactively; quick=False saves them to png/track.
quick = True
#quick = False
# Size of the uniform smoothing window used by the track reader.
ng = 3
kernel = np.ones( (ng,ng)) / (ng**2)  # ng x ng box-average kernel
def main(
    ctime=datetime( 2018, 7, 5, 0 ),
    stime=datetime( 2018, 7, 5, 0 ),
    stime_ref=datetime( 2018, 6, 27, 0 ),
    dlon=1.0,
    adt_h = 24,
    vtime_ref=datetime( 2018, 7, 6, 0 ),
    ):
    """Scatter-plot ensemble forecast rain amount against TC SLP and TC
    position error.

    For each of the 50 ensemble members, the area-mean accumulated rain over a
    fixed lon/lat box (valid at vtime_ref, accumulated over adt_h hours) is
    paired with the member's tropical-cyclone central SLP (left panel) and its
    track position error vs the Prapiroon best track (right panel), both valid
    at ctime. `stime` is the forecast initialization time; `stime_ref` is
    currently unused inside the function.
    """
    mmax = 50 # debug
    TOP = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6"
    INFO = {"TOP": TOP, }
    lon2d, lat2d = get_lonlat( INFO, stime=datetime( 2018, 7, 1, 0 ) )
    # Averaging box for the precipitation metric (western Japan).
    slon = 130.0
    elon = 137.5
    slat = 33.0
    elat = 36.0
    adt = timedelta( hours=adt_h )
    # get rain ensemble
    rain_l = np.zeros( mmax )
    # get reference
    for m in range( mmax ):
        #rain_ = get_arain( INFO, stime=stime, vtime=vtime_ref, adt=adt, m=m+1 )
        rain_ = get_var( INFO, nvar="RAIN", stime=stime, vtime=vtime_ref, m=m+1, adt=adt )
        rain_l[m] = np.mean( rain_[ (lon2d >= slon ) & (lon2d <= elon) & (lat2d >= slat) & (lat2d <= elat) ] )
#    print( rain_l )
    # Rank members by rain amount, wettest first; mem_l maps rank -> member index.
    mem_l = np.argsort( rain_l )[::-1]
    rain_l = np.sort( rain_l )[::-1]
    tclon_l, tclat_l, tcmslp_l, time_l = read_etrack( stime=stime,
                    ng=ng, dlon=dlon )
    # Find the track-time index matching ctime.
    # NOTE(review): `cit` is only bound when some entry of time_l equals ctime
    # exactly — otherwise the code below raises NameError; confirm ctime is
    # always present in the track times.
    for i, time_ in enumerate( time_l ):
        dt = ( time_ - ctime ).total_seconds()
        if dt == 0.0:
            cit = i
    fig, ( ( ax1, ax2 ) ) = plt.subplots( 1, 2, figsize=( 11, 5.0 ) )
    fig.subplots_adjust( left=0.1, bottom=0.1, right=0.97, top=0.95,
                         wspace=0.15, hspace=0.2)
    # Best-track (reference) position and SLP of Typhoon Prapiroon at ctime.
    blon, blat, bmslp = get_prapiroon( time=ctime )
    tclat_l_ = []
    tclon_l_ = []
    tcslp_l_ = []
    #for m in range( mmax ):
    for i, mem in enumerate( mem_l[:] ):
        print( mem, rain_l[i])
        tclat_l_.append( tclat_l[mem,cit] )
        tclon_l_.append( tclon_l[mem,cit] )
        tcslp_l_.append( tcmslp_l[mem,cit] )
    tclon_l = np.array( tclon_l_)
    tclat_l = np.array( tclat_l_)
    tcslp_l = np.array( tcslp_l_)
    # Great-circle distance is approximated by Euclidean distance in degrees.
    err_l = np.sqrt( np.square( tclon_l[:] - blon ) +
                     np.square( tclat_l[:] - blat ) )
    print( tcslp_l )
#    err_l = tcslp_l[:] - bmslp
#    err_l[ tclat_l > 40.0 ] = np.nan
    #ax1.scatter( tclat_l_, rain_l )
    ax1.scatter( tcslp_l, rain_l, c=rain_l, cmap="jet" )
    ax2.scatter( err_l, rain_l, c=rain_l, cmap="jet" )
#    ax1.scatter( tclon_l, rain_l )
#    ax1.scatter( tclat_l, rain_l )
#    xlab = "TC longitude (deg)"
    xlab = "TC SLP valid at {0:} (hPa)".format( ctime.strftime('%HUTC %m/%d'), )
    xlab2 = "TC position error valid at {0:} (deg)".format( ctime.strftime('%HUTC %m/%d'), )
    ylab = "Forecast precipitation amount (mm)"
    ax1.set_ylabel( ylab, fontsize=12 )
    ax1.set_xlabel( xlab, fontsize=12 )
    ax2.set_xlabel( xlab2, fontsize=12 )
    note = "Period: {0:}\n-{1:}".format( ( vtime_ref - timedelta(hours=adt_h) ).strftime('%HUTC %m/%d'),
                                         vtime_ref.strftime('%HUTC %m/%d'), )
    ax1.text( 0.99, 0.8, note,
              fontsize=10, transform=ax1.transAxes,
              ha='right', va='top',
              )
    tit = "Initial: {0:}".format( stime.strftime('%HUTC %m/%d') )
    fig.suptitle( tit, fontsize=14 )
#    ax1.text( 0.99, 0.99, Initl,
#              fontsize=12, transform=ax1.transAxes,
#              ha='right', va='top', )
    opath = "png/track"
    ofig = "2p_scat_track_s{0:}_dlon{1:}_ng{2:0=3}_{3:}".format( stime.strftime('%m%d'), dlon, ng, ctime.strftime('%m%d%H'), )
    # Save to disk, or show interactively and stop (quick mode).
    if not quick:
        os.makedirs(opath, exist_ok=True)
        ofig = os.path.join(opath, ofig + ".png")
        plt.savefig(ofig,bbox_inches="tight", pad_inches = 0.1)
        print(ofig)
        plt.clf()
    else:
        print(ofig)
        plt.show()
        sys.exit()
#    plt.scatter( tclat_l_, rain_l[::-1] )
#    plt.show()
#    plt.scatter( tclon_l_, rain_l[::-1] )
#    plt.show()
##################
# Experiment driver. The repeated reassignments below are manual toggles left
# from successive runs — only the LAST assignment of each name takes effect.
stime = datetime( 2018, 6, 27, 0 )
etime = datetime( 2018, 7, 5, 0 )
stime = datetime( 2018, 6, 27, 0 )
etime = datetime( 2018, 7, 3, 0 )
#stime = datetime( 2018, 7, 3, 0 )
stime = datetime( 2018, 7, 2, 0 )
stime = datetime( 2018, 7, 1, 0 )
stime = datetime( 2018, 6, 30, 0 )
etime = stime
dlon = 1.0
dlon = 2.0
#dlon = 3.0
# TC mark
ctime = datetime( 2018, 7, 6, 0 )
ctime = datetime( 2018, 7, 1, 0 )
#ctime = datetime( 2018, 7, 6, 0 )
ctime = datetime( 2018, 6, 30, 0 )
#ctime = datetime( 2018, 7, 2, 0 )
#ctime = datetime( 2018, 7, 1, 0 )
adt_h = 48
vtime_ref = datetime( 2018, 7, 7, 0 )
#stime = etime
# Run main() once per initialization day between stime and etime (inclusive).
time = stime
while time <= etime:
    main(
        ctime=ctime,
        stime=time,
        dlon=dlon,
        adt_h=adt_h,
        vtime_ref=vtime_ref,
        )
    time += timedelta( days=1 )
|
23,091 | b123c3228addb09cefcaadf99d85abb84241dea9 | # -*- coding: utf8 -*-
import os
import sys
class Graphe:
    """Python 2 directed-graph class (French identifiers: sommets=vertices,
    arcs=edges, incidence maps arc -> [src, dst], adjacence maps vertex ->
    successor list). Supports depth-first search (PP), topological sort, edge
    reversal, and Graphviz .dot export. Prompts for a file name on creation.
    """
    def __init__(self):
        self.sommets = []       # vertex list
        self.arcs = []          # edge-name list
        self.incidence = {}     # edge name -> [source, destination]
        self.adjacence = {}     # vertex -> list of successors
        self.couleur={}         # DFS colour per vertex (blanc/gris/noir)
        self.pere={}            # DFS parent per vertex
        self.debut={}           # DFS discovery time
        self.fin={}             # DFS finish time
        self.temps=0            # DFS clock
        self.name=raw_input("Donnez un nom à votre fichier (sans extension)")
    def get_name(self):
        return self.name
    def ajouter_sommet(self,value):
        # Add a vertex (ignored with a message if it already exists).
        if value not in self.sommets:
            self.sommets.append(value)
            self.adjacence[value]=[]
        else:
            print "Ce sommet existe déjà"
    def get_nb_sommets(self):
        return len(self.sommets)
    def ajouter_un_arc(self,S1,S2,A):
        # Add directed edge A from S1 to S2; both endpoints must already exist.
        if A not in self.arcs:
            if (S1 in self.sommets) and (S2 in self.sommets):
                self.arcs.append(A)
                self.incidence[A]=[S1,S2]
                self.adjacence[S1].append(S2)
    def liste_pere(self):
        return self.pere
    def liste_debut(self):
        return self.debut
    def liste_incidence(self):
        return self.incidence
    def liste_adjacence(self):
        return self.adjacence
    def liste_fin(self):
        return self.fin
    def nb_aretes(self):
        # Count edges (equivalent to len(self.arcs)).
        arcs = 0
        for i in range(len(self.arcs)):
            arcs +=1
        return arcs
    def supprimer_arete(self,A):
        """Remove edge A, and prune adjacency entries between its endpoints."""
        index=0
        if A in self.incidence:
            for arc in self.arcs:
                if(arc == A):
                    del self.arcs[index]
                else:
                    index+=1
            sommetsADelete = list(self.incidence[A])
            del self.incidence[A]
            for cle in self.adjacence:
                if cle in sommetsADelete:
                    index2=0
                    for i in range(len(self.adjacence[cle])):
                        if self.adjacence[cle][index2] in sommetsADelete:
                            del self.adjacence[cle][index2]
                            index2-=1
                        index2+=1
        else:
            print "\nCette arête n'existe pas !\n"
    def supprimer_sommet(self,S):
        """Remove vertex S together with every incident edge, working on copies
        and swapping them in at the end."""
        arcsAsupprimer = []
        copySommets = list(self.sommets)
        copyIncidence = self.incidence.copy()
        copyArcs = list(self.arcs)
        copyAdjacence = self.adjacence.copy()
        if S in self.sommets:
            for i in range(len(self.sommets)):
                if self.sommets[i]== S:
                    del copySommets[i]
            # Collect and drop every edge touching S.
            for arc in self.incidence:
                if(S in self.incidence[arc]):
                    arcsAsupprimer.append(arc)
                    del copyIncidence[arc]
            index = 0
            for i in range(len(self.arcs)):
                if self.arcs[i] in arcsAsupprimer:
                    del copyArcs[index]
                    index-=1
                index +=1
            # Remove adjacency data related to S.
            for cle in self.adjacence:
                index=0
                for i in range(len(self.adjacence[cle])):
                    if self.adjacence[cle][index] == S:
                        del copyAdjacence[cle][index]
                        index-=1
                    index +=1
                if cle == S:
                    del copyAdjacence[cle]
            self.sommets = list(copySommets)
            self.incidence = copyIncidence.copy()
            self.arcs = list(copyArcs)
            self.adjacence = copyAdjacence.copy()
        else:
            print "Ce sommet n'existe pas !"
    # NOTE(review): this first ecrire_graphe_non_oriente is dead code — it is
    # shadowed by the second definition of the same name further below.
    def ecrire_graphe_non_oriente(self):
        nom = self.name+"_non_oriente.dot"
        fic=open(nom,"w")
        fic.write('graph {')
        fic.write('\n')
        for arc in self.incidence:
            fic.write(str(self.incidence[arc][0]))
            fic.write(" -- ")
            fic.write(str(self.incidence[arc][1])+"[label=\""+str(arc)+"\"];")
            fic.write("\n")
        fic.write("}")
        fic.close()
    def ecrire_graphe_oriente(self):
        """Write the directed graph as Graphviz .dot, render it, and optionally
        open the result. Uses the module-level `couleur` palette per edge.
        NOTE(review): `dot -Tpng ... -o name.dot` overwrites the .dot file with
        PNG bytes — the output name was likely meant to end in .png."""
        c=0
        nom = self.name+"_oriente.dot"
        fic=open(nom,"w")
        fic.write('digraph {')
        fic.write('\n')
        for arc in self.incidence:
            fic.write(str(self.incidence[arc][0]))
            fic.write(" -> ")
            fic.write(str(self.incidence[arc][1])+"[label=\""+str(arc)+"\",color="+couleur[c]+",penwidth=3.0];")
            c+=1
            fic.write("\n")
        fic.write("}")
        fic.close()
        os.system('dot -Tpng '+self.name+'_oriente.dot -o '+self.name+'_oriente.dot')
        choix=raw_input("Voulez-vous ouvrir le fichier contenant le graphe orienté ? y/n")
        if choix == "y":
            os.system('libreoffice '+self.name+'_oriente.dot &')
    def ecrire_graphe_non_oriente(self):
        # Same as ecrire_graphe_oriente but with undirected "graph"/"--" syntax.
        c=0
        nom = self.name+"_non_oriente.dot"
        fic=open(nom,"w")
        fic.write('graph {')
        fic.write('\n')
        for arc in self.incidence:
            fic.write(str(self.incidence[arc][0]))
            fic.write(" -- ")
            fic.write(str(self.incidence[arc][1])+"[label=\""+str(arc)+"\",color="+couleur[c]+",penwidth=3.0];")
            c+=1
            fic.write("\n")
        fic.write("}")
        fic.close()
        os.system('dot -Tpng '+self.name+'_non_oriente.dot -o '+self.name+'_non_oriente.dot')
        choix=raw_input("Voulez-vous ouvrir le fichier contenant le graphe orienté ? y/n")
        if choix == "y":
            os.system('libreoffice '+self.name+'_non_oriente.dot &')
    def PP(self):
        """Full depth-first search ("parcours en profondeur"): initialise every
        vertex white, then visit each white vertex, filling couleur/pere/
        debut/fin."""
        for i in range(len(self.sommets)):
            self.temps=0
            self.couleur[self.sommets[i]]="blanc"
            self.pere[self.sommets[i]]=None
            self.debut[self.sommets[i]]=0
            self.fin[self.sommets[i]]=0
        for i in range(len(self.sommets)):
            if self.couleur[self.sommets[i]]=="blanc":
                self.visiterPP(self.sommets[i])
    def visiterPP(self,S):
        # Recursive DFS visit: grey on discovery, black on finish, timestamps
        # recorded in debut/fin.
        self.couleur[S]="gris"
        self.temps +=1
        self.debut[S]=self.temps
        for cle in self.adjacence[S]:
            if self.couleur[cle]=="blanc":
                self.pere[cle]=S
                self.visiterPP(cle)
        self.couleur[S]="noir"
        self.temps+=1
        self.fin[S]=self.temps
    def tri_topologique(self):
        """Topological sort: run a DFS, then order vertices by decreasing
        finish time."""
        self.PP()
        sommetsTries=[]
        tri=[]
        while len(sommetsTries)<len(self.liste_fin()):
            maxi=0
            for i in self.fin:
                if self.fin[i]>maxi and self.fin[i] not in sommetsTries:
                    maxi=self.fin[i]
            sommetsTries.append(maxi)
        sommetsTries=sommetsTries[::-1]# reverse to get ascending order
        for i in range(len(sommetsTries)):
            for j in self.fin:
                if sommetsTries[i]==self.fin[j]:
                    tri.append(j)
        return tri
    def inverser_arcs(self):
        """Reverse every edge in place, rebuild the adjacency map, then print
        the vertices by decreasing DFS finish time of the reversed graph
        (the classic second pass of Kosaracci-style SCC exploration)."""
        self.adjacence={}
        for cle in self.incidence:
            self.incidence[cle]=self.incidence[cle][::-1]
        for sommets in self.sommets:
            self.adjacence[sommets]=[]
        for cle in self.incidence:
            self.adjacence[self.incidence[cle][0]].append(self.incidence[cle][1])
        print "\n\nLIste d'adjacence de l'inverse : ",self.liste_adjacence(),"\n\n"
        self.PP()
        sommetsTries=[]
        tri=[]
        while len(sommetsTries)<len(self.liste_fin()):
            maxi=0
            for i in self.fin:
                if self.fin[i]>maxi and self.fin[i] not in sommetsTries:
                    maxi=self.fin[i]
            sommetsTries.append(maxi)
        for i in range(len(sommetsTries)):
            for j in self.fin:
                if sommetsTries[i]==self.fin[j]:
                    tri.append(j)
        print "\n Sommets par ordre décroissants de fin : ",tri
# Edge colour palette consumed by the ecrire_graphe_* methods (one per edge,
# indexed in insertion order — at most 8 edges supported).
couleur=["yellow","blue","green","pink","violet","brown","grey","magenta"]
# Demo: build a small 7-vertex digraph, run a DFS and a topological sort,
# then export it through Graphviz.
graphe1 = Graphe()
graphe1.ajouter_sommet("A")
graphe1.ajouter_sommet("B")
graphe1.ajouter_sommet("C")
graphe1.ajouter_sommet("D")
graphe1.ajouter_sommet("E")
graphe1.ajouter_sommet("F")
graphe1.ajouter_sommet("G")
graphe1.ajouter_un_arc("A","B","arc1")
graphe1.ajouter_un_arc("B","D","arc2")
graphe1.ajouter_un_arc("B","F","arc3")
graphe1.ajouter_un_arc("A","C","arc4")
graphe1.ajouter_un_arc("C","G","arc5")
graphe1.ajouter_un_arc("A","E","arc6")
graphe1.ajouter_un_arc("F","E","arc7")
graphe1.ajouter_un_arc("A","A","arc8")   # self-loop on A
print graphe1.__dict__
print "\n\n"
#    graphe1.supprimer_sommet("A")
graphe1.PP()
print "Tri topologique : ",graphe1.tri_topologique()
#    graphe1.inverser_arcs()
#    graphe1.inverser_arcs()
#    graphe1.PP()
print "\n\n"
#    graphe1.parcours_en_profondeur("B")
#    graphe1.parcours_graphe("B")
print graphe1.__dict__
#    print "Nombre de sommets : ",graphe1.get_nb_sommets()
#    print "Liste des incidences : ",graphe1.liste_incidence()
#    print "Nombre arêtes : ",graphe1.nb_aretes()
graphe1.ecrire_graphe_oriente()
#    graphe1.ecrire_graphe_oriente()
#    graphe1.ecrire_graphe_non_oriente()
|
23,092 | 6fcc555d614051797d80e09cc717bb165765df02 | from django.shortcuts import render, redirect # Importa o redirect
from .models import Transacao # Importa o model
from .form import TransacaoForm # Importa a classe do form
def cadastro_transacao(request):  # View that registers a new transaction
    """Render the transaction form; on a valid POST, save it and redirect to the listing."""
    data = {}  # Context dictionary passed to the template
    form = TransacaoForm(request.POST or None)  # Bound form on POST (Save button),
    # empty form on GET
    if form.is_valid():  # Validate the submitted data
        form.save()  # Persist the new transaction to the database
        return redirect('url_listagem')  # Redirect to the listing view
    data['form'] = form  # Expose the (possibly invalid) form to the template
    return render(request, "contas/cadastro.html", data)
def listagem(request):
    """Render the listing page with every stored Transacao."""
    data = {}  # Context dictionary passed to the template
    data["transacoes"] = Transacao.objects.all()  # `objects` is Django's default
    # model manager, used here to fetch all rows of the model
    return render(request, "contas/listagem.html", data)
def home(request):
    """Render the static landing page (no context needed)."""
    return render(request, "contas/home.html")
'''Retorna um render permitindo renderizar um template , passando
como parametrosa request e o nome e caminho do template '''
|
23,093 | c8c7b30d468b355ee718f3f6fa396b07067524a7 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 16 10:32:44 2018
@author: H.T
"""
#
# 校准函数
# calibration.py
#
import scipy.optimize as spo
class Calibrate(object):
    ''' Calibration driver for the model parameters
    (kappa_v, theta_v, sigma_v, rho).

    Parameters
    ==========
    price_dif_class :
        object holding observed market prices; must expose get_dif(params),
        returning the mean-squared pricing error for a parameter vector

    Methods
    =======
    loc_fmin :
        coarse grid search; returns a local minimum
    opti_param :
        bounded local refinement; returns the optimal parameter vector
    '''
    def __init__(self, price_dif_class):
        self.dif_class = price_dif_class
    def loc_fmin(self):
        # Brute-force grid over (kappa_v, theta_v, sigma_v, rho); finish=None
        # returns the best grid point without a follow-up local optimisation.
        locmin = spo.brute(self.dif_class.get_dif, ((1, 5.1, 0.5),
                                                    (0.1, 1.1, 0.1),
                                                    (0.1, 1.1, 0.1),
                                                    (-0.99, 0.99, 0.19)), finish=None)
        return locmin
    def opti_param(self, initial_list):
        # Bounded L-BFGS-B refinement from initial_list (typically the
        # loc_fmin result). Prints a success/failure report (in Chinese) and
        # returns the final parameter vector either way.
        bnds = ((0, 6), (0, 1), (0, 1), (-0.99, 0.99))
        globalmin = spo.minimize(self.dif_class.get_dif, initial_list,
                                 method='L-BFGS-B', bounds=bnds,
                                 options={'disp': True})
        if globalmin.success:
            print('参数校准最优化成功!')
            print(globalmin.message)
            print('最优均方误差:%8.9f' % globalmin.fun)
            print('kappa_v: %8.4f' % globalmin.x[0])
            print('theta_v: %8.4f' % globalmin.x[1])
            print('sigma_v: %8.4f' % globalmin.x[2])
            print('rho: %8.4f' % globalmin.x[3])
            return globalmin.x
        else:
            print('参数校准失败,请重新设定起始参数!')
            print(globalmin.message)
            print('终止校准均方误差:%8.9f' % globalmin.fun)
            print('建议起始参数:')
            print('kappa_v: %8.4f' % globalmin.x[0])
            print('theta_v: %8.4f' % globalmin.x[1])
            print('sigma_v: %8.4f' % globalmin.x[2])
            print('rho: %8.4f' % globalmin.x[3])
            return globalmin.x
23,094 | 1f998a092f55f20ef6579045d08894c69c0b824a | import math
class Isotope():
    """A radioactive isotope with decay constants derived from its half-life,
    and an optional decay chain with branching ratios.

    Z is the mass number, half_life is given in YEARS, NA is the natural
    abundance fraction.
    """

    def __init__(self, Z, half_life, NA, chain=None, name=None, branches=None):
        self.Z = Z
        # Convert the half-life from years to seconds once, up front.
        self.half_life = half_life * 365.25 * 24. * 60. * 60.
        self.NA = NA
        self.lifetime = self.calculate_lifetime()
        self.lam = self.calculate_lambda()
        self.activity = self.calculate_activity()
        self.chain = chain
        self.branches = branches
        # A bare isotope contributes only itself; a chain contributes its daughters.
        self.contributors = chain if chain else [name]
        self.name = name

    def __repr__(self):
        return f"Iso ({self.name})"

    def calculate_lifetime(self) -> float:
        """Mean lifetime tau = t_half / ln 2, in seconds."""
        return self.half_life / math.log(2)

    def calculate_lambda(self) -> float:
        """Decay constant lambda = ln 2 / t_half, per second."""
        return math.log(2) / self.half_life

    def calculate_activity(self) -> float:
        # NOTE(review): Z here is the mass number, and Z/1000 treats it as a
        # molar mass in kg/mol, so this looks like decays per second per kg —
        # confirm the intended units.
        return 6.022e23 * self.lam / (self.Z / 1000)
# No alphas
# Decay-chain daughters listed per parent (beta/gamma emitters only; alpha
# steps are omitted, as the comment above notes).
chains = {
    "U238": ['234Pa', '214Pb', '214Bi', '210Bi', '210Tl'],
    "Th232": ['228Ac', '212Pb', '212Bi', '208Tl'],
    "U235": ['231Th', '223Fr', '211Pb', '211Bi', '207Tl'],
    "Rn222": ['214Pb', '214Bi', '210Bi', '210Tl'],
}
# Source periodictable.com
# Branching ratio (fraction of decays reaching each daughter) per parent.
branches = {
    "U238": {'234Pa': 1.0, '214Pb': 1.0, '214Bi': 1.0, '210Bi': 1.0, '210Tl': 0.0021},
    "Th232": {'228Ac': 1.0, '212Pb': 1.0, '212Bi': 0.6405, '208Tl': 0.3594},
    "U235": {'231Th': 1.0, '223Fr': 0.0138, '211Pb': 1.0, '211Bi': 0.00276, '207Tl': 1.34e-9},
    "Rn222": {'214Pb': 1.0, '214Bi': 1.0, '210Bi': 1.0, '210Tl': 0.0021}
}
# Registry of supported isotopes: (mass number, half-life in years, abundance),
# with chain/branch data attached where applicable.
isotopes = {
    "U238": Isotope(238, 4.47e9, 0.9928, chain=chains['U238'], branches=branches["U238"], name="U238"),
    "U235": Isotope(235, 7.04e8, 0.0072, chain=chains['U235'], branches=branches["U235"], name="U235"),
    "Th232": Isotope(232, 1.41e10, 0.9998, chain=chains['Th232'], branches=branches["Th232"], name="Th232"),
    "Rn222": Isotope(222, 3.82/365.25, 1, chain=chains['Rn222'], branches=branches["Rn222"], name="Rn222"),
    "K40": Isotope(40, 1.28e9, 0.000117, name="K40"),
    "Gd152": Isotope(152, 1.08e14, 0.002, name="Gd152"),
    "Co60": Isotope(60, 5.27, 1, name="Co60"),
    "Cs137": Isotope(137, 30.17, 1, name="Cs137"),
}
|
23,095 | d8ef9f19c5d2dafdae1fd5630030d86326a7cd9c | def laptop_nuovo(ram,cpu,antivirus=False):
print('Il nuovo laptop avrà le seguenti caratteristiche: ')
print('Ram: ' +ram)
print('CPU: ' +cpu)
if antivirus==True:
print('Sei in possesso anche di un antivirus')
|
23,096 | fd453a3ae5266e708bbf50469939a03a3e0e5da7 | import pandas as pd
import matplotlib.pyplot as plt
from pandas import DataFrame as f
import nltk
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer, PorterStemmer
import numpy as np
from textblob import TextBlob
# Load the review dataset and normalise the text: drop metadata columns,
# lower-case, remove English stopwords, then Porter-stem every token.
data = pd.read_csv('srilanka.csv')
we=data.drop(['Author','date','Location'], axis = 1)
we['Review'] = we['Review'].astype(str)
#print(we['Review'][1])
we['Review'] = we['Review'].apply(lambda x: " ".join(x.lower() for x in x.split()))
stop = stopwords.words('english')
we['Review'] = we['Review'].apply(lambda x: " ".join(x for x in x.split() if x not in stop))
st = PorterStemmer()
we['Review'] = we['Review'].apply(lambda x: " ".join([st.stem(word) for word in x.split()]))
def senti(x):
    """Return TextBlob's sentiment for text x — a (polarity, subjectivity)
    pair, unpacked downstream into the 'senti' and 'xdr' columns."""
    return TextBlob(x).sentiment
# Score every review and tally positive / neutral / negative counts by the
# sign of the polarity component.
we['senti_score'] = we['Review'].apply(senti)
#print(we['senti_score'].str.split(",",expand =True))
df = pd.DataFrame(we)
#df[df==' ']=np.nan
#a = df['senti_score'].dtype #str.split(',',expand=True)
# Split the (polarity, subjectivity) pairs into two columns.
df['senti'],df['xdr']=zip(*df.senti_score)
#print(df['senti'])
ne=0
pc=0
nn=0
for index, row in df.iterrows():
    if (row["senti"] > 0):
        pc=pc+1
    elif(row["senti"] < 0):
        ne=ne+1
    else:
        nn=nn+1
print ('posivive review: ',pc)
print ('neutral review: ',nn)
print ('negative review: ',ne)
#print(type(df['senti_score']))
#ee=we['date'].value_counts()
#print(we)
#print(f'data1 = {len(we[we["date"] == "2019"])}')
|
23,097 | 832adf33f0da42f91b3d946b25d16974d85d1520 | '''
Write a function that accepts two parameters, i) a string (containing a list of words)
and ii) an integer (n). The function should alphabetize the list based on the nth letter
of each word.
example:
function sortIt('bid, zag', 2) #=> 'zag, bid'
The length of all words provided in the list will be >= n. The format will be "x, x, x".
In Haskell you'll get a list of Strings instead.
Test.assert_equals(sort_it('bill, bell, ball, bull', 2),'ball, bell, bill, bull' , 'Sort by the second letter')
Test.assert_equals(sort_it('cat, dog, eel, bee', 3),'bee, dog, eel, cat' , 'Sort by the third letter')
'''
'''
def swap(i , j, list):
tmp = list[i]
list[i] = list[j]
list[j] = tmp
'''
def swap(i, j , list):
    # Exchange the elements at positions i and j of `list` in place, via tuple
    # unpacking.
    # NOTE(review): the parameter named `list` shadows the builtin type inside
    # this function; renaming it would change the keyword-call interface, so it
    # is left as-is.
    list[i], list[j] = list[j], list[i]
def sort_it(list_, n=None):
    """Alphabetize the comma-separated words in `list_` by their n-th letter.

    Words are compared on the suffix starting at 1-based position `n`
    (matching the original comparison `word[n-1:]`); the problem guarantees
    every word has length >= n. When n is None the input is returned
    unchanged.

    Replaces the original O(n^2) exchange sort with the stable, O(n log n)
    built-in sort — equal keys now also keep their original order, which the
    exchange sort did not guarantee.
    """
    if n is None:
        return list_
    words = list_.split(', ')
    words.sort(key=lambda w: w[n - 1:])
    return ", ".join(words)
# Ad-hoc smoke checks, run at import time.
print(sort_it('bill, bell, ball, bull', 2))
print(sort_it("Arthur von Streit, Ernst von Eisenach, Ernst von Eisenach, Paul von Oberstein, Helmut Rennenkampf, Anton Ferner, Adalbert von Fahrenheit", 8))
print(sort_it('Oskar von Reuenthal, Neidhardt Muller, Karl Robert Steinmetz, Paul von Oberstein, Adalbert von Fahrenheit, Helmut Rennenkampf, Ernst von Eisenach, August Samuel Wahlen', None))
23,098 | 5334c219aaf9c115e6f88aab0e7a4e6a168fa36e | class timer:
hr=0
minn=0
sec=34
def __init__(self,h,m,s):
self.hr=h
self.minn=m
self.sec=s
def show(self):
print 'TIME:',self.hr,':',self.minn,':',self.sec
# Module-level demo: construct two timers and print them.
t1=timer(3,45,34)
t1.show()
t2=timer(2,12,56)
t2.show()
23,099 | 66221e16c82c4e5ecdb8d5bcef1bdf536c001797 | import tkinter as tk
class SudokuError(Exception):
    """Application-specific exception raised by the Sudoku game code."""
    pass
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.