blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3efb3fa0f33c9db9e23e81ccddbd12529703f1e8 | ddb8c14775dfbe9424691dabf1617273d118d317 | /catkin_ws/build/msg_check/catkin_generated/pkg.installspace.context.pc.py | d23c59c9aa6dcd0265af7cbe01246235712f19cc | [] | no_license | rishabhdevyadav/fastplanneroctomap | e8458aeb1f2d3b126d27dc57011c87ae4567687a | de9d7e49cb1004f3b01b7269dd398cf264ed92b4 | refs/heads/main | 2023-05-12T22:12:27.865900 | 2021-05-26T19:25:31 | 2021-05-26T19:25:31 | 356,674,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Catkin pkg-config context for the msg_check package. Values below are
# substituted by CMake from pkg.context.pc.in at configure time; "${prefix}"
# is a pkg-config variable expanded later, not a Python placeholder.
CATKIN_PACKAGE_PREFIX = ""
# Exported include directories: the package's own headers plus Eigen3.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "${prefix}/include;/usr/include/eigen3".split(';') if "${prefix}/include;/usr/include/eigen3" != "" else []
# Run dependencies, converted from semicolon-separated to space-separated.
PROJECT_CATKIN_DEPENDS = "geometry_msgs;mav_msgs;nav_msgs;roscpp;rospy;sensor_msgs;message_runtime".replace(';', ' ')
# Linker flags for the libraries this package provides.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmsg_check".split(';') if "-lmsg_check" != "" else []
PROJECT_NAME = "msg_check"
PROJECT_SPACE_DIR = "/home/rishabh/catkin_ws/install"
PROJECT_VERSION = "2.1.2"
| [
"rishabhdevyadav95@gmail.com"
] | rishabhdevyadav95@gmail.com |
0e02e78b9bd8be2a809d040cede78b8f52514e05 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudfunctions/v2beta/cloudfunctions_v2beta_client.py | fcd7b507b5f649128645efa4b619ae74c347b2c0 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 21,995 | py | """Generated client library for cloudfunctions version v2beta."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudfunctions.v2beta import cloudfunctions_v2beta_messages as messages
class CloudfunctionsV2beta(base_api.BaseApiClient):
  """Generated client library for service cloudfunctions version v2beta."""
  # NOTE(review): this client is machine-generated by apitools (see the module
  # header); regenerate rather than hand-editing. Each method's
  # `<Name>.method_config` is a lazily-evaluated lambda returning the
  # ApiMethodInfo that describes the REST binding for that method.
  MESSAGES_MODULE = messages
  BASE_URL = 'https://cloudfunctions.googleapis.com/'
  MTLS_BASE_URL = 'https://cloudfunctions.mtls.googleapis.com/'
  _PACKAGE = 'cloudfunctions'
  _SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
  _VERSION = 'v2beta'
  _CLIENT_ID = 'CLIENT_ID'
  _CLIENT_SECRET = 'CLIENT_SECRET'
  _USER_AGENT = 'google-cloud-sdk'
  _CLIENT_CLASS_NAME = 'CloudfunctionsV2beta'
  _URL_VERSION = 'v2beta'
  _API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None, response_encoding=None):
    """Create a new cloudfunctions handle."""
    url = url or self.BASE_URL
    super(CloudfunctionsV2beta, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers,
        response_encoding=response_encoding)
    # One service object per discovery resource collection.
    self.projects_locations_functions = self.ProjectsLocationsFunctionsService(self)
    self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
    self.projects_locations_runtimes = self.ProjectsLocationsRuntimesService(self)
    self.projects_locations = self.ProjectsLocationsService(self)
    self.projects = self.ProjectsService(self)
  class ProjectsLocationsFunctionsService(base_api.BaseApiService):
    """Service class for the projects_locations_functions resource."""
    _NAME = 'projects_locations_functions'
    def __init__(self, client):
      super(CloudfunctionsV2beta.ProjectsLocationsFunctionsService, self).__init__(client)
      self._upload_configs = {
          }
    def Create(self, request, global_params=None):
      r"""Creates a new function. If a function with the given name already exists in the specified project, the long running operation will return `ALREADY_EXISTS` error.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsCreateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Create')
      return self._RunMethod(
          config, request, global_params=global_params)
    Create.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions',
        http_method='POST',
        method_id='cloudfunctions.projects.locations.functions.create',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=['functionId'],
        relative_path='v2beta/{+parent}/functions',
        request_field='function',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsCreateRequest',
        response_type_name='Operation',
        supports_download=False,
    )
    def Delete(self, request, global_params=None):
      r"""Deletes a function with the given name from the specified project. If the given function is used by some trigger, the trigger will be updated to remove this function.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)
    Delete.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
        http_method='DELETE',
        method_id='cloudfunctions.projects.locations.functions.delete',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v2beta/{+name}',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsDeleteRequest',
        response_type_name='Operation',
        supports_download=False,
    )
    def GenerateDownloadUrl(self, request, global_params=None):
      r"""Returns a signed URL for downloading deployed function source code. The URL is only valid for a limited period and should be used within 30 minutes of generation. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsGenerateDownloadUrlRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GenerateDownloadUrlResponse) The response message.
      """
      config = self.GetMethodConfig('GenerateDownloadUrl')
      return self._RunMethod(
          config, request, global_params=global_params)
    GenerateDownloadUrl.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:generateDownloadUrl',
        http_method='POST',
        method_id='cloudfunctions.projects.locations.functions.generateDownloadUrl',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v2beta/{+name}:generateDownloadUrl',
        request_field='generateDownloadUrlRequest',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsGenerateDownloadUrlRequest',
        response_type_name='GenerateDownloadUrlResponse',
        supports_download=False,
    )
    def GenerateUploadUrl(self, request, global_params=None):
      r"""Returns a signed URL for uploading a function source code. For more information about the signed URL usage see: https://cloud.google.com/storage/docs/access-control/signed-urls. Once the function source code upload is complete, the used signed URL should be provided in CreateFunction or UpdateFunction request as a reference to the function source code. When uploading source code to the generated signed URL, please follow these restrictions: * Source file type should be a zip file. * No credentials should be attached - the signed URLs provide access to the target bucket using internal service identity; if credentials were attached, the identity from the credentials would be used, but that identity does not have permissions to upload files to the URL. When making a HTTP PUT request, these two headers need to be specified: * `content-type: application/zip` And this header SHOULD NOT be specified: * `Authorization: Bearer YOUR_TOKEN`.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsGenerateUploadUrlRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (GenerateUploadUrlResponse) The response message.
      """
      config = self.GetMethodConfig('GenerateUploadUrl')
      return self._RunMethod(
          config, request, global_params=global_params)
    GenerateUploadUrl.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions:generateUploadUrl',
        http_method='POST',
        method_id='cloudfunctions.projects.locations.functions.generateUploadUrl',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=[],
        relative_path='v2beta/{+parent}/functions:generateUploadUrl',
        request_field='generateUploadUrlRequest',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsGenerateUploadUrlRequest',
        response_type_name='GenerateUploadUrlResponse',
        supports_download=False,
    )
    def Get(self, request, global_params=None):
      r"""Returns a function with the given name from the requested project.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Function) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.functions.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v2beta/{+name}',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsGetRequest',
        response_type_name='Function',
        supports_download=False,
    )
    def GetIamPolicy(self, request, global_params=None):
      r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsGetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('GetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)
    GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:getIamPolicy',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.functions.getIamPolicy',
        ordered_params=['resource'],
        path_params=['resource'],
        query_params=['options_requestedPolicyVersion'],
        relative_path='v2beta/{+resource}:getIamPolicy',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsGetIamPolicyRequest',
        response_type_name='Policy',
        supports_download=False,
    )
    def List(self, request, global_params=None):
      r"""Returns a list of functions that belong to the requested project.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListFunctionsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.functions.list',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
        relative_path='v2beta/{+parent}/functions',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsListRequest',
        response_type_name='ListFunctionsResponse',
        supports_download=False,
    )
    def Patch(self, request, global_params=None):
      r"""Updates existing function.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)
    Patch.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}',
        http_method='PATCH',
        method_id='cloudfunctions.projects.locations.functions.patch',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['updateMask'],
        relative_path='v2beta/{+name}',
        request_field='function',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsPatchRequest',
        response_type_name='Operation',
        supports_download=False,
    )
    def SetIamPolicy(self, request, global_params=None):
      r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsSetIamPolicyRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Policy) The response message.
      """
      config = self.GetMethodConfig('SetIamPolicy')
      return self._RunMethod(
          config, request, global_params=global_params)
    SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:setIamPolicy',
        http_method='POST',
        method_id='cloudfunctions.projects.locations.functions.setIamPolicy',
        ordered_params=['resource'],
        path_params=['resource'],
        query_params=[],
        relative_path='v2beta/{+resource}:setIamPolicy',
        request_field='setIamPolicyRequest',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsSetIamPolicyRequest',
        response_type_name='Policy',
        supports_download=False,
    )
    def TestIamPermissions(self, request, global_params=None):
      r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
      Args:
        request: (CloudfunctionsProjectsLocationsFunctionsTestIamPermissionsRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TestIamPermissionsResponse) The response message.
      """
      config = self.GetMethodConfig('TestIamPermissions')
      return self._RunMethod(
          config, request, global_params=global_params)
    TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/functions/{functionsId}:testIamPermissions',
        http_method='POST',
        method_id='cloudfunctions.projects.locations.functions.testIamPermissions',
        ordered_params=['resource'],
        path_params=['resource'],
        query_params=[],
        relative_path='v2beta/{+resource}:testIamPermissions',
        request_field='testIamPermissionsRequest',
        request_type_name='CloudfunctionsProjectsLocationsFunctionsTestIamPermissionsRequest',
        response_type_name='TestIamPermissionsResponse',
        supports_download=False,
    )
  class ProjectsLocationsOperationsService(base_api.BaseApiService):
    """Service class for the projects_locations_operations resource."""
    _NAME = 'projects_locations_operations'
    def __init__(self, client):
      super(CloudfunctionsV2beta.ProjectsLocationsOperationsService, self).__init__(client)
      self._upload_configs = {
          }
    def Get(self, request, global_params=None):
      r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
      Args:
        request: (CloudfunctionsProjectsLocationsOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    Get.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.operations.get',
        ordered_params=['name'],
        path_params=['name'],
        query_params=[],
        relative_path='v2beta/{+name}',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsOperationsGetRequest',
        response_type_name='Operation',
        supports_download=False,
    )
    def List(self, request, global_params=None):
      r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. To override the binding, API services can add a binding such as `"/v1/{name=users/*}/operations"` to their service configuration. For backwards compatibility, the default name includes the operations collection id, however overriding users must ensure the name binding is the parent resource, without the operations collection id.
      Args:
        request: (CloudfunctionsProjectsLocationsOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListOperationsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/operations',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.operations.list',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v2beta/{+name}/operations',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsOperationsListRequest',
        response_type_name='ListOperationsResponse',
        supports_download=False,
    )
  class ProjectsLocationsRuntimesService(base_api.BaseApiService):
    """Service class for the projects_locations_runtimes resource."""
    _NAME = 'projects_locations_runtimes'
    def __init__(self, client):
      super(CloudfunctionsV2beta.ProjectsLocationsRuntimesService, self).__init__(client)
      self._upload_configs = {
          }
    def List(self, request, global_params=None):
      r"""Returns a list of runtimes that are supported for the requested project.
      Args:
        request: (CloudfunctionsProjectsLocationsRuntimesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListRuntimesResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations/{locationsId}/runtimes',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.runtimes.list',
        ordered_params=['parent'],
        path_params=['parent'],
        query_params=['filter'],
        relative_path='v2beta/{+parent}/runtimes',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsRuntimesListRequest',
        response_type_name='ListRuntimesResponse',
        supports_download=False,
    )
  class ProjectsLocationsService(base_api.BaseApiService):
    """Service class for the projects_locations resource."""
    _NAME = 'projects_locations'
    def __init__(self, client):
      super(CloudfunctionsV2beta.ProjectsLocationsService, self).__init__(client)
      self._upload_configs = {
          }
    def List(self, request, global_params=None):
      r"""Lists information about the supported locations for this service.
      Args:
        request: (CloudfunctionsProjectsLocationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ListLocationsResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    List.method_config = lambda: base_api.ApiMethodInfo(
        flat_path='v2beta/projects/{projectsId}/locations',
        http_method='GET',
        method_id='cloudfunctions.projects.locations.list',
        ordered_params=['name'],
        path_params=['name'],
        query_params=['filter', 'pageSize', 'pageToken'],
        relative_path='v2beta/{+name}/locations',
        request_field='',
        request_type_name='CloudfunctionsProjectsLocationsListRequest',
        response_type_name='ListLocationsResponse',
        supports_download=False,
    )
  class ProjectsService(base_api.BaseApiService):
    """Service class for the projects resource."""
    # This resource currently exposes no methods; it exists only as the
    # parent container for the nested location/function services above.
    _NAME = 'projects'
    def __init__(self, client):
      super(CloudfunctionsV2beta.ProjectsService, self).__init__(client)
      self._upload_configs = {
          }
| [
"gcloud@google.com"
] | gcloud@google.com |
17e8527f6eb8317bfbbdd729e8a0ff609113b9de | 1897e212482838ea65353ff5682342133e62f75f | /main.py | 0da2f3864e29de33d9d1d8569db8bfd0924dfe70 | [] | no_license | sakots/acnh-turnip-gspread | 1393376be4654d269573ac76498b58ee0ad03804 | f7c861b0e7306f528fa563b5d1d37f6a78a41bd7 | refs/heads/master | 2022-04-24T10:13:55.536417 | 2020-04-14T18:01:32 | 2020-04-14T18:01:32 | 256,100,512 | 0 | 0 | null | 2020-04-16T03:34:14 | 2020-04-16T03:34:13 | null | UTF-8 | Python | false | false | 2,336 | py | import base64
import json
import os
import urllib.parse
from optparse import OptionParser
import pprint
import yaml
import gspreads
from bind import BindService
from bot import TurnipPriceBotService
from logger import logger
def load_config():
    """Build the runtime configuration dict for the bot.

    Non-secret options come from the YAML file named by the ``--config``
    command line flag; credentials are overlaid from environment variables
    so they never have to live in the file.

    Returns:
        A dict with the YAML contents plus credential keys (a credential
        value is None when its environment variable is unset).
    """
    # load normal options from command line options
    parser = OptionParser()
    parser.add_option(
        "--config", dest="config", help="configuration file path", metavar="FILE"
    )
    option, _ = parser.parse_args()
    # Use a context manager so the config file handle is closed
    # deterministically (the original open(...).read() leaked it).
    with open(option.config, "r") as config_file:
        config = yaml.load(config_file.read(), Loader=yaml.FullLoader)
    # load credentials from environ
    config["gspread_name"] = os.environ.get("GSPREAD_NAME")
    config["gspread_credential_base64"] = os.environ.get("GSPREAD_CREDENTIAL_BASE64")
    config["mongo_host"] = os.environ.get("MONGO_HOST")
    config["mongo_port"] = os.environ.get("MONGO_PORT")
    config["mongo_app_username"] = os.environ.get("MONGO_APP_USERNAME")
    config["mongo_app_password"] = os.environ.get("MONGO_APP_PASSWORD")
    config["discord_bot_token"] = os.environ.get("DISCORD_BOT_TOKEN")
    return config
def main():
    """Wire up and run the turnip-price Discord bot.

    Loads configuration, builds the Google-Sheets and MongoDB backends,
    then blocks inside the Discord bot's event loop until it exits.
    """
    config = load_config()
    logger.info(pprint.pformat(config))
    # gspread: decode the base64-encoded service-account JSON credential.
    json_ = base64.b64decode(config["gspread_credential_base64"])
    credential = json.loads(json_)
    gspread_service = gspreads.GspreadService(config["gspread_name"], credential)
    # mongodb: either an in-memory instance (tests/dev) or a real server.
    if config.get("mongodb_use_inmemory") or False:
        logger.info("use pymongo_inmemory client")
        import pymongo_inmemory
        mongodb = pymongo_inmemory.MongoClient()
    else:
        logger.info("create pymongo client")
        # quote_plus: credentials may contain characters that are not
        # URI-safe, which pymongo requires to be percent-encoded.
        username = urllib.parse.quote_plus(config["mongo_app_username"])
        password = urllib.parse.quote_plus(config["mongo_app_password"])
        import pymongo
        mongodb = pymongo.MongoClient(
            config["mongo_host"],
            int(config["mongo_port"]),
            username=username,
            password=password,
            authSource="admin",
        )
    collection = mongodb[config["mongo_database"]][config["mongo_collection"]]
    # bind: persistence layer mapping Discord users to spreadsheet rows.
    bind_service = BindService(collection)
    bot_service = TurnipPriceBotService(
        config["discord_bot_token"], gspread_service, bind_service
    )
    bot_service.run()  # blocks until the bot shuts down
    mongodb.close()
if __name__ == "__main__":
    # Script entry point: only start the bot when executed directly.
    main()
| [
"arsenic28@gmail.com"
] | arsenic28@gmail.com |
0a14a4f50550ed5bdbacbd5f1e6d1c6332fc77db | 8f41c18c78624713ebd148c0e03e4f757a7edd78 | /Music.py | 9cd85d66563733efb5a89c3b8e6f32e029795713 | [] | no_license | susansalkeld/jesses-learning-fun-time | 14e2a228fc84b3a964635b1094be97e34185b798 | f908199ca52ac3558fa3df19535cdf6c343e7683 | refs/heads/master | 2021-01-01T18:08:05.508978 | 2014-08-11T19:22:22 | 2014-08-11T19:22:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | Linear_4ths = ['Bx', 'Ex', 'Ax', 'Dx', 'Gx', 'Cx', 'Fx', 'B#', 'E#', 'A#', 'D#', 'G#', 'C#', 'F#', 'B', 'E', 'A', 'D', 'G', 'C', 'F', 'Bb', 'Eb', 'Ab', 'Db', 'Gb', 'Cb', 'Fb', 'Bbb', 'Ebb', 'Abb', 'Dbb', 'Gbb', 'Cbb', 'Fbb']
Run = True
while Run ==True:
Note_Choice = raw_input("Enter a note: ") # Assigns a note to a variable.
if Note_Choice in Linear_4ths:
Index = Linear_4ths.index(Note_Choice) # Finds the index of that note.
Linear_Diatonic_Steps = [] # Makes an empty list to hold the notes of a scale.
for note in Linear_4ths[Index - 5:Index + 2]: # Fills empty list with notes. 73625(ROOT)4
Linear_Diatonic_Steps.append(note)
Re_Order = [5,3,1,6,4,2,0,5] # Makes a list to reprisent the new order of the notes, low to high rather than in 4ths.
Linear_Diatonic_Steps = [Linear_Diatonic_Steps[i] for i in Re_Order] # Reorders notes low to high.
for note in Linear_Diatonic_Steps: # Prints notes.
print note
elif Note_Choice == "q":
Run = False
else:
print 'RAWR!!!!!!\nPlease choose an uppercase note: A,B,C,D,E,F,G.\nIf you would sharp or flat, follow the note with "#" or "b".\nEnter "q" to quit.' | [
"susansalkeld@sfoc02ml6f4fd57.ads.autodesk.com"
] | susansalkeld@sfoc02ml6f4fd57.ads.autodesk.com |
77b57da02d5cd5d2d157d1cab3f405543fb7683c | ed9842152449b3292a103d3701d6f8fe0e775ad8 | /main.py | 0244252efefd94e19a0ab466ccbfe7d60062f4f2 | [
"MIT"
] | permissive | gunyu1019/SCHOOL-TIMETABLE | e01cbf6d086d012f5d56aac9e9a231b03ccf668c | 1108196912c268ffd7a0d1792ccb61954159d52b | refs/heads/master | 2022-12-13T14:47:46.726528 | 2020-09-10T12:02:40 | 2020-09-10T12:02:40 | 294,032,101 | 2 | 2 | MIT | 2020-09-10T12:02:20 | 2020-09-09T07:09:44 | Python | UTF-8 | Python | false | false | 4,013 | py | import datetime
import requests
import os
import json
from PIL import Image
from io import BytesIO
directory = os.path.dirname(os.path.abspath(__file__)).replace("\\","/")
if os.path.isfile(directory + "/key.txt"):
key_file = open(directory + "/key.txt",mode='r')
key = key_file.read()
key_file.close()
if os.path.isfile(directory + "/config.txt"):
config_file = open(directory + "/config.txt",mode='r',encoding='utf-8')
config = config_file.readlines()
config_file.close()
config_json = {}
for i in config:
line = config.index(i)
if i == "==== config.json ====\n":
config_json = json.loads("".join(config[line+1:]))
if config_json == {}:
print("에러 발생(404): config 설정을 찾을 수 없습니다.")
#key = config_json['key']
school_name = config_json['school_nm']
grade = config_json['grade']
class_nm = config_json['class']
else:
key = "NEIS-API 키를 작성하여 주세요!"
school_name = "서울초등학교"
grade = 1
class_nm = 1
def name(name):
    """Abbreviate a (Korean) subject name for compact timetable display.

    Applies a fixed sequence of substring substitutions and finally strips
    all spaces. The order of substitutions is significant, so it is kept
    exactly as before.
    """
    substitutions = [
        ("통합과학", "통과"), ("통합사회", "통사"), ("과학탐구실험", "과탐"),
        ("활동", ""), ("기술·가정", "기가"), ("-", ""), ("주제선택", "주제"),
        ("(자)", ""), ("(창)", ""),
        ("진로와 직업", "진로"), ("즐거운생활", "즐거운"),
        ("슬기로운생활", "슬기로운"), ("바른생활", "도덕"), (" ", ""),
    ]
    shortened = name
    for old, new in substitutions:
        shortened = shortened.replace(old, new)
    return shortened
def main():
    """Fetch this week's (Mon-Fri) timetable from the NEIS open API and
    save it as a PNG rendered by the vz.kro.kr timetable image service.

    Uses the module-level configuration (key, school_name, grade, class_nm)
    and writes the image under ./image/. Returns None; errors are reported
    by printing a message and returning early.
    """
    # 1) Look up the school to obtain its education-office and school codes.
    header1 = {
        "Type": "json",
        "KEY": key,
        "SCHUL_NM": school_name
    }
    resp1 = requests.get("https://open.neis.go.kr/hub/schoolInfo", params=header1)
    json1 = json.loads(resp1.text)
    # 2) Compute this week's Monday and Friday.
    today = datetime.datetime.today()
    last_monday = today - datetime.timedelta(days=today.weekday())
    last_friday = today + datetime.timedelta(days=4 - today.weekday())
    # 3) Pick the timetable endpoint matching the school type.
    type_nm = json1['schoolInfo'][1]['row'][0]['SCHUL_KND_SC_NM']
    type_list = {"초등학교": "els", "중학교": "mis", "고등학교": "his", "특수학교": "sps"}
    if type_nm not in type_list:
        print("에러 발생(404): 지원하지 않는 유형의 학교입니다. | 초등학교, 중학교, 고등학교만 지원합니다.")
        return
    header2 = {
        "Type": "json",
        "KEY": key,
        "ATPT_OFCDC_SC_CODE": json1['schoolInfo'][1]['row'][0]['ATPT_OFCDC_SC_CODE'],
        "SD_SCHUL_CODE": json1['schoolInfo'][1]['row'][0]['SD_SCHUL_CODE'],
        "GRADE": grade,
        "CLASS_NM": class_nm,
        "TI_FROM_YMD": last_monday.strftime('%Y%m%d'),
        "TI_TO_YMD": last_friday.strftime('%Y%m%d')
    }
    resp2 = requests.get(f"https://open.neis.go.kr/hub/{type_list[type_nm]}Timetable", params=header2)
    json2 = json.loads(resp2.text)
    if 'RESULT' in json2.keys():
        if 'CODE' in json2['RESULT'].keys():
            ercode = json2['RESULT']['CODE']
            if ercode == 'INFO-200':
                print("에러 발생(404): 학교를 찾지 못했습니다.")
                return
    # 4) Arrange subjects into a 5-day x 7-period grid.
    class_name = [["" for col in range(7)] for row in range(5)]
    for i in json2[f'{type_list[type_nm]}Timetable'][1]['row']:
        i_class_name = i['ITRT_CNTNT']
        # BUG FIX: the day index used to be a raw difference of YYYYMMDD
        # integers, which breaks whenever the week crosses a month or year
        # boundary. Use real date arithmetic instead.
        row_date = datetime.datetime.strptime(str(i['ALL_TI_YMD']), '%Y%m%d')
        weekend = (row_date.date() - last_monday.date()).days
        class_name[weekend][int(i['PERIO']) - 1] = name(i_class_name)
    # 5) Serialise the grid as the JSON-ish payload the image service expects.
    weekend_list = ["월", "화", "수", "목", "금"]
    answer = "{"
    # BUG FIX: class_name.index(day) returned the FIRST matching row, so two
    # identical days would both be labelled with the first day's name.
    for day_index, day_subjects in enumerate(class_name):
        weekend_name = weekend_list[day_index]
        class_name_i = str(day_subjects).replace('\'', '\"')
        answer += f",\"{weekend_name}\":{class_name_i}"
    answer = answer.replace(',', '', 1)
    answer += "}"
    header3 = {
        "text": answer
    }
    resp3 = requests.get("http://vz.kro.kr/sigan.php", params=header3)
    html = resp3.content
    # BUG FIX: the timestamped filename was computed but never used; the
    # image was always overwritten at a literal "(unknown).png".
    filename = today.strftime('%Y-%m-%d %H-%M-%S')
    i = Image.open(BytesIO(html))
    i.save(f'{directory}/image/{filename}.png')
    return
if __name__ == "__main__":
main() | [
"gunyu1019@gmail.com"
] | gunyu1019@gmail.com |
d9b8873a595adc377cc7200a263342446e210272 | b9578ee40864562f46723ebca677f7bd858c2e26 | /osciloscopio.py | d3fdd6ee884f61e6a9e9df0f4a08d72f7c7dbb3f | [] | no_license | agustin92/Instrumentacion | a89b5267175e577eefd34be6650293016e571512 | 33bc00c88be97ed324d877fa442a594d50b75d33 | refs/heads/master | 2020-05-02T06:14:23.092497 | 2019-06-26T19:36:44 | 2019-06-26T19:36:44 | 177,790,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,379 | py | import visa
import numpy as np
from matplotlib import pyplot as plt
# Initialise the VISA Resource Manager. With the pyvisa-py backend pass
# '@py' as the argument; with NI-VISA it is left empty.
rm = visa.ResourceManager()
class Osciloscopio:
    """Thin VISA wrapper around an oscilloscope (Tektronix-style SCPI).

    Opens a session to one instrument from the ResourceManager's list and
    exposes helpers to identify it and to download a scaled waveform.
    """

    def __init__(self, rm, num):
        # Takes the ResourceManager and the index of the instrument within
        # its resource list, and opens the communication channel.
        data = rm.list_resources()  # snapshot of the instrument list
        self.inst = rm.open_resource('{}'.format(data[num]))
        self.parameters = None  # lazily-fetched waveform scaling parameters

    def identity(self):
        # Print the instrument's identification string (*IDN? query).
        name = self.inst.query("*IDN?")
        print("Name of this device: {}".format(name))

    def get_parameters(self):
        # Fetch and cache the preamble values needed to scale raw curve
        # data: x-zero, x-increment, y-zero, y-multiplier, y-offset.
        if self.parameters is None:
            self.parameters = self.inst.query_ascii_values('WFMPRE:XZE?;XIN?;YZE?;YMU?;YOFF?;', separator=';')

    def curva(self):
        # Acquire one waveform and return (time, data) in physical units.
        self.get_parameters()
        xze, xin, yze, ymu, yoff = self.parameters
        data = self.inst.query_ascii_values("CURV?", container=np.array)
        tiempo = xze + np.arange(len(data)) * xin
        # BUG FIX: per the Tektronix programmer manual, the vertical scaling
        # is value = (raw - YOFF) * YMULT + YZERO; the original code added
        # yoff instead of yze, shifting every sample by (yoff - yze).
        data = (data - yoff) * ymu + yze
        return tiempo, data
| [
"agustin.lopezpedroso@gmail.com"
] | agustin.lopezpedroso@gmail.com |
671ee26937753834c9c0240dc4f3c2b8aa662922 | 5501b76a1517c0ee642594847effd84b9413e6bf | /util.py | d37c9d5b01198c702f5781490261c988839eba38 | [] | no_license | nakajimakou1/deep-high-dynamic-range | e22b824a5fee176ec08566801ead83ba1fdece62 | bd02a56d9913262ab059d43b9a979083df41e4ef | refs/heads/master | 2022-09-05T14:11:16.329844 | 2021-05-26T07:22:22 | 2021-05-26T07:22:22 | 296,060,924 | 0 | 0 | null | 2020-09-16T14:41:56 | 2020-09-16T14:41:55 | null | UTF-8 | Python | false | false | 2,235 | py | import os
import numpy as np
from typing import List
import cv2
from config import MU
def read_dir(path: str, folder_only: bool = True) -> List[str]:
"""Read a directory
Args:
path: A str path
folder_only: Boolean to indicate whether includes folder results only
Returns:
A list of str of paths
"""
if folder_only:
return [f.path for f in os.scandir(path) if f.is_dir()]
else:
return [f.path for f in os.scandir(path)]
def im2single(img: np.ndarray) -> np.ndarray:
"""Convert a integer image to single-precision float
Args:
img: A integer image
Returns:
A float image
"""
info = np.iinfo(img.dtype)
return img.astype(np.float32) / info.max
def im2double(img: np.ndarray) -> np.ndarray:
"""Convert a integer image to double-precision float
Args:
img: A integer image
Returns:
A double image
"""
info = np.iinfo(img.dtype)
return img.astype(np.float64) / info.max
def float2int(img: np.ndarray, type) -> np.ndarray:
"""Convert a float image to specific integer image
Args:
img: A single-precision float image
Returns:
A uint16 image image
"""
return (img * np.iinfo(type).max).astype(type)
def np_compute_PSNR(input: np.ndarray, reference: np.ndarray) -> float:
"""Compute Peak signal-to-noise ratio(PSNR)
Args:
input: A produced image
reference: A reference image
Returns:
Error in float
"""
input = im2single(input)
reference = im2single(reference)
num_pixels = input.size
squared_error = np.sum(np.square(input - reference)) / num_pixels
error = 10 * np.log10(1 / squared_error)
return error
def crop_img(input: np.ndarray, pad: int) -> np.ndarray:
"""Crop out image boundary
Args:
Input: A image
pad: A int value of cropped size
Returns:
Cropped image
"""
return input[pad: -pad, pad: -pad, :]
def np_range_compress(img):
"""Differentiable tonemapping operator
Args:
img: input image/batch of images
Returns:
Tonemapped images
"""
return np.log(1.0 + MU * img) / np.log(1.0 + MU)
| [
"th3charlie@gmail.com"
] | th3charlie@gmail.com |
bd0443ac664d583b35f574b914b7d097a427430c | e5897d5b5eb3b018bec8703f01cfc666acea5b38 | /isy994/items/variables/variable_state.py | 9ff4bd1fca3dd2830528fb6ce10c205ddf9ea290 | [
"MIT"
] | permissive | mjcumming/ISY994v5 | 5de41ce7e12be44c35dc0818daf639bb8c0e5487 | 928d8359fd15363e15b8daa402fbb1f5f53f3c45 | refs/heads/master | 2022-05-19T06:10:59.788621 | 2022-05-08T13:16:29 | 2022-05-08T13:16:29 | 187,289,265 | 4 | 10 | MIT | 2021-06-26T13:34:23 | 2019-05-17T22:36:55 | Python | UTF-8 | Python | false | false | 219 | py | #! /usr/bin/env python
from .variable_base import Variable_Base
class Variable_State(Variable_Base):
def __init__(self, container, variable_info):
Variable_Base.__init__(self, container, variable_info)
| [
"mike@4831.com"
] | mike@4831.com |
4132e6dec6e93bde18683faa467d96d2a725b8e5 | a70230074b302cdd95fad35d434282853f3047f2 | /sample1.py | b0038258599652a0fc4d1058ec79a887cc29ccf7 | [] | no_license | Milu-Rashli-T-K/Malayalam-Speech-Recognition-using-LSTM | 57ba2be17422b5ca24fa7fae6517e40fc62e1532 | d383821d8c8c8de81ad626b61a5609cfa737d13e | refs/heads/main | 2023-06-19T09:41:38.247119 | 2021-07-18T17:54:30 | 2021-07-18T17:54:30 | 381,291,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,682 | py | import tflearn
import numpy as np
import speechData
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import librosa
import os
res=os.listdir("samplecode")
print(len(res))
import csv
mydict = []
import matplotlib.pyplot as plt
# from google_trans_new import google_translator
#.............preprocessing & feature extraction.................
for r in res:
import numpy as np
rr=r.split('_')[0]
y, sr = librosa.load("samplecode//" + r, mono=True)
mfcc = librosa.feature.mfcc(y=y, sr=sr)
toappend=[]
for e in mfcc:
toappend.append( str(np.mean(e)))
data=','.join(toappend)
mydict.append({'Class':rr, 'Data': data})
# # ============================================================
#
#create csv file
# field names
fields = ['Class', 'Data']
# name of csv file
filename = "mal_num.csv"
# writing to csv file
with open(filename, 'w') as csvfile:
# creating a csv dict writer object
writer = csv.DictWriter(csvfile, fieldnames=fields)
# writing headers (field names)
writer.writeheader()
# writing data rows
writer.writerows(mydict)
#
outputlabels=["പുസ്തകം","വരയ്ക്കുക","അറിവ്","പഠിക്കുക","ലൈബ്രറി","വായിക്കുക","സ്കൂൾ","വിദ്യാർത്ഥി","അധ്യാപകൻ","എഴുതുക"]
#
# #.................training & testing .....................
#
learning_rate = 0.00001
training_iters =3000 # steps
#
width = 20 # mfcc features
height = 1000 # (max) length of utterance
classes = 10 # digits
#
X, Y = speechData.loadDataSet()
trainX, testX, trainY, testY = train_test_split(X, Y, test_size=0.20, random_state=4)
print("Train data = ", np.asarray(trainX).shape, " : ", type(trainX))
print("Train label = ", np.asarray(trainY).shape, " : ", type(trainY))
#print(trainY[0:1])
print("Test data = ", np.asarray(testX).shape, " : ", type(testX))
print("Test label = ", np.asarray(testY).shape, " : ", type(testY))
# Network building
net = tflearn.input_data([None, width, height])
net = tflearn.lstm(net, 128, dropout=0.8)
net = tflearn.fully_connected(net, classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate, loss='categorical_crossentropy')
#
model = tflearn.DNN(net, tensorboard_verbose=0)
if not os.path.isfile("tflearn.lstm.model.meta"):
# model.load("tflearn.lstm.model") // for repeated turning by removeing not from if .
print("lenth******************************===========")
print(trainY)
print(len(trainX[0]))
model.fit(trainX, trainY, n_epoch=training_iters, validation_set=(testX, testY), show_metric=True,batch_size=10)
print("\nSLNO : Predict -> Label\n")
lt = len(testX)
for i in range(1, lt + 1):
print(i, "\t: ", np.argmax(model.predict(testX[i - 1:i])), " --> ", np.argmax(testY[i - 1:i]))
model.save("tflearn.lstm.model")
else:
#translator = google_translator()
model.load("tflearn.lstm.model")
print("\n....Model is already trained....\n")
print("\nSLNO : Predict -> Label\n")
curt = 0
lt = len(testX)
ytest=[]
ans=[]
for i in range(1, lt + 1):
p = np.argmax(model.predict(testX[i - 1:i]))
v = np.argmax(testY[i - 1:i])
ans.append(p)
ytest.append(v)
if p == v:
curt += 1
#translate_text = translator.translate(str(outputlabels[int(p)]), lang_src='en', lang_tgt='ml')
print(i, "\t: ", int(p)+1,outputlabels[int(p)], " --> ", int(v+1))
cm=confusion_matrix(ytest,ans)
print("confusion_matrix")
print(cm)
print("\n\t ACCURACY : ", curt / lt)
#..........................prediction..............
print("Prediction Result")
files = os.listdir("predict/")
#print(len(files))
for wav in files:
if not wav.endswith(".wav"): continue
model.load("tflearn.lstm.model")
T = speechData.mfcc_target1("predict/"+wav)
#lt = len(T)
pp = np.argmax(model.predict(T))
print(outputlabels[int(pp)])
# #plot
# no_of_recordings=len(res)
# plt.figure(figsize=(30,5))
# index=np.arange(len(res))
# plt.bar(index,no_of_recordings)
# plt.xlabel('commands',fontsize=12)
# plt.ylabel('No of recordings',fontsize=12)
# #plt.xticks(index,res,fontsize=15,rotation=36)
# plt.title('no.of recordings of each command')
# plt.show()
| [
"noreply@github.com"
] | Milu-Rashli-T-K.noreply@github.com |
a32e9c52de8dddec6c10e891923cf8e99a886738 | ecf677cb0f0e31bd322e15291b564da98779c69d | /load_OMDBAPI.py | 34c94be1bb072a90eaec94d975e349454dbbc5b4 | [] | no_license | jonhartm/SI507_FinalProject | e160037999e3404ac55f5a98b5ca48b52690fb61 | 6f494f8c537dd79b36afe68a33a59a2099f71cdf | refs/heads/master | 2022-12-13T05:25:40.315075 | 2018-04-21T02:44:18 | 2018-04-21T02:44:18 | 128,813,154 | 0 | 0 | null | 2021-06-01T22:08:39 | 2018-04-09T18:00:41 | Python | UTF-8 | Python | false | false | 3,656 | py | #-------------------------------------------------------------------------------
# LOAD_OMDBAPI.PY
# Functions for loading data from the Open Movie Database API
#-------------------------------------------------------------------------------
from caching import *
from secrets import *
from util import Timer
import sqlite3
Database_Name = "movies.db"
# Import from the Open Movie Database
# params: title: the title of the movie
# year: the year of the file (default=None)
def Import_OMD(title, year=None):
OMD_Cache = CacheFile('OMDBCache.json')
url = 'http://www.omdbapi.com'
params = {'apikey':OMDB_API_KEY, "t":title}
if year is not None:
params['y'] = year
return OMD_Cache.CheckCache_API(url, params, keys = ['Rated', 'Poster', 'Ratings'])
# Does the actual importing from the OMDB and inserts into the database.
# Decides which films to load by running a query to get what are likely
# popular films
def InitializeOMDBImport():
t = Timer()
t.Start()
print("Loading data from OMDB API...")
conn = sqlite3.connect(Database_Name)
cur = conn.cursor()
cur2 = conn.cursor()
# get ratings for the most popular, most highly rated films, and any film that
# has won at least 2 academy awards
statement = '''
SELECT Title, Release FROM Film
WHERE FilmID IN
(
SELECT MovieID
FROM Ratings
GROUP BY MovieID
HAVING COUNT(*) > 10
ORDER BY AVG(Rating)
LIMIT 350
)
OR FilmID IN
(
SELECT MovieID
FROM Ratings
GROUP BY MovieID
ORDER BY COUNT(*) DESC
LIMIT 500
)
OR FilmID IN
(
SELECT FilmID
FROM Film
WHERE AA_Wins > 1
)
'''
cur.execute(statement)
updates = []
for row in cur:
try:
OMD_data = Import_OMD(row[0], row[1][:4])
values = [None, None, None, None, None, row[0], row[1]]
values[0] = OMD_data['Rated']
values[1] = OMD_data['Poster']
for ratings in OMD_data['Ratings']:
if ratings['Source'] == "Internet Movie Database": values[2] = ratings['Value'].split('/')[0]
if ratings['Source'] == "Rotten Tomatoes": values[3] = ratings['Value']
if ratings['Source'] == "Metacritic": values[4] = ratings['Value'].split('/')[0]
updates.append(values)
except Exception as e:
pass
statement = 'UPDATE Film SET Rating=?, Poster=?, Rating_IMDB = ?, Rating_RT=?, Rating_MC=? WHERE Title == ? AND Release == ?'
cur.executemany(statement, updates)
conn.commit()
conn.close()
t.Stop()
print("OMDB Import completed in " + str(t))
def ImportAndAddOMDBData(title, year):
OMD_data = Import_OMD(title, year)
values = [None, None, None, None, None, title, year+"%"]
values[0] = OMD_data['Rated']
values[1] = OMD_data['Poster']
for ratings in OMD_data['Ratings']:
if ratings['Source'] == "Internet Movie Database": values[2] = ratings['Value'].split('/')[0]
if ratings['Source'] == "Rotten Tomatoes": values[3] = ratings['Value']
if ratings['Source'] == "Metacritic": values[4] = ratings['Value'].split('/')[0]
print(title, year)
print(values)
conn = sqlite3.connect(Database_Name)
cur = conn.cursor()
statement = 'UPDATE Film SET Rating=?, Poster=?, Rating_IMDB = ?, Rating_RT=?, Rating_MC=? WHERE Title == ? AND Release LIKE ?'
cur.execute(statement, values)
conn.commit()
conn.close()
return OMD_data
| [
"jonhartm@umich.edu"
] | jonhartm@umich.edu |
f04a5036b5def4aa77cf1e5a13b71f3df5ba2dcc | 82d61a768a865bcec0de1611e14adcd3fa256ae4 | /db_migrate.py | 0e9b216acddc27001bb26efd50240ae62394f8c3 | [] | no_license | DmitryGood/hockey | 44336ebb9a4d485d8254253ae99b0b4b2afe6ce7 | d8e65b075aee008cea9b279c9900270b44ae458e | refs/heads/master | 2016-09-14T07:16:12.899897 | 2016-04-29T10:33:51 | 2016-04-29T10:33:51 | 57,374,558 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,187 | py | import imp
from migrate.versioning import api
from flask_APIdefinition import db
from model_hockey import *
from config import WorkConfig
#from config import SQLALCHEMY_DATABASE_URI
#from config import SQLALCHEMY_MIGRATE_REPO
print "Database: ", WorkConfig.SQLALCHEMY_DATABASE_URI
print "Migrate REPO: ", WorkConfig.SQLALCHEMY_MIGRATE_REPO
migration = WorkConfig.SQLALCHEMY_MIGRATE_REPO + '/versions/%03d_migration.py' % \
(api.db_version(WorkConfig.SQLALCHEMY_DATABASE_URI, WorkConfig.SQLALCHEMY_MIGRATE_REPO) + 1)
tmp_module = imp.new_module('old_model')
old_model = api.create_model(WorkConfig.SQLALCHEMY_DATABASE_URI, WorkConfig.SQLALCHEMY_MIGRATE_REPO)
exec old_model in tmp_module.__dict__
script = api.make_update_script_for_model(WorkConfig.SQLALCHEMY_DATABASE_URI, WorkConfig.SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, Base.metadata)
open(migration, "wt").write(script)
#api.upgrade(WorkConfig.SQLALCHEMY_DATABASE_URI, WorkConfig.SQLALCHEMY_MIGRATE_REPO)
print 'New migration saved as ' + migration
print 'Current database version: ' + str(api.db_version(WorkConfig.SQLALCHEMY_DATABASE_URI, WorkConfig.SQLALCHEMY_MIGRATE_REPO))
| [
"dhoroshih@gmail.com"
] | dhoroshih@gmail.com |
78ae0e4d4da2857c376fba688f6e13288cfd9885 | 72dc0b8b86134a7471fc87c908809cd812c76307 | /bin/gunicorn | a84bb7b72229ac26286872bbf1125fdfe03f86cb | [] | no_license | aman-roy/CBT-therapy | 704dc7301c2eda00c4558e84b2c8bddb1a26f9cc | c9ace3e3a0f01d92fa9bd47e12c007e176ac625a | refs/heads/master | 2020-08-14T18:28:19.710682 | 2019-10-15T12:47:34 | 2019-10-15T12:47:34 | 215,215,275 | 1 | 0 | null | 2020-02-05T15:45:38 | 2019-10-15T05:38:23 | Python | UTF-8 | Python | false | false | 244 | #!/home/tux/frames/CBT_therapy/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"fictionfree54@gmail.com"
] | fictionfree54@gmail.com | |
1216c26dbd4120f64a34e26c4ce2bf0aee169116 | e4144eb2e2439fcea39b6cd6d87e41191cbe876b | /src/tablet_ui_server/node_modules/dtrace-provider/src/build/config.gypi | 09db07cff526a0697ba16fda9c00f860b5390e4c | [
"BSD-2-Clause",
"Apache-2.0"
] | permissive | EmmaLovesJIM/Emma2k19 | 526d265fa788c4aa01f37831dc42e162a4c9df91 | e5d7cf656597e2080f0e7a3b725f5f1df2a7ae67 | refs/heads/master | 2020-04-20T00:21:16.450134 | 2019-01-31T12:17:34 | 2019-01-31T12:17:34 | 168,519,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,376 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_file": "icudt60l.dat",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt60l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "60",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/marcelochsendorf/.node-gyp/8.12.0",
"standalone_static_library": 1,
"dry_run": "",
"save_dev": "",
"legacy_bundling": "",
"only": "",
"viewer": "man",
"browser": "",
"commit_hooks": "true",
"also": "",
"rollback": "true",
"sign_git_commit": "",
"audit": "true",
"usage": "",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"metrics_registry": "https://registry.npmjs.org/",
"parseable": "",
"shrinkwrap": "true",
"timing": "",
"init_license": "ISC",
"if_present": "",
"init_author_email": "",
"sign_git_tag": "",
"cache_max": "Infinity",
"cert": "",
"local_address": "",
"long": "",
"git_tag_version": "true",
"preid": "",
"fetch_retries": "2",
"noproxy": "",
"registry": "https://registry.npmjs.org/",
"key": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"always_auth": "",
"logs_max": "10",
"cache_lock_retries": "10",
"global_style": "",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"offline": "",
"searchlimit": "20",
"read_only": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"engine_strict": "",
"description": "true",
"https_proxy": "",
"userconfig": "/Users/marcelochsendorf/.npmrc",
"init_module": "/Users/marcelochsendorf/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "8.12.0",
"save": "true",
"editor": "vi",
"ignore_prepublish": "",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"global": "",
"progress": "true",
"searchstaleness": "900",
"ham_it_up": "",
"optional": "true",
"save_prod": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "Infinity",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"ca": "",
"tag_version_prefix": "v",
"scripts_prepend_node_path": "warn-only",
"sso_type": "oauth",
"save_prefix": "^",
"dev": "",
"group": "20",
"save_exact": "",
"fetch_retry_factor": "10",
"prefer_offline": "",
"cache_lock_stale": "60000",
"version": "",
"otp": "",
"cache_min": "10",
"cache": "/Users/marcelochsendorf/.npm",
"searchexclude": "",
"color": "true",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.5.0 node/v8.12.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"send_metrics": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"git": "git",
"scope": "",
"init_author_name": "",
"onload_script": "",
"unsafe_perm": "true",
"tmp": "/var/folders/9t/5gv2fs0d7fv4351s5g5qy15r0000gn/T",
"link": "",
"prefix": "/usr/local"
}
}
| [
"marcel.ochsendorf@gmail.com"
] | marcel.ochsendorf@gmail.com |
1e547431f1304fab875e263c577d86e91b92a9ce | 747f759311d404af31c0f80029e88098193f6269 | /addons/base_partner_surname/partner.py | 254932e02e9d5fd896883ede6d5d21855b6e91c1 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 69 | py | /home/openerp/production/extra-addons/base_partner_surname/partner.py | [
"geerish@omerp.net"
] | geerish@omerp.net |
601af5281c21060fb20809a752978fe7beb6336f | 7ba9be0a75add6b54c72b86dbc8850ff3b4ed4b7 | /gptorch/test/kernels_test.py | 7fdd26549cbddafff1c17ea34599bdfd2ed804ff | [] | no_license | yangkky/gptorch | cbf0ef91d1af14cd211836834febb39456c69d6c | 8534d5687c0bbf921e4caa30c04f4a4bf460d102 | refs/heads/master | 2021-07-14T08:30:57.491654 | 2019-01-10T13:04:19 | 2019-01-10T13:04:19 | 133,904,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,422 | py | import torch
from torch.autograd import Variable
from torch.nn.parameter import Parameter
import torch.nn as nn
from gptorch import kernels
import numpy as np
from scipy.spatial import distance
def test_polynomial():
s0 = np.random.random()
sp = np.random.random()
d = np.random.choice(np.arange(1, 5))
ker = kernels.PolynomialKernel(int(d), s0=s0, sp=sp)
X1 = np.random.random((3, 5))
V1 = Variable(torch.Tensor(X1))
X2 = np.random.random((4, 5))
V2 = Variable(torch.Tensor(X2))
K = (s0 ** 2 + sp ** 2 * X1 @ X2.T) ** d
K_test = ker(V1, V2)
assert np.allclose(K, K_test.data.numpy())
def test_cdist():
X1 = np.random.random((3, 5))
V1 = Variable(torch.Tensor(X1))
X2 = np.random.random((4, 5))
V2 = Variable(torch.Tensor(X2))
d = distance.cdist(X1, X2)
d_test = kernels.cdist(V1, V2)
assert np.allclose(d, d_test.data.numpy())
d2 = kernels.cdist(V1, V2, squared=True)
assert np.allclose(d ** 2, d2.data.numpy())
def test_matern():
ell = 10 * np.random.random()
ker = kernels.MaternKernel(ell=ell)
X1 = np.random.random((10, 5))
V1 = Variable(torch.Tensor(X1))
X2 = np.random.random((4, 5))
V2 = Variable(torch.Tensor(X2))
d = distance.cdist(X1, X2)
D_L = d / ell
first = (1.0 + np.sqrt(5.0) * D_L) + 5.0 * np.power(D_L, 2) / 3.0
second = np.exp(-np.sqrt(5.0) * D_L)
K = first * second
K_test = ker(V1, V2)
assert np.allclose(K, K_test.data.numpy())
def test_se():
ell = 10 * np.random.random()
sf = np.random.random()
ker = kernels.SEKernel(ell=ell, sf=sf)
X1 = np.random.random((3, 5))
V1 = Variable(torch.Tensor(X1))
X2 = np.random.random((4, 5))
V2 = Variable(torch.Tensor(X2))
d = distance.cdist(X1, X2)
D_L = d ** 2 / ell ** 2
K = sf ** 2 * np.exp(-0.5 * D_L)
K_test = ker(V1, V2)
assert np.allclose(K, K_test.data.numpy())
def naive_wdk(x1, x2, S, D, cutoff=4.5):
subs = S[x1, x2]
k = 0
for i, s in enumerate(subs):
total = 0
for j, ss in enumerate(subs):
if i == j:
continue
if D[i, j] < cutoff:
total += ss
k += s * total
return k
def test_fixed_wdk():
L = 5
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
D = np.array([[0.0, 5.0, 3.0, 6.0, 2.0],
[5.0, 0.0, 5.0, 6.0, 7.0],
[3.0, 5.0, 0.0, 1.0, 2.0],
[6.0, 6.0, 1.0, 0.0, 1.0],
[2.0, 7.0, 2.0, 1.0, 0.0]])
contacts = [(0, 2), (0, 4), (2, 3), (2, 4), (3, 4)]
graph = [[2, 4, -1],
[-1, -1, -1],
[0, 3, 4],
[2, 4, -1],
[0, 2, 3]]
S = torch.randn(size=(4, 10))
S = S @ S.t()
a = np.random.random()
gamma = 1.0
ke = kernels.FixedWDK(contacts, L, S, a=a)
S = S.detach().numpy()
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_wdk(x1, x2, S, D)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_wdk(x1, x2, S, D)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_wdk(x1, x2, S, D)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K12 = (K12 ** gamma) * a ** 2
K = ke(torch.tensor(X1), torch.tensor(X2)).detach().numpy()
assert np.allclose(K12, K)
def test_wdk():
L = 5
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
D = np.array([[0.0, 5.0, 3.0, 6.0, 2.0],
[5.0, 0.0, 5.0, 6.0, 7.0],
[3.0, 5.0, 0.0, 1.0, 2.0],
[6.0, 6.0, 1.0, 0.0, 1.0],
[2.0, 7.0, 2.0, 1.0, 0.0]])
contacts = [(0, 2), (0, 4), (2, 3), (2, 4), (3, 4)]
graph = [[2, 4, -1],
[-1, -1, -1],
[0, 3, 4],
[2, 4, -1],
[0, 2, 3]]
ke = kernels.WeightedDecompositionKernel(contacts, L, 4, 10)
S = (ke.A @ ke.A.t()).detach().numpy()
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_wdk(x1, x2, S, D)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_wdk(x1, x2, S, D)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_wdk(x1, x2, S, D)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K = ke(torch.tensor(X1), torch.tensor(X2)).detach().numpy()
assert np.allclose(K12, K)
def naive_swdk(x1, x2, S, w):
k = 0
for i, (xx1, xx2) in enumerate(zip(x1, x2)):
s12 = S[xx1, xx2]
others = 0
for j, (a1, a2) in enumerate(zip(x1, x2)):
others += S[a1, a2] * w[i, j]
k += s12 * others
return k
def naive_normed_swdk(x1, x2, S, w):
k = naive_swdk(x1, x2, S, w)
k /= np.sqrt(naive_swdk(x1, x1, S, w))
k /= np.sqrt(naive_swdk(x2, x2, S, w))
return k
def test_swdk():
n1 = 4
n2 = 5
m = 6
L = 10
X1 = np.random.choice(m, size=(n1, L))
X2 = np.random.choice(m, size=(n2, L))
T1 = torch.LongTensor(X1)
T2 = torch.LongTensor(X2)
ke = kernels.SoftWeightedDecompositionKernel(L, m, 2 * m, a=1.0)
K12 = ke(T1, T2).detach().numpy()
S = ke.A @ ke.A.t()
S = S.detach().numpy()
w_flat = ke.w.detach().numpy()
i_x, i_y = np.tril_indices(L, k=-1)
w = np.zeros((L, L))
w[i_x, i_y] = w_flat
w[i_y, i_x] = w_flat
w = 1 / (1 + np.exp(-w))
K = np.zeros((n1, n2))
for i in range(n1):
for j in range(n2):
K[i, j] = naive_normed_swdk(X1[i], X2[j], S, w)
assert np.allclose(K12, K)
def naive_sewdk(x1, x2, S, graph):
k = 0
for i, (xx1, xx2) in enumerate(zip(x1, x2)):
s12 = S[i, xx1, xx2]
others = 0
for j in graph[i]:
if j == -1:
continue
others += S[j, x1[j], x2[j]]
k += s12 * others
return k
def test_series_wdk():
L = 5
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
contacts = [(0, 2), (0, 4), (2, 3), (2, 4), (3, 4)]
graph = [[2, 4, -1],
[-1, -1, -1],
[0, 3, 4],
[2, 4, -1],
[0, 2, 3]]
ke = kernels.SeriesWDK(contacts, L, 4, 10)
S = (ke.A @ ke.A.transpose(-1, -2)).detach().numpy()
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_sewdk(x1, x2, S, graph)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_sewdk(x1, x2, S, graph)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_sewdk(x1, x2, S, graph)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K = ke(torch.tensor(X1), torch.tensor(X2)).detach().numpy()
assert np.allclose(K12, K)
def naive_sswdk(x1, x2, S, w):
k = 0
for i, (xx1, xx2) in enumerate(zip(x1, x2)):
s12 = S[i, xx1, xx2]
others = 0
for j, (a1, a2) in enumerate(zip(x1, x2)):
others += S[j, a1, a2] * w[i, j]
k += s12 * others
return k
def test_soft_series_wdk():
L = 5
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
n_S = 4
d = 8
ke = kernels.SoftSeriesWDK(L, n_S, d)
S = (ke.A @ ke.A.transpose(-1, -2)).detach().numpy()
w_flat = ke.w.detach().numpy()
i_x, i_y = np.tril_indices(L, k=-1)
w = np.zeros((L, L))
w[i_x, i_y] = w_flat
w[i_y, i_x] = w_flat
w = 1 / (1 + np.exp(-w))
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_sswdk(x1, x2, S, w)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_sswdk(x1, x2, S, w)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_sswdk(x1, x2, S, w)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K = ke(torch.tensor(X1), torch.tensor(X2)).detach().numpy()
assert np.allclose(K12, K)
class Embedder(nn.Module):
def __init__(self, n_aa, dims, L):
super(Embedder, self).__init__()
self.emb = nn.Embedding(n_aa, dims[0])
self.relu = nn.ReLU()
self.lin1 = nn.Linear(dims[0] * L, dims[1])
layers = []
for d1, d2 in zip(dims[1:-1], dims[2:]):
layers.append(nn.ReLU())
layers.append(nn.Linear(d1, d2))
self.layers = nn.Sequential(*layers)
def forward(self, X):
b = len(X)
e = self.emb(X).view(b, -1)
e = self.lin1(self.relu(e))
return self.layers(e)
def naive_dwdk(network, x1, x2, n_aa, w):
L = len(x1)
e1 = network(x1[None, :]).view(L, n_aa, -1)
e2 = network(x2[None, :]).view(L, n_aa, -1)
S1 = e1.matmul(e1.transpose(-1, -2))
S2 = e2.matmul(e2.transpose(-1, -2))
# e = torch.cat([e1, e2], dim=-1)
# S = e.matmul(e.transpose(-1, -2)) / 2
k = 0
for i, (xx1, xx2) in enumerate(zip(x1, x2)):
s1 = S1[i, xx1, xx2]
s2 = S2[i, xx1, xx2]
others1 = 0
others2 = 0
for j, (a1, a2) in enumerate(zip(x1, x2)):
others1 += S1[j, a1, a2] * w[i, j]
others2 += S2[j, a1, a2] * w[i, j]
k += s1 * others1 + s2 * others2
return k
def test_deep_wdk():
L = 5
n_aa = 4
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
X1 = torch.tensor(X1).long()
X2 = torch.tensor(X2).long()
embedder = Embedder(n_aa, [32, 64, L * n_aa * 8], L)
ke = kernels.DeepWDK(embedder, n_aa, L)
w_flat = ke.w.detach().numpy()
i_x, i_y = np.tril_indices(L, k=-1)
w = np.zeros((L, L))
w[i_x, i_y] = w_flat
w[i_y, i_x] = w_flat
w = 1 / (1 + np.exp(-w))
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_dwdk(embedder, x1, x2, n_aa, w)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_dwdk(embedder, x1, x2, n_aa, w)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_dwdk(embedder, x1, x2, n_aa, w)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K = ke(torch.tensor(X1), torch.tensor(X2)).detach().numpy()
print(K12)
print(K)
assert np.allclose(K12, K)
def naive_dswdk(network, x1, x2, n_aa):
L = len(x1)
e1 = network(x1[None, :]).view(L, n_aa, -1)
e2 = network(x2[None, :]).view(L, n_aa, -1)
S = torch.bmm(e1, e1.transpose(-1, -2))
S += torch.bmm(e2, e2.transpose(-1, -2))
S /= 2
k = 0
for i, (xx1, xx2) in enumerate(zip(x1, x2)):
k += S[i, xx1, xx2]
return k
def test_deep_series_wdk():
L = 5
n_aa = 4
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
X1 = torch.tensor(X1).long()
X2 = torch.tensor(X2).long()
_ = torch.manual_seed(0)
embedder = Embedder(n_aa, [32, 64, 5 * n_aa * L], L)
ke = kernels.DeepSeriesWDK(embedder, n_aa)
K11 = np.zeros((len(X1), len(X1)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X1):
K11[i, j] = naive_dswdk(embedder, x1, x2, n_aa)
K22 = np.zeros((len(X2), len(X2)))
for i, x1 in enumerate(X2):
for j, x2 in enumerate(X2):
K22[i, j] = naive_dswdk(embedder, x1, x2, n_aa)
K12 = np.zeros((len(X1), len(X2)))
for i, x1 in enumerate(X1):
for j, x2 in enumerate(X2):
K12[i, j] = naive_dswdk(embedder, x1, x2, n_aa)
K1_star = np.expand_dims(np.sqrt(np.diag(K11)), 1)
K2_star = np.expand_dims(np.sqrt(np.diag(K22)), 0)
K12 = K12 / K1_star / K2_star
K = ke(X1, X2).detach().numpy()
assert np.allclose(K12, K)
def test_sum_kernel():
L = 5
n_aa = 4
X1 = np.array([[0, 1, 2, 3, 1],
[0, 2, 1, 3, 2],
[1, 2, 2, 3, 1]])
X2 = np.array([[1, 1, 2, 1, 0],
[0, 2, 1, 3, 2]])
X1 = torch.tensor(X1).long()
X2 = torch.tensor(X2).long()
_ = torch.manual_seed(0)
embedders = [Embedder(n_aa, [32, 64, 5 * n_aa * L], L)
for _ in range(3)]
kes = [kernels.DeepSeriesWDK(emb, n_aa) for emb in embedders]
K1 = torch.zeros(3, 2)
for ke in kes:
K1 += ke(X1, X2)
ke = kernels.SumKernel(kes)
K2 = ke(X1, X2)
assert np.allclose(K1.detach().numpy(), K2.detach().numpy())
if __name__=="__main__":
test_fixed_wdk()
test_polynomial()
test_cdist()
test_matern()
test_se()
test_wdk()
test_swdk()
test_series_wdk()
test_soft_series_wdk()
test_deep_wdk()
# test_deep_series_wdk()
# test_sum_kernel()
| [
"seinchin@gmail.com"
] | seinchin@gmail.com |
7955ccc96c528dad18cf43d74e7af5ee585d03f4 | 655079c76272e82c6a92c1b74d52836872d0a3cf | /hmm/scripts/search_abstraction_batch.py | 590781f11d399463175ec23cf8c5d9ba28c15b88 | [
"MIT"
] | permissive | ondrejbiza/hmm | e843643560976f41f68ef06d4168bedf0963b1b2 | 1e9fe47a6057d93e7c77614016a89d5d46959e97 | refs/heads/master | 2023-04-01T10:45:03.971030 | 2019-10-16T20:38:31 | 2019-10-16T20:38:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | import os
import logging
import pickle
import numpy as np
import tensorflow as tf
from ..runners import abstraction_learn_actions_tf
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
tf.get_logger().setLevel(logging.ERROR)
# Hyper-parameter grid for the abstraction search.
lrs = [1.0, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001]
dims = [2, 3, 4, 8, 16, 32]
mu_sds = [0.1, 1.0, 5.0]
cov_sds = [0.1, 1.0, 5.0]
runs = 10

results = dict()
results_path = "./results/search_abstraction_batch.pickle"

# Resume support: reload previously saved results so already-finished
# configurations are skipped after a restart.
if not os.path.isdir("./results"):
    os.makedirs("./results")
if os.path.isfile(results_path):
    with open(results_path, "rb") as file:
        results = pickle.load(file)

# Exhaustive sweep over the grid; the unused enumerate() indices were removed.
for mu_sd in mu_sds:
    for cov_sd in cov_sds:
        for lr in lrs:
            for dim in dims:
                # Was hard-coded range(10); use the `runs` constant instead.
                for run_idx in range(runs):
                    if (mu_sd, cov_sd, lr, dim, run_idx) in results:
                        continue
                    print("running:", (mu_sd, cov_sd, lr, dim, run_idx))
                    best_accuracy, _ = abstraction_learn_actions_tf.main(
                        dim, 10, lr, 500, 100, False, 100, mu_sd, cov_sd, False, "1"
                    )
                    # Clear the TF graph between runs to avoid leaking ops.
                    tf.reset_default_graph()
                    # Treat NaN accuracies (diverged runs) as zero.
                    if np.any(np.isnan(best_accuracy)):
                        best_accuracy = 0.0
                    results[mu_sd, cov_sd, lr, dim, run_idx] = best_accuracy
                    # Persist after every run so progress survives crashes.
                    with open(results_path, "wb") as file:
                        pickle.dump(results, file)
| [
"ondrej.biza@gmail.com"
] | ondrej.biza@gmail.com |
5c4ba5f60c58bee30a636b642431ad1b452ef7ee | ade58184a307bad28916d8bbdcd81443b5102545 | /manager.py | 9355af0938108d2cf069b043ff6f12e905088e10 | [] | no_license | eshita92/Farmmeto | 672c92baaa2efddce2863b4b20b40e07d3be0c01 | 35fece37151b1712733a709764f51560f4d252b9 | refs/heads/master | 2022-11-06T16:28:46.294834 | 2020-06-19T15:54:27 | 2020-06-19T15:54:27 | 273,531,487 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,850 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 27 23:16:49 2020
@author: Blessy
"""
# Building CNN based on AlexNet Architecture
# Importing Keras libraries and packages
from keras.preprocessing import image
import numpy as np
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.layers.normalization import BatchNormalization
from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator
#from keras.callbacks import ModelCheckpoint
# ---- AlexNet-style CNN for 38-class image classification ----
# Initializing the CNN
classifier = Sequential()

# Convolution Step 1: 96 filters of 11x11, stride 4 over 224x224 RGB input
classifier.add(Convolution2D(96, 11, strides = (4, 4), padding = 'valid', input_shape=(224, 224, 3), activation = 'relu'))

# Max Pooling Step 1
classifier.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid'))
classifier.add(BatchNormalization())

# Convolution Step 2: 256 filters of 11x11, stride 1
classifier.add(Convolution2D(256, 11, strides = (1, 1), padding='valid', activation = 'relu'))

# Max Pooling Step 2
classifier.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding='valid'))
classifier.add(BatchNormalization())

# Convolution Step 3
classifier.add(Convolution2D(384, 3, strides = (1, 1), padding='valid', activation = 'relu'))
classifier.add(BatchNormalization())

# Convolution Step 4
classifier.add(Convolution2D(384, 3, strides = (1, 1), padding='valid', activation = 'relu'))
classifier.add(BatchNormalization())

# Convolution Step 5
classifier.add(Convolution2D(256, 3, strides=(1,1), padding='valid', activation = 'relu'))

# Max Pooling Step 3
classifier.add(MaxPooling2D(pool_size = (2, 2), strides = (2, 2), padding = 'valid'))
classifier.add(BatchNormalization())

# Flattening Step
classifier.add(Flatten())

# Full Connection Step: two 4096-unit dense layers and a 1000-unit layer,
# each with dropout + batch norm, finishing in a 38-way softmax
# (one output unit per class).
classifier.add(Dense(units = 4096, activation = 'relu'))
classifier.add(Dropout(0.4))
classifier.add(BatchNormalization())
classifier.add(Dense(units = 4096, activation = 'relu'))
classifier.add(Dropout(0.4))
classifier.add(BatchNormalization())
classifier.add(Dense(units = 1000, activation = 'relu'))
classifier.add(Dropout(0.2))
classifier.add(BatchNormalization())
classifier.add(Dense(units = 38, activation = 'softmax'))
classifier.summary()

# Compiling the CNN (earlier Adam variant kept for reference)
#classifier.compile(optimizer='adam',loss='categorical_crossentropy',
#                   metrics=['accuracy'])

# Compiling the CNN: SGD with momentum and weight decay; categorical
# cross-entropy matches the 38-way softmax output.
classifier.compile(optimizer=optimizers.SGD(lr=0.001, momentum=0.9, decay=0.005),
                   loss='categorical_crossentropy',
                   metrics=['accuracy'])
# image preprocessing: augment the training set (shear/zoom/shift/rotate/flip)
# and only rescale the test set.
train_datagen = ImageDataGenerator(rescale=1./255,
                                   shear_range=0.2,
                                   zoom_range=0.2,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   rotation_range=40,
                                   horizontal_flip=True,
                                   fill_mode='nearest')

test_datagen = ImageDataGenerator(rescale=1./255)

batch_size = 32
# NOTE(review): absolute Windows paths -- will need adjusting on other machines.
train_data_dir = (r"E:\D_folderBackup\6thSemester\FinalProject\plant-disease\dataset\train") # directory of training data
test_data_dir = (r"E:\D_folderBackup\6thSemester\FinalProject\plant-disease\dataset\test") # directory of test data

# One sub-directory per class; labels come from the directory names.
training_set = train_datagen.flow_from_directory(train_data_dir,
                                                 target_size=(224, 224),
                                                 batch_size=batch_size,
                                                 class_mode='categorical')

test_set = test_datagen.flow_from_directory(test_data_dir,
                                            target_size=(224, 224),
                                            batch_size=batch_size,
                                            class_mode='categorical')

# Show the class-name -> index mapping used for the one-hot labels.
print(training_set.class_indices)

# # checkpoint (disabled): save best weights by validation accuracy
# weightpath = "weights_1.hdf5"
# checkpoint = ModelCheckpoint(weightpath, monitor='val_acc', verbose=1, save_best_only=True, mode='max')
# callbacks_list = [checkpoint]
#
#
# #fitting images to CNN
# history = classifier.fit_generator(training_set,
#                          steps_per_epoch=training_set.samples//batch_size,
#                          validation_data=test_set,
#                          epochs=50,
#                          validation_steps=test_set.samples//batch_size,
#                          callbacks=callbacks_list)

#fitting images to CNN (short 2-epoch run, no checkpointing)
history = classifier.fit_generator(training_set,
                         steps_per_epoch=training_set.samples//batch_size,
                         validation_data=test_set,
                         epochs=2,
                         validation_steps=test_set.samples//batch_size)

#saving model (full architecture + weights) to disk
filepath="model.hdf5"
classifier.save(filepath)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(loss) + 1)
#accuracy plot
plt.plot(epochs, acc, color='green', label='Training Accuracy')
plt.plot(epochs, val_acc, color='blue', label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Train', 'Test'], loc='upper left')
#loss plot
plt.plot(epochs, loss, color='pink', label='Training Loss')
plt.plot(epochs, val_loss, color='red', label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show() | [
"noreply@github.com"
] | eshita92.noreply@github.com |
70964f54b2c252bf34810cb4b378fc77f351ef7d | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/baiduads/dpacreative/model/bat_set_range_response_wrapper_body.py | bf3a52bf83e136d4770a30049ef79667567a1bfd | [
"Apache-2.0"
] | permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 10,962 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from baiduads.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from baiduads.exceptions import ApiAttributeError
class BatSetRangeResponseWrapperBody(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'data': ([dict],), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""BatSetRangeResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([dict]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""BatSetRangeResponseWrapperBody - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data ([dict]): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"jiangyuan04@baidu.com"
] | jiangyuan04@baidu.com |
cf989725038467206722394ad1e8f7c9cb87799d | abfed9978789cd3ea4c72b5831e596beb3130738 | /Regex Algorithm.py | 5318fa0b5b9912e2ac003b2745351b706554f168 | [] | no_license | test1932/Regex-script-s- | ebc05179940b2db47df49a0ecea0309bbf1faf3b | 6c68de49553474baa545b65b6d44dc31b4f1746a | refs/heads/main | 2023-07-06T16:22:14.064189 | 2021-07-30T08:01:55 | 2021-07-30T08:01:55 | 390,781,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,901 | py | import re #import regex
importFileAdr=["pseudosyringae_proteins.fa","P.boem.v1.proteins.fa"]#files to be searched
FileIdent=["pseudosyringae","boehmeriae"]#name of proteins
patternlist=["R.LR.{0,50}[ED][ED][RK]","A.MY.S.{2}FPKDSPVTGLGHR", "GHRHDWE", "H.GPCE.{3}D{2}", "VWNQPVRGFKV.E","L.LFLAK"]#patterns to search
patternNameList=["RXLR","NLP1","NLP2","HXGPCE","PEP13","CRN"]#pattern names
def FindHeader():
IndexHArr=[]
patHead=re.compile(">");patHeadE=re.compile("\n")#.index() was inefficient
fileMatchObjS=patHead.search(file)
while fileMatchObjS:#while a pattern match exists
fileMatchObjE=patHeadE.search(file,fileMatchObjS.start()+1)
startpos=fileMatchObjS.start();endpos=fileMatchObjE.start()
IndexHArr.append([startpos,endpos])#append header start/end indexes to array
fileMatchObjS=patHead.search(file,fileMatchObjS.start()+1)
return IndexHArr
def spliceSequences(IndexHArr):
SequencesArr=[]
for index in range(len(IndexHArr)):
try:
SequencesArr.append(file[IndexHArr[index][1]+1:IndexHArr[index+1][0]].replace("\n",""))
except IndexError:#occurs on last index
SequencesArr.append(file[IndexHArr[index][1]+1:].replace("\n",""))
return SequencesArr
def LocatePattern(SequencesArr,locPatIndex):
isPresentArr=[]
pat=re.compile(patternlist[locPatIndex])
for item in SequencesArr:
isPresentArr.append(bool(pat.search(item)))#append whether match was found
return isPresentArr
def OutputData(IndexHArr,SequencesArr,isPresentArr,fIndex,locPatIndex):
writefile=open("#"+FileIdent[fIndex]+"_"+patternNameList[locPatIndex]+".txt","w")#concatenate filename
count=0
for index in range(len(IndexHArr)):
if isPresentArr[index]:
count+=1
writefile.write(file[IndexHArr[index][0]:IndexHArr[index][1]]+"\t\t"+SequencesArr[index]+"\n")#write data to file
writefile.close()
return count
def hitsOutput(totals, fIndex):
totalsFile=open("#"+FileIdent[fIndex]+"_total_hits.txt","w")
for index in range(len(totals)):
totalsFile.write(patternNameList[index]+":\t"+str(totals[index])+"\n")
totalsFile.close()
#main program
for fileIndex in range(len(importFileAdr)):
totalHits=[]
fileObj=open(importFileAdr[fileIndex],"r");file=fileObj.read()#open proteins file
IndexHArr=FindHeader()#call FindHeader subprogram
SequencesArr=spliceSequences(IndexHArr)#call spliceSequences subprogram
fileObj.close()
for patIndex in range(len(patternlist)):#loop through patterns
isPresentArr=LocatePattern(SequencesArr,patIndex)#call LocatePattern subprogram
totalHits.append(OutputData(IndexHArr,SequencesArr,isPresentArr,fileIndex,patIndex))#call OutputData subprogram
hitsOutput(totalHits,fileIndex)
| [
"noreply@github.com"
] | test1932.noreply@github.com |
464092019080bf601c6b7268ed92c44523d66b49 | a9b6d9842e3094894758db3165496ee373183d74 | /lists/migrations/0003_list.py | acf50aef7d2e52839b5f74eb0dc7e40ba8ab67c9 | [] | no_license | ajroberts0417/django_tdd | 49e2b183780a88b202490685cf77560aa0906a24 | e47ccfda48b45a3830e93211449d3ad30c49af55 | refs/heads/master | 2020-03-26T09:50:41.533660 | 2018-08-20T20:02:42 | 2018-08-20T20:02:42 | 144,767,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-15 23:24
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the ``List`` model with only an auto ``id`` PK."""

    dependencies = [
        ('lists', '0002_item_text'),
    ]

    operations = [
        migrations.CreateModel(
            name='List',
            fields=[
                # Standard auto-incrementing primary key; no other fields yet.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
    ]
| [
"ajroberts0417@gmail.com"
] | ajroberts0417@gmail.com |
b59641920ce0787bdda82226455c999d8bfa5e60 | 23b3c698412f71a2878ae586f5599f2b6e38c980 | /source-code/bokeh/ising.py | 1dab727e38f3966bbe674a61422b703eec89d4d9 | [
"CC-BY-4.0"
] | permissive | gjbex/Scientific-Python | 9b7ae7b3398cc9358d1f530ca24243b63f3c01f0 | 02d24e6e22cfbc5b73429a2184ecbdfcd514c8fc | refs/heads/master | 2023-08-17T10:17:39.963630 | 2023-05-12T14:51:32 | 2023-05-12T14:51:32 | 221,184,612 | 13 | 13 | CC-BY-4.0 | 2023-08-03T08:04:57 | 2019-11-12T09:55:27 | Jupyter Notebook | UTF-8 | Python | false | false | 964 | py | #!/usr/bin/env python
from argparse import ArgumentParser
from bokeh.layouts import column
from bokeh.models import CustomJS, ColumnDataSource, Slider
from bokeh.plotting import curdoc, figure
import numpy as np
# Domain for the plot: 301 points on [-3, 3]; `y` (= x) is the dashed
# reference line drawn later.
x = np.linspace(-3.0, 3.0, 301)
y = x.copy()

# Initial curve y = tanh(beta * x) at the slider's default beta.
default_beta = 4.0
y_tanh = np.tanh(default_beta*x)

# Shared data source; `callback` replaces its 'y' column when beta changes.
source = ColumnDataSource(data=dict(x=x, y=y_tanh))
def callback(attr, old_value, new_value):
    """Slider handler: recompute y = tanh(beta * x) for the new beta.

    ``attr`` and ``old_value`` are required by Bokeh's on_change signature
    but are not used here; ``new_value`` is the slider's new beta.
    """
    xs = source.data['x']
    source.data = {'x': xs, 'y': np.tanh(new_value * xs)}
plot = figure(width=300, height=300)
# Dashed reference line y = x, plus the live tanh curve bound to `source`.
plot.line(x, y, line_width=0.5, line_dash='3 3')
plot.line('x', 'y', source=source)
# Axis labels rendered as LaTeX.
plot.xaxis.axis_label = '$$x$$'
plot.yaxis.axis_label = r'$$\tanh \beta x$$'

# Slider controls beta; moving it fires `callback` to update the curve.
slider = Slider(start=0.2, end=6.0, value=default_beta, step=0.01,
                title=r'$$\beta$$')
slider.on_change('value', callback)

layout = column(children=[plot, slider])
curdoc().add_root(layout)
| [
"geertjan.bex@uhasselt.be"
] | geertjan.bex@uhasselt.be |
94ad119004a4fd0ddd961a8ed9e3b31bb811fd1a | 1b7f4cd39bf7e4a2cf667ac13244e5138ee86cb2 | /agents/game/human_agent.py | b04040eb7f8fea24819cc7ddc959c01950f3bda1 | [
"MIT"
] | permissive | cjreynol/willsmith | 02f793003a914a21b181839bbd58108046f312d6 | 39d3b8caef8ba5825f3a0272c7fd61a2f78ef2b5 | refs/heads/master | 2020-07-15T13:25:57.613707 | 2018-06-12T00:18:19 | 2018-06-12T00:18:19 | 205,572,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,390 | py | from agents.displays.human_display import HumanDisplay
from willsmith.game_agent import GameAgent
class HumanAgent(GameAgent):
    """Game agent driven by console input from a human player.

    The prompt string and the action parser come from the action class
    supplied at construction time and are stored on the class itself; they
    are used by `search` to turn typed input into game actions.
    """

    GUI_DISPLAY = None  # HumanDisplay is not yet ready
    INPUT_PROMPT = None
    INPUT_PARSER = None

    def __init__(self, agent_id, use_gui, action):
        super().__init__(agent_id, use_gui)
        self.add_input_info(action)

    def add_input_info(self, action):
        """Install the action class's prompt and parser on HumanAgent."""
        HumanAgent.INPUT_PROMPT = action.INPUT_PROMPT
        HumanAgent.INPUT_PARSER = action.parse_action

    def search(self, state, allotted_time):
        """Keep prompting the player until a legal action is entered."""
        legal = state.get_legal_actions()
        while True:
            choice = HumanAgent.INPUT_PARSER(input(HumanAgent.INPUT_PROMPT))
            if choice in legal:
                return choice
            print("Last move was not legal, please try again.\n")

    def _take_action(self, action):
        # A human agent keeps no internal game state to update.
        pass

    def _reset(self):
        # Nothing to reset between games.
        pass

    def __str__(self):
        return ""
"cjreynol13@aol.com"
] | cjreynol13@aol.com |
9d6b3dbe8c1086905113bc6961e0043f78bed088 | 7849c8276a9eb0a93be6c8f387c992dcce88eaf6 | /adslproxy/__init__.py | ad748a8a154c074e76126e2a2cd81be3e083ff78 | [
"MIT"
] | permissive | jason021sh/lightproxy | 24b68fa976bab028984cdda7143f640e54aa63fb | 4a7eba19e248c551190f46d1661ba775dad2efd3 | refs/heads/master | 2022-11-10T00:38:23.949662 | 2020-06-15T08:34:30 | 2020-06-15T08:34:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | # coding=utf-8
__version__ = '2.0.0'
# from adslproxy.db import RedisClient
# from adslproxy.api import server
def version():
return __version__
| [
"noreply@github.com"
] | jason021sh.noreply@github.com |
f3c047ac5d429cfb32fe30af204675af54b253f8 | a1e4d02d1b24b3aa75ef8408f96c733ae3cd52ac | /subhalo/detection.py | cd242dcbe4917af9cd2cee31586d4dfa7541a7c4 | [] | no_license | calvinosinga/HIColor | 11d6056d0625a9b83b0efc66654c10178ac7e3d8 | 2c9a3ef79b736fa04d79ade488932179866bb284 | refs/heads/master | 2023-02-12T07:20:28.626102 | 2021-01-01T01:13:24 | 2021-01-01T01:13:24 | 240,795,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,058 | py |
import numpy as np
import h5py as hp
grid = (2048,2048,2048)
def get_mass(key,path):
    """Open the HDF5 file at ``path`` and return its ``key`` and 'flags' data.

    Returns a pair ``(mass, flags)``. On success these are live h5py
    datasets backed by the still-open file handle (the file is deliberately
    not closed, since the datasets read from it lazily). On a missing file
    or missing dataset, prints a message and returns a zero-filled
    ``grid``-shaped float32 array plus a 3-element zero array instead.

    NOTE(review): the fallback allocates a full grid of zeros (2048^3
    float32, ~32 GB) -- confirm the caller can afford that.
    """
    try:
        f=hp.File(path,'r')
    except IOError:
        # File missing/unreadable: substitute all-zero data.
        print('files not found')
        return np.zeros(grid, dtype=np.float32), np.zeros(3)
    else:
        try:
            mass = f[key]
            flags = f['flags']
        except KeyError:
            # File exists but the requested field does not.
            print(key+' field not found - creating substitute')
            return np.zeros(grid, dtype=np.float32), np.zeros(3)
        else:
            return mass,flags
# Combine the 'red' and 'blue' mass grids into one detection grid, printing
# running totals as a sanity check that the accumulation is correct.
total,totflags = get_mass('red','red_final.hdf5')
sum1 = np.sum(total)
print('first field:' + str(sum1))

m,fl = get_mass('blue','blue_final.hdf5')
# Flags are accumulated alongside the mass grids.
totflags=np.add(totflags,fl)
sum2 =np.sum(m)
print('second field:' + str(sum2))
total=np.add(total,m)
tot1 = np.sum(total)
# The combined sum should equal the sum of the two field totals.
print('first sum should be' + str(sum1+sum2)+', is: ' +str(tot1))

# Third and fourth fields are currently disabled.
# m,fl = get_mass(run,third)
# sum3 = np.sum(m)
# totflags=np.add(totflags,fl)
# print('third field:' + str(sum3))
# total=np.add(total,m)
# tot2=np.sum(total)
# print('second sum should be' + str(sum1+sum2+sum3)+', is: ' +str(tot2))

# m,fl = get_mass(run,fourth)
# totflags=np.add(totflags,fl)
# sum4=np.sum(m)
# print('third field:' + str(sum4))
# total = np.add(total,m)
# tot3 = np.sum(total)
# print('last sum should be '+str(sum1+sum2+sum3+sum4)+', is: '+ str(tot3))

# Write the combined grid and accumulated flags out to a new HDF5 file.
w = hp.File('detection_final.hdf5','w')
w.create_dataset('detection',data=total)
w.create_dataset("flags",data=totflags)
print(totflags)
# if run == 'subhalo':
# keys = ['red','blue','dim','bright','nondetection']
# ls = ['magnitude','color']
# w.hp.File(result, 'w')
# for k in keys:
# total = get_mass(k,first)
# m = get_mass(k,second)
# total = np.add(total,m)
# m = get_mass(k,third)
# total = np.add(total, m)
# m = get_mass(k,fourth)
# total = np.add(total,m)
# w.create_dataset(k,data=total)
# for l in ls:
# total = get_field(l,first)
# m = get_field(l,second)
# total.extend(m)
# m = get_field(l,third)
# total.extend(m)
# m=get_field(l,fourth)
# total.extend(m)
# w.create_dataset(l,data=total)
# def get_field(key,path):
# try:
# f=hp.File(path,'r')
# except IOError:
# print('files not found')
# return []
# else:
# try:
# field = f[key]
# except KeyError:
# print(key+' field not found - creating substitute')
# return []
# else:
# return field
# if run == 'magnitude' or run == 'color':
# """
# Since these are lists rather than np arrays
# """
# total = get_field(run,first)
# m = get_field(run,second)
# total.extend(m)
# m = get_field(run,third)
# total.extend(m)
# m = get_field(run,fourth)
# total.extend(m)
# w = hp.File(result,'w')
# w.create_dataset(run,data=total)
| [
"cosinga@wisc.edu"
] | cosinga@wisc.edu |
70f4e03aa8a2930c56a4ec84979dc5bb1e836e28 | 745a605d52556d5195b7cdbf871fc1011b2dc9cd | /backend/mete/models.py | 92b2828ee3753d37d2fa5baa61d5d362342dc181 | [] | no_license | annikahannig/meteme | 96a6b919fbdac20bef7e13e1d101130cd1805b7b | 16ca646904a31833e8d1156be8f554e11ff0d37a | refs/heads/master | 2021-06-25T05:34:23.517379 | 2017-05-09T20:33:54 | 2017-05-09T20:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,526 | py | from __future__ import unicode_literals
from collections import OrderedDict
from django.db import models
from django.conf import settings
from djmoney.models.fields import MoneyField
from moneyed import Money
from solo.models import SingletonModel
from store import models as store_models
from unidecode import unidecode
import re
class Account(models.Model):
    """
    User account:

    We manage user accounts, separate from 'Users', because
    they don't have a password, may not have an email,
    and have an avatar.
    """
    # Exactly one account per auth user; deleted together with the user.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)

    # Optional avatar image; falls back to the bundled default picture.
    avatar = models.ImageField(upload_to='avatars/',
                               default='/static/store/img/default_avatar.png',
                               null=True, blank=True)

    # Account balance in EUR, starting at zero.
    balance = MoneyField(max_digits=10,
                         decimal_places=2,
                         default_currency='EUR',
                         default=Money(0, 'EUR'))

    # Administrative switches. NOTE(review): the exact semantics of
    # is_locked vs is_disabled are enforced elsewhere -- confirm before
    # documenting further.
    is_locked = models.BooleanField(default=False)
    is_disabled = models.BooleanField(default=False)

    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)

    def __unicode__(self):
        # Legacy Python 2 string representation; mirrors `name`.
        return self.name

    @property
    def name(self):
        # The account's display name is the linked user's username.
        return self.user.username

    @property
    def canonical_name(self):
        """Return normalized username"""
        # ASCII-transliterate (e.g. umlauts), drop non-word chars, lowercase.
        name = unidecode(self.name) # Transliterate umlauts
        name = re.sub(r'\W', '', name).lower()
        return name
class Barcode(models.Model):
    """
    Barcode(s) can be associated with an account
    or with a product.
    """
    # The scanned barcode value; globally unique.
    number = models.CharField(unique=True, max_length=42)

    # NOTE(review): both links are optional and nothing here enforces that a
    # barcode points at exactly one of product/account -- confirm intended.
    product = models.ForeignKey(store_models.Product,
                                null=True,
                                blank=True,
                                on_delete=models.CASCADE)

    account = models.ForeignKey(Account,
                                null=True,
                                blank=True,
                                on_delete=models.CASCADE)
class KeyPair(models.Model):
    """
    A user may supply a public/private key pair,
    so we can encrypt the audit log.

    If a user does not have a key pair, no personal
    log will be created.

    The the keys are created on the client using the NaCL
    crypto library.

    The private key is encrypted with a key derived from a password / pin,
    using the 'Password-Base Key Derivation Function 2' (PBKDF2) with
    at least 3 million iterations.

    The first 4 bytes of the encrypted private key determin
    additional hashing rounds as a measure against rainbow tables.
    """
    # One key pair per auth user; removed with the user.
    user = models.OneToOneField(settings.AUTH_USER_MODEL,
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)

    # Schema/crypto version tag so the client can migrate old key formats.
    crypto_version = models.PositiveSmallIntegerField(default=1)

    # Encrypted private key (see class docstring for the derivation scheme).
    private_key = models.CharField(max_length=68,
                                   blank=False,
                                   null=False,
                                   unique=True)

    public_key = models.CharField(max_length=64,
                                  blank=False,
                                  null=False,
                                  unique=True)

    # Signing (verify) key counterpart for signature checks.
    verify_key = models.CharField(max_length=64,
                                  blank=False,
                                  null=False,
                                  unique=True)

    created_at = models.DateTimeField(auto_now_add=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, blank=True)
class TransactionManager(models.Manager):
    """Manager for Transaction: newest-first ordering plus grouping helpers.

    The three grouping methods previously duplicated the same accumulation
    loop; it is consolidated into ``_grouped_by``. All of them return an
    ``OrderedDict`` whose insertion order follows the newest-first queryset.
    """

    def get_queryset(self):
        """
        Override default queryset to order transactions
        by date DESC
        """
        qs = super(TransactionManager, self).get_queryset()
        return qs.order_by('-created_at')

    def donations(self):
        """Transactions linked to a product."""
        return self.get_queryset().filter(product__isnull=False)

    def _grouped_by(self, transactions, key_func):
        """Group ``transactions`` into an OrderedDict keyed by ``key_func``."""
        groups = OrderedDict()
        for transaction in transactions:
            groups.setdefault(key_func(transaction), []).append(transaction)
        return groups

    @staticmethod
    def _month_key(transaction):
        """(year, month) grouping key for a transaction."""
        return (transaction.created_at.year, transaction.created_at.month)

    def donations_grouped_months(self):
        """ Get donations, grouped by month """
        return self._grouped_by(self.donations(), self._month_key)

    def grouped(self):
        """All transactions grouped by calendar day (midnight datetime key)."""
        def day_key(transaction):
            return transaction.created_at.replace(
                hour=0, minute=0, second=0, microsecond=0)
        return self._grouped_by(self.get_queryset(), day_key)

    def grouped_month(self):
        """All transactions grouped by (year, month)."""
        return self._grouped_by(self.get_queryset(), self._month_key)
class Transaction(models.Model):
    """
    Log Transactions.

    Do not store the associated account.
    This is just an audit log.
    """
    # Signed amount of the transaction, in EUR.
    amount = MoneyField(max_digits=10,
                        decimal_places=2,
                        default_currency='EUR')

    # Optional link to the purchased product; `product_name` presumably keeps
    # a text snapshot so the log entry survives product changes -- confirm.
    product = models.ForeignKey('store.Product', null=True, blank=True)
    product_name = models.CharField(null=True, blank=True, max_length=80)

    created_at = models.DateTimeField(auto_now_add=True, blank=True)

    # Custom manager providing ordering and grouping helpers.
    objects = TransactionManager()
class UserSetting(models.Model):
    """
    Configure per user preferences, like:

    Limiting categories. (This is it for now)
    """
    # One settings row per auth user; removed with the user.
    user = models.OneToOneField('auth.User',
                                null=False,
                                blank=False,
                                on_delete=models.CASCADE)

    # Store categories the user has selected; empty means no restriction
    # was configured (enforcement happens elsewhere -- confirm).
    categories = models.ManyToManyField('store.Category',
                                        blank=True)
class Settings(SingletonModel):
    """Site-wide singleton settings; currently only the active price set."""
    # NOTE(review): default=1 assumes a PriceSet with pk 1 exists — confirm.
    price_set = models.ForeignKey('store.PriceSet', null=True, blank=False, default=1)
| [
"matthias@hannig.cc"
] | matthias@hannig.cc |
9060c56c89249c734e758aa255eb344845dcdd91 | b6e057297f5545f7e995d6f454b83953ed81dd42 | /TPraticas/Aula3/E01/verify-app.py | c3b700979c5c03d507a091a49639ca3fd6f348d4 | [] | no_license | mateuuss/Grupo12 | 203451067fac6ca1164fd3de223aa22f7fc8140c | 660063bf25ebbff182decf05eff7e57575bc2619 | refs/heads/master | 2022-11-21T17:05:29.235606 | 2020-07-24T18:38:58 | 2020-07-24T18:38:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,193 | py | # coding: latin-1
###############################################################################
# eVotUM - Electronic Voting System
#
# verify-app.py
#
# Cripto-7.4.1 - Commmad line app to exemplify the usage of verifySignature
# function (see eccblind.py)
#
# Copyright (c) 2016 Universidade do Minho
# Developed by André Baptista - Devise Futures, Lda. (andre.baptista@devisefutures.com)
# Reviewed by Ricardo Barroso - Devise Futures, Lda. (ricardo.barroso@devisefutures.com)
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
###############################################################################
"""
Command line app that receives signer's public key from file and Data, Signature, Blind Components and
prComponents from STDIN and writes a message to STDOUT indicating if the signature is valid..
"""
import sys
from eVotUM.Cripto import eccblind
from eVotUM.Cripto import utils
def printUsage():
    """Print the command-line usage string for this application."""
    usage = ("Usage: python verify-app.py -cert <certificado do assinante> -msg <mensagem original a assinar> "
             "-sDash <Signature> -f <ficheiro do requerente>")
    print(usage)
def parseArgs():
    """Validate sys.argv, read the requester file and dispatch to main().

    Expected shape: verify-app.py -cert <path> -msg <words...> -sDash <sig> -f <file>.
    On a malformed command line only the usage text is printed.
    """
    # NOTE(review): positions are checked from both ends (argv[1], argv[-2],
    # argv[-4]); the '-msg' flag itself is never verified — confirm intended.
    if len(sys.argv) < 9 or sys.argv[1] != '-cert' or sys.argv[-2] != '-f' or sys.argv[-4] != '-sDash':
        printUsage()
    else:
        eccPublicKeyPath = sys.argv[2]
        # The message may contain spaces: everything between '-msg' value start
        # and '-sDash' is re-joined with single spaces.
        data = ' '.join(sys.argv[4:-4])
        sDash = sys.argv[-3]
        with open(sys.argv[-1], 'r') as f:
            requesterFile = f.read()
        main(eccPublicKeyPath, data, sDash, requesterFile)
def showResults(errorCode, validSignature):
    """Print a human-readable verdict for a signature verification result.

    errorCode None means verification ran; validSignature then decides the
    message. Known error codes map to fixed explanations; unknown codes
    print nothing after the header (same as the original elif chain).
    """
    error_messages = {
        1: "Error: it was not possible to retrieve the public key",
        2: "Error: pR components are invalid",
        3: "Error: blind components are invalid",
        4: "Error: invalid signature format",
    }
    print("Output")
    if errorCode is None:
        print("Valid signature" if validSignature else "Invalid signature")
    elif errorCode in error_messages:
        print(error_messages[errorCode])
def main(eccPublicKeyPath, data, signature, requesterFile):
    """Verify a blind signature and print the verdict.

    Reads the signer's PEM public key from eccPublicKeyPath, extracts the
    blind components and pR components from the requester file, then calls
    eccblind.verifySignature and reports via showResults().
    """
    pemPublicKey = utils.readFile(eccPublicKeyPath)
    # Store the content of the requester file in variables.
    # NOTE(review): offsets 18 and 15 assume fixed label prefixes on the two
    # lines of the requester file (first line "Blind components: ...", second
    # line "pR components: ...") — confirm against the file writer.
    blindComponents = requesterFile[18:requesterFile.find('\n')]
    pRComponents = requesterFile[requesterFile.find('\n') + 15:]
    errorCode, validSignature = eccblind.verifySignature(pemPublicKey, signature, blindComponents, pRComponents, data)
    showResults(errorCode, validSignature)
# Entry point: parse command-line arguments and run the verification.
if __name__ == "__main__":
    parseArgs()
| [
"noreply@github.com"
] | mateuuss.noreply@github.com |
bf754f39b9de1abd54afd78dfc0fdf4162003c97 | 7a12289ae78937ae40f1e8c121fd3c0dcf8a6ee1 | /main.py | a10c7dcec808e1718f6f914c0049990cda23135f | [
"MIT"
] | permissive | bojone/small_norb | 32ed46bac8b5a4853f1b00212cee09d3de8fe88b | c5db82d5426bf30d800eaeedc5ec1eb828e603c4 | refs/heads/master | 2020-03-09T05:49:28.669289 | 2018-04-08T09:23:05 | 2018-04-08T09:23:05 | 128,623,304 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | import matplotlib.pyplot as plt
from smallnorb.dataset import SmallNORBDataset
plt.ion()
if __name__ == '__main__':
# Initialize the dataset from the folder in which
# dataset archives have been uncompressed
dataset = SmallNORBDataset(dataset_root='./smallnorb/')
# Dump all images to disk
dataset.export_to_jpg(export_dir='smallnorb_export')
# Explore random examples of the training set
# to show how data look like
dataset.explore_random_examples(dataset_split='train')
| [
"ndrplz@gmail.com"
] | ndrplz@gmail.com |
f0701b76e300b53794a20d383a41472054a14abe | c459f4dd7b198ec8d8db8379726a5b2650be6636 | /regis/apps.py | b08ff1b7229ca929d911653fbb1a9cf748bcef33 | [] | no_license | jittat/admapp | 4c712182cd06e82efab6c2513fb865e5d00feae8 | 38bf299015ae423b4551f6b1206742ee176b8b77 | refs/heads/master | 2023-06-10T03:23:41.174264 | 2023-06-09T19:41:03 | 2023-06-09T19:41:03 | 101,953,724 | 10 | 4 | null | 2023-04-21T22:48:55 | 2017-08-31T03:12:04 | Python | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class RegisConfig(AppConfig):
    """Django application configuration for the 'regis' app."""
    name = 'regis'
| [
"jittat@gmail.com"
] | jittat@gmail.com |
f22a4469cb502ee17665beff83beebab1a8e70b4 | d2afc297840efaacd2520e2f9604254ea02ec55c | /practice/get_max_len_number_from_string.py | a9844d4efd5504db7933fb3d89e71f871e5f0798 | [] | no_license | gyfpython/start_python | 974a3e0c8029a8795c7ec9872c4b34814ba8527d | 940bf71cf98e643cb57328767315b5267fa43d9c | refs/heads/master | 2021-09-11T09:06:00.709046 | 2021-09-01T14:22:47 | 2021-09-01T14:22:47 | 233,510,969 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | """
Find the longest run of consecutive numeric characters in a string,
decimals included.
When two candidates have the same length, return the numerically larger one.
"""
def get_max_number(str2):
    """Pick the best decimal substring of a digit/dot run.

    str2 is a run of digits and dots. Returns the longest candidate that
    contains at most one dot; length ties are broken by numeric value.
    """
    if str2[0] == '.':
        # A leading dot can never start a valid number here.
        str2 = str2[1:]
    parts = str2.split('.')
    if len(parts) == 1:
        # No dot at all: the whole run is the answer.
        return parts[0]
    if len(parts) == 2:
        # Exactly one dot: drop a trailing dot, otherwise keep the value as-is.
        return parts[0] if parts[-1] == '' else str2
    # Several dots: every adjacent pair joined by one dot is a candidate.
    best = ""
    for left, right in zip(parts, parts[1:]):
        candidate = left + '.' + right
        if len(candidate) > len(best):
            best = candidate
        elif len(candidate) == len(best) and float(candidate) >= float(best):
            best = candidate
    return best
# Read lines from stdin until EOF (or any error — see bare except below).
# For each line: collect maximal runs of digits/dots, then report the best
# numeric substring across all runs via get_max_number().
while True:
    try:
        str1 = input()
        # Sentinel non-digit character so the final run is always flushed.
        str1 = str1 + 'A'
        temp = ''
        all_num1 = []
        for i in str1:
            if i.isdigit() or i == '.':
                temp = temp + i
            else:
                if temp != "":
                    # A lone dot is not a number — discard it.
                    if temp == '.':
                        temp = ''
                        continue
                    # Strip a trailing dot before recording the run.
                    if temp[-1] == '.':
                        all_num1.append(temp[:-1])
                        temp = ''
                    else:
                        all_num1.append(temp)
                        temp = ''
        # NOTE(review): looks like a leftover debug print of the raw runs.
        print(all_num1)
        # Longest wins; equal length resolved by numeric value (>= keeps the
        # later candidate on exact ties).
        maxlen = ''
        for test in all_num1:
            maxlentmp = get_max_number(test)
            if len(maxlentmp) > len(maxlen):
                maxlen = maxlentmp
            elif len(maxlentmp) == len(maxlen):
                if float(maxlentmp) >= float(maxlen):
                    maxlen = maxlentmp
                else:
                    pass
            else:
                pass
        print(maxlen)
    # NOTE(review): bare except also swallows unexpected errors, not just the
    # EOFError that ends input — consider narrowing to EOFError.
    except:
        break
| [
"1453365491@qq.com"
] | 1453365491@qq.com |
5cf9e4839963c2c5dace99204f707d7e8424f061 | 14c5bd382ac9ffbfa4ae34f244bca6685f3cd18c | /apps/geotracker/models.py | d3eff90a8929fa59880c39ed709ce3692949a42b | [] | no_license | redhog/arwen | e8705e978588163554c83e3278297506c1ffb2ce | 342daa97a72c0776d4dfe27196adfe66d4dff63c | refs/heads/master | 2021-01-17T13:08:09.392613 | 2011-08-26T09:21:40 | 2011-08-26T09:21:40 | 2,084,644 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,235 | py | # -*- coding: utf-8 -*-
import django.contrib.auth.models
from django.utils.translation import ugettext_lazy as _
import django.contrib.gis.db.models
import geotracker.geos
import linkableobject.models
class Vehicle(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A trackable vehicle with a name, description and owning user."""
    objects = django.contrib.gis.db.models.GeoManager()
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="owned_vehicles")
    def __unicode__(self):
        return self.name
class TimePoint(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A geographic point observed at a specific moment in time."""
    objects = django.contrib.gis.db.models.GeoManager()
    timestamp = django.contrib.gis.db.models.DateTimeField()
    point = django.contrib.gis.db.models.PointField(geography=True)
    @property
    def as_geosfeature(self):
        # Single point feature carrying the timestamp as a property.
        return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp)
    @property
    def as_geoscollection(self):
        # Collection containing just this point's feature.
        return geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
    def __unicode__(self):
        return "%s @ %s" % (self.point, self.timestamp)
class Path(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """An ordered sequence of time points forming a line on the map."""
    objects = django.contrib.gis.db.models.GeoManager()
    timestamp = django.contrib.gis.db.models.DateTimeField()
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    @property
    def as_geosfeature(self):
        # LineString built from the related points in chronological order.
        return geotracker.geos.GEOSFeature(django.contrib.gis.geos.LineString([point.point for point in self.points.order_by('timestamp')]),
                                           self.id,
                                           name = self.name,
                                           description = self.description)
    @property
    def as_geoscollection(self):
        # The line feature itself plus every individual point feature.
        res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
        for point in self.points.order_by('timestamp'):
            res += point.as_geoscollection
        return res
    def __unicode__(self):
        return self.name
class PathPoint(TimePoint):
    """A TimePoint that belongs to a Path."""
    path = django.contrib.gis.db.models.ForeignKey(Path, related_name='points')
    path.verbose_related_name = _("Points")
    @property
    def as_geosfeature(self):
        # Same as TimePoint's feature, plus the owning path id as a property.
        return geotracker.geos.GEOSFeature(self.point, self.id, timestamp = self.timestamp, path = self.path.id)
class Journey(django.contrib.gis.db.models.Model, linkableobject.models.LinkableModelMixin):
    """A trip taken with a vehicle, composed of one or more paths."""
    objects = django.contrib.gis.db.models.GeoManager()
    vehicle = django.db.models.ForeignKey(Vehicle, related_name="journeys")
    vehicle.verbose_related_name = _("Journeys")
    owner = django.db.models.ForeignKey(django.contrib.auth.models.User, related_name="organized_journeys")
    owner.verbose_related_name = _("Organized journeys")
    name = django.contrib.gis.db.models.CharField(_('name'), max_length=256)
    description = django.contrib.gis.db.models.TextField(_('description'))
    @property
    def as_geosfeature(self):
        # MultiLineString combining each path's line, in chronological order.
        return geotracker.geos.GEOSFeature(django.contrib.gis.geos.MultiLineString([path.as_geosfeature.geometry for path in self.paths.order_by('timestamp')]),
                                           self.id,
                                           vehicle = self.vehicle.id,
                                           owner = self.owner.id,
                                           name = self.name,
                                           description = self.description)
    @property
    def as_geoscollection(self):
        # The journey feature plus every path's (and thus point's) feature.
        res = geotracker.geos.GEOSFeatureCollection([self.as_geosfeature])
        for path in self.paths.order_by('timestamp'):
            res += path.as_geoscollection
        return res
    def __unicode__(self):
        return self.name
class JourneyPath(Path):
    """A Path that belongs to a Journey."""
    journey = django.contrib.gis.db.models.ForeignKey(Journey, related_name='paths', verbose_name=_('Journey'))
    journey.verbose_related_name = _("Paths")
| [
"egil.moller@freecode.no"
] | egil.moller@freecode.no |
17f68f4a271ef3e1abafb87410b9182f9486a073 | c5c5a2ce8b7762390c0ac82e1a025231d12efe06 | /recipes/models.py | b49bf91231f9bdc9d43b19398d4b48ff06fd947f | [] | no_license | jtebert/lazy-baker | c5f21d2db27c4c28189620bf5ee0548b18fe498a | c9a28b460cb74e579e8edb5680af0ece45fac9ca | refs/heads/master | 2022-12-11T06:31:24.743834 | 2020-03-05T15:50:43 | 2020-03-05T15:50:43 | 101,441,489 | 2 | 1 | null | 2022-12-08T03:11:20 | 2017-08-25T20:54:20 | CSS | UTF-8 | Python | false | false | 10,133 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.db.models import Q
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from modelcluster.fields import ParentalKey
from wagtail.core.models import Page, Orderable
from wagtail.search import index
from wagtail.core.fields import StreamField
from wagtail.core import blocks
from wagtail.embeds.blocks import EmbedBlock
from wagtail.images.blocks import ImageChooserBlock
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.admin.edit_handlers import (FieldPanel,
FieldRowPanel,
InlinePanel,
PageChooserPanel,
StreamFieldPanel)
from .utils import format_ingredient_line
md_format_help = 'This text will be formatted with markdown.'
class CaptionedImageBlock(blocks.StructBlock):
    """StreamField block pairing an image with an optional caption override."""
    image = ImageChooserBlock()
    caption = blocks.CharBlock(help_text='This will override the default caption.'+md_format_help,
                               blank=True, null=True, required=False)
    class Meta:
        icon = 'image'
        template = 'captioned_image_block.html'
        label = 'Image'
class CategoryPage(Page):
    """
    Identifies the different categories to apply to recipes (can apply multiple to a recipe)
    """
    parent_page_types = ['CategoryIndexPage', 'CategoryGroupPage']
    subpage_types = []
    description = models.TextField(
        max_length=800,
        null=True, blank=True,
        help_text=md_format_help)
    icon = models.ForeignKey(
        'images.CustomImage',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='This should be a square line-based icon in the right color'
    )
    content_panels = Page.content_panels + [
        ImageChooserPanel('icon'),
        FieldPanel('description'),
    ]
    def __unicode__(self):
        # Prefix the group name when this category lives under a group page.
        parent_page = self.get_parent()
        if parent_page.specific_class == CategoryGroupPage:
            return str(parent_page) + ': ' + self.title
        else:
            return self.title
    class Meta:
        verbose_name = "Category"
        ordering = ['title']
    def get_context(self, request, *args, **kwargs):
        """
        Add a paginated 'recipes' entry to the template context, using the
        site-wide pagination count when one is configured.
        """
        context = super(CategoryPage, self).get_context(
            request, *args, **kwargs)
        recipes = self.get_recipes()
        # Pagination
        page = request.GET.get('page')
        page_size = 10
        # Imported here to avoid a circular import at module load time.
        from home.models import GeneralSettings
        if GeneralSettings.for_site(request.site).pagination_count:
            page_size = GeneralSettings.for_site(request.site).pagination_count
        if page_size is not None:
            paginator = Paginator(recipes, page_size)
            try:
                recipes = paginator.page(page)
            except PageNotAnInteger:
                recipes = paginator.page(1)
            except EmptyPage:
                recipes = paginator.page(paginator.num_pages)
        context['recipes'] = recipes
        return context
    def get_recipes(self):
        """
        Return all live recipes tagged with this category, ordered by title.
        :return: QuerySet of RecipePage
        """
        recipes = RecipePage.objects.live()
        recipes = recipes.filter(recipe_categories__category=self)
        recipes = recipes.order_by('title')
        return recipes
class CategoryGroupPage(Page):
    """
    Categorization group (e.g., "meat") which has individual categories under it.
    Only categories are applied to recipes (not category groups), but recipes will show up under grouping
    """
    parent_page_types = ['CategoryIndexPage']
    subpage_types = [CategoryPage]
    icon = models.ForeignKey(
        'images.CustomImage',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='This should be a square line-based icon in the right color'
    )
    content_panels = Page.content_panels + [
        ImageChooserPanel('icon'),
    ]
    class Meta:
        verbose_name = "Category Group"
    # TODO: Write function to list all categories
    # TODO: Write function to list all recipes in category group
class CategoryIndexPage(Page):
    """
    Top-level page (should only be one) under which to categorize recipes
    """
    subpage_types = [CategoryPage, CategoryGroupPage]
    class Meta:
        verbose_name = "Recipe Categories Index"
    def list_categories(self):
        """
        List ALL categories, regardless of where they sit in the tree.
        :return: QuerySet of every CategoryPage
        """
        return CategoryPage.objects.all()
    # TODO: Category listing ignores hierarchy
class CategoryLink(Orderable):
    """Through-model attaching a CategoryPage to a RecipePage (many-to-many)."""
    page = ParentalKey('RecipePage', related_name='recipe_categories')
    category = models.ForeignKey(
        CategoryPage,
        on_delete=models.SET_NULL,
        null=True)
    panels = [
        PageChooserPanel('category')
    ]
class RecipePage(Page):
    """A single recipe: metadata, timings, ingredients, instructions and notes."""
    parent_page_types = ["RecipeIndexPage",]
    subpage_types = []
    # TODO: nutrition info
    post_date = models.DateField(null=True)
    main_image = models.ForeignKey(
        'images.CustomImage',
        null=True,
        on_delete=models.SET_NULL,
        related_name='+',
        help_text='Image should be at least 1920x768 px'
    )
    intro = models.TextField(
        max_length=250,
        help_text='Appears above the recipe and on preview pages. '+md_format_help)
    prep_time = models.DurationField(blank=True, null=True)
    cook_time = models.DurationField(blank=True, null=True)
    total_time = models.DurationField(blank=True, null=True)
    recipe_yield = models.CharField(max_length=127, blank=True, verbose_name='Yield')
    source_name = models.CharField(max_length=255, blank=True, verbose_name='Source')
    source_url = models.URLField(blank=True)
    ingredients = models.TextField(blank=True, help_text='One ingredient per line. Make separate sections with a square bracketed line like [section name]. '+md_format_help)
    instructions = models.TextField(blank=True, help_text='Each new line generates a new numbered instruction. '+md_format_help)
    notes = models.TextField(blank=True, help_text='Additional notes such as substitutions. '+md_format_help)
    content_panels = Page.content_panels + [
        FieldPanel('post_date'),
        ImageChooserPanel('main_image'),
        InlinePanel('recipe_categories', label='Categories'),
        FieldPanel('intro'),
        FieldRowPanel([
            FieldPanel('prep_time', classname='col3'),
            FieldPanel('cook_time', classname='col3'),
            FieldPanel('total_time', classname='col3'),
            FieldPanel('recipe_yield', classname='col3'),
        ], classname='label-above'),
        FieldRowPanel([
            FieldPanel('source_name', classname='col6'),
            FieldPanel('source_url', classname='col6')
        ], classname='label-above'),
        FieldPanel('ingredients'),
        FieldPanel('notes'),
        FieldPanel('instructions'),
        #InlinePanel('instructions', label='Instructions'),
    ]
    def format_ingredients(self):
        """
        Format the ingredients field into Markdown, which can be formatted with make_markdown in the template
        :return: String of Markdown-formatted ingredients
        """
        lines = [format_ingredient_line(line) for line in self.ingredients.splitlines()]
        return '\n'.join(lines)
    def format_instructions(self):
        """
        Format the instructions string into a Markdown ordered list, which can be formatted with make_markdown in the template
        :return: String of Markdown-formatted instructions (ordered list)
        """
        # Drop blank lines first, then number the remaining steps starting at 1.
        # (Previously steps were numbered by their raw line index starting at 0,
        # so lists began at "0." and blank lines left gaps in the numbering.)
        steps = [line for line in self.instructions.splitlines()
                 if len(line) > 0 and not line.isspace()]
        return '\n'.join('{}. {}'.format(number, step)
                         for number, step in enumerate(steps, start=1))
    search_fields = Page.search_fields + [
        index.SearchField('intro'),
        index.SearchField('ingredients'),
        index.SearchField('instructions'),
        index.SearchField('notes'),
    ]
    class Meta:
        verbose_name = "Recipe"
    def __unicode__(self):
        return self.title
class RecipeIndexPage(Page):
    """
    Root page under which all recipe pages are made.
    There should only be one of these, at the top level
    """
    subpage_types = ['RecipePage']
    class Meta:
        verbose_name = 'Recipes Index'
    def get_context(self, request, *args, **kwargs):
        """
        Add a paginated 'recipes' entry to the template context, using the
        site-wide pagination count when one is configured.
        """
        context = super(RecipeIndexPage, self).get_context(
            request, *args, **kwargs)
        recipes = self.get_recipes()
        # Pagination
        page = request.GET.get('page')
        page_size = 10
        # Imported here to avoid a circular import at module load time.
        from home.models import GeneralSettings
        if GeneralSettings.for_site(request.site).pagination_count:
            page_size = GeneralSettings.for_site(request.site).pagination_count
        if page_size is not None:
            paginator = Paginator(recipes, page_size)
            try:
                recipes = paginator.page(page)
            except PageNotAnInteger:
                recipes = paginator.page(1)
            except EmptyPage:
                recipes = paginator.page(paginator.num_pages)
        context['recipes'] = recipes
        return context
    def get_recipes(self):
        """
        Return all live recipes, ordered by title.
        :return: QuerySet of RecipePage
        """
        recipes = RecipePage.objects.live()
        recipes = recipes.order_by('title')
        return recipes
| [
"julia@juliaebert.com"
] | julia@juliaebert.com |
fdf6b8f440d6510ead6a221d98fac67e2c12d66f | 618acdb217694461304c1d4aa05655cc3769349d | /dtaidistance/util.py | a6d1f2f6f97dee52ec10051dcb6191d8876e3dcb | [
"Apache-2.0"
] | permissive | gene-fingerprinting/dtaidistance-2.0.6_F-distance | ec5e7a80657d89e71051e6037137ef7c9e78b65c | ed03980470213a7eb4cc6d5604aab0df81bcb510 | refs/heads/main | 2023-05-09T17:52:22.298492 | 2021-06-10T03:43:29 | 2021-06-10T03:43:29 | 374,305,823 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,297 | py | # -*- coding: UTF-8 -*-
"""
dtaidistance.util
~~~~~~~~~~~~~~~~~
Utility functions for DTAIDistance.
:author: Wannes Meert
:copyright: Copyright 2017-2018 KU Leuven, DTAI Research Group.
:license: Apache License, Version 2.0, see LICENSE for details.
"""
import os
import sys
import csv
import logging
from array import array
from pathlib import Path
import tempfile
try:
import numpy as np
except ImportError:
np = None
try:
from . import dtw_cc
except ImportError:
dtw_cc = None
try:
from . import dtw_cc_omp
except ImportError:
dtw_cc_omp = None
try:
from . import dtw_cc_numpy
except ImportError:
dtw_cc_numpy = None
# Module-wide logger and the absolute directory this package lives in.
logger = logging.getLogger("be.kuleuven.dtai.distance")

dtaidistance_dir = os.path.abspath(os.path.dirname(__file__))
def prepare_directory(directory=None):
    """Prepare the given directory, create it if necessary.

    If no directory is given, a new directory will be created in the
    system's temp directory (prefixed ``dtaidistance_``).
    """
    if directory is None:
        # No target supplied: fall back to a fresh temporary directory.
        directory = tempfile.mkdtemp(prefix="dtaidistance_")
        logger.debug("Using directory: {}".format(directory))
        return Path(directory)
    directory = Path(directory)
    if not directory.exists():
        directory.mkdir(parents=True)
    logger.debug("Using directory: {}".format(directory))
    return Path(directory)
def read_substitution_matrix(file):
    """Read substitution matrix from file.

    Comments starting with # and blank lines are allowed anywhere in the
    file. The first data line lists the column symbols; every following
    line starts with a row symbol followed by one weight per column.

    :return: A dictionary mapping tuples of symbols to their weight.
    """
    def data_lines(handle):
        # Yield right-stripped lines, skipping blanks and '#' comments.
        for raw in handle:
            if not raw.rstrip() or raw[0] == '#':
                continue
            yield raw.rstrip()

    weights = dict()
    with open(file) as handle:
        rows = csv.reader(data_lines(handle), delimiter=" ", skipinitialspace=True)
        header = next(rows)
        columns = {position: symbol for position, symbol in enumerate(header)}
        for row in rows:
            row_symbol = row[0]
            for position, value in enumerate(row[1:]):
                weights[(columns[position], row_symbol)] = float(value)
    return weights
class SeriesContainer:
    def __init__(self, series):
        """Container for a list of series.
        This wrapper class knows how to deal with multiple types of datastructures to represent
        a list of sequences:
        - List[array.array]
        - List[numpy.array]
        - List[List]
        - numpy.array
        - numpy.matrix
        When using the C-based extensions, the data is automatically verified and converted.
        """
        if isinstance(series, SeriesContainer):
            # Share the underlying data instead of re-wrapping.
            self.series = series.series
        elif np is not None and isinstance(series, np.ndarray) and len(series.shape) == 2:
            # A matrix always returns a 2D array, also if you select one row (to be consistent
            # and always be a matrix datastructure). The methods in this toolbox expect a
            # 1D array thus we need to convert to a 1D or 2D array.
            # self.series = [np.asarray(series[i]).reshape(-1) for i in range(series.shape[0])]
            self.series = np.asarray(series, order="C")
        elif type(series) == set or type(series) == tuple:
            # Sets/tuples are converted to lists so they can be indexed.
            self.series = list(series)
        else:
            self.series = series
    def c_data(self):
        """Return a datastructure that the C-component knows how to handle.
        The method tries to avoid copying or reallocating memory.
        :return: Either a list of buffers or a two-dimensional buffer. The
            buffers are guaranteed to be C-contiguous and can thus be used
            as regular pointer-based arrays in C.
        """
        if dtw_cc is None:
            raise Exception('C library not loaded')
        if type(self.series) == list:
            # Validate each entry; numpy arrays are made C-contiguous in place.
            for i in range(len(self.series)):
                serie = self.series[i]
                if np is not None and isinstance(serie, np.ndarray):
                    if not serie.flags.c_contiguous:
                        serie = np.asarray(serie, order="C")
                        self.series[i] = serie
                elif isinstance(serie, array):
                    pass
                else:
                    raise Exception(
                        "Type of series not supported, "
                        "expected numpy.array or array.array but got {}".format(
                            type(serie)
                        )
                    )
            return dtw_cc.dtw_series_from_data(self.series)
        elif np is not None and isinstance(self.series, np.ndarray):
            if not self.series.flags.c_contiguous:
                logger.warning("Numpy array not C contiguous, copying data.")
                self.series = self.series.copy(order="C")
            if dtw_cc_numpy is None:
                # Fall back to the generic converter when the numpy-specific
                # C extension is missing.
                logger.warning("DTAIDistance C-extension for Numpy is not available. Proceeding anyway.")
                return dtw_cc.dtw_series_from_data(self.series)
            elif len(self.series.shape) == 3:
                return dtw_cc_numpy.dtw_series_from_numpy_ndim(self.series)
            else:
                return dtw_cc_numpy.dtw_series_from_numpy(self.series)
        return dtw_cc.dtw_series_from_data(self.series)
    def get_max_y(self):
        """Return the largest absolute value across all series (for plotting)."""
        max_y = 0
        if isinstance(self.series, np.ndarray) and len(self.series.shape) == 2:
            max_y = max(np.max(self.series), abs(np.min(self.series)))
        else:
            for serie in self.series:
                max_y = max(max_y, np.max(serie), abs(np.min(serie)))
        return max_y
    def __getitem__(self, item):
        return self.series[item]
    def __len__(self):
        return len(self.series)
    def __str__(self):
        return "SeriesContainer:\n{}".format(self.series)
    @staticmethod
    def wrap(series):
        """Wrap *series* in a SeriesContainer unless it already is one."""
        if isinstance(series, SeriesContainer):
            return series
        return SeriesContainer(series)
return SeriesContainer(series)
def recompile():
    """Rebuild the package's C extensions in place (runs setup.py build_ext)."""
    import subprocess as sp
    sp.run([sys.executable, "setup.py", "build_ext", "--inplace"], cwd=dtaidistance_dir)
def argmin(a):
    """Return the index of the first minimum of *a* (0 when *a* is empty)."""
    best_index = 0
    best_value = float("inf")
    for index, value in enumerate(a):
        if value < best_value:
            best_index, best_value = index, value
    return best_index
| [
"noreply@github.com"
] | gene-fingerprinting.noreply@github.com |
c0e6eeb9cd6943b127592a9c8741958012b412f8 | 275f2566ac6804a80084a24954635539f6cf73d8 | /src/profiles_project/profiles_api/serializers.py | 931ae4592288c5aea110a3982a2b6ea69026871c | [] | no_license | sanyam1997/profiles_rest_api | 7bbefffb42fe6c0a25aaca98f8cea6b6ccbd2b9e | bd5534a2bd9749648077648ee2559fe6e422f820 | refs/heads/master | 2021-07-16T11:03:08.551830 | 2020-03-25T06:31:07 | 2020-03-25T06:31:07 | 249,887,008 | 0 | 0 | null | 2020-03-25T05:43:48 | 2020-03-25T04:30:02 | Python | UTF-8 | Python | false | false | 1,166 | py | from rest_framework import serializers
from . import models
class HelloSerializer( serializers.Serializer ) :
    """Serializes a name field for testing out APIView."""
    # Short free-text name; capped at 10 characters.
    name = serializers.CharField( max_length = 10 )
class UserProfileSerializer( serializers.ModelSerializer ) :
    """A serializer for our user profile objects."""
    class Meta :
        model = models.UserProfile
        fields = ( 'id' , 'email' , 'name' , 'password' )
        # Password is accepted on input but never echoed back in responses.
        extra_kwargs = { 'password' : { 'write_only' : True } }
    def create( self , validated_data ) :
        """Create and return a new user with a properly hashed password."""
        user = models.UserProfile(
            email = validated_data[ 'email' ] ,
            name = validated_data[ 'name' ]
        )
        # set_password hashes; assigning the field directly would store plaintext.
        user.set_password( validated_data[ 'password' ] )
        user.save( )
        return user
#
# class ProfileFeedItemSerializer( serializers.ModelSerializer ) :
# """A serializer for profile feed items."""
#
# class Meta :
# model = models.ProfileFeedItem
# fields = ( 'id' , 'user_profile' , 'status_text' , 'created_on' )
# extra_kwargs = { 'user_profile' : { 'read_only' : True } }
| [
"srajpal@iu.edu"
] | srajpal@iu.edu |
cb49c1a45a045db2495be4fdf0417776fe56a168 | 26f50ed2357ca2949251547f70341fa80397100c | /public/tests/test.py | 99d5b8d52c926380251334265802d71ae79468d0 | [] | no_license | Sailesh-2209/sudoku | 29359e7d6b858f413546d07eddbc580d96447ca6 | cedeee6dc451f9f8501e4821629261dfdc26d3d7 | refs/heads/master | 2023-04-04T10:24:45.290087 | 2021-04-08T11:22:17 | 2021-04-08T11:22:17 | 354,564,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | import random
number_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
boardArray = []
for i in range(9):
boardArray.append([0] * 9)
def find_neighbours(row_index, column_index):
    """Return the other 8 cells of the 3x3 box containing (row_index, column_index).

    Cells are returned as [row, col] lists in row-major order, excluding the
    given cell itself. Works for any 0-based board index; the box origin is
    derived arithmetically instead of via a 9-way branch ladder.
    """
    # Top-left corner of the 3x3 box this cell belongs to.
    box_row = (row_index // 3) * 3
    box_col = (column_index // 3) * 3
    neighbours = []
    for i in range(box_row, box_row + 3):
        for j in range(box_col, box_col + 3):
            if i == row_index and j == column_index:
                continue
            neighbours.append([i, j])
    return neighbours
def is_valid(row_index, column_index, number, array):
    """Return True when *number* may be placed at the given cell.

    The number must not already appear in the cell's 3x3 box, its row,
    or its column.
    """
    if any(array[r][c] == number for r, c in find_neighbours(row_index, column_index)):
        return False
    if any(array[row_index][c] == number for c in range(9)):
        return False
    if any(array[r][column_index] == number for r in range(9)):
        return False
    return True
def generate_random_number():
    # Uniformly pick one of the candidate digits 1-9.
    return random.choice(number_list)
def generate_board():
    """Fill the board cell by cell with random digits that pass is_valid.

    NOTE(review): new_board_array is an alias of the module-level boardArray,
    not a copy — the global board is mutated in place, so a second call starts
    from the previous board's contents.
    """
    new_board_array = boardArray
    for i in range(9):
        for j in range(9):
            # Digits already tried (and rejected) for this cell.
            check_list = []
            while True:
                number = generate_random_number()
                if (number not in check_list):
                    check_list.append(number)
                    if is_valid(i, j, number, new_board_array):
                        new_board_array[i][j] = number
                        break
                # Escape hatch: give up on this cell once digits 1-8 have all
                # been tried. NOTE(review): 9 is missing from this check and a
                # given-up cell keeps its previous value — confirm intended.
                test = 1 in check_list and 2 in check_list and 3 in check_list and 4 in check_list and 5 in check_list and 6 in check_list and 7 in check_list and 8 in check_list
                if (test):
                    break
    return new_board_array
def display_board(array):
    """Print the grid: each cell as ' n |' and a 36-dash rule after each row."""
    for row in array:
        print(''.join(' {} |'.format(cell) for cell in row))
        print('-' * 36)
display_board(generate_board()) | [
"saileshr2209@gmail.com"
] | saileshr2209@gmail.com |
e1516bbfce063e8d56341ca439e8cf70dfc77eed | 2b5fd9d436a97726f852a12bab58b8d367f4866a | /api/urls.py | 2a552766f2d17ea023c0ec9ea230e41593ce2a2f | [] | no_license | lxlzyf/roe | 07ff551b142c0411acb7ca6f759ea98b40ad9b72 | 2d7f1b01e2456875d14a75c90d8397965215bcd3 | refs/heads/master | 2020-03-27T06:00:43.587235 | 2018-08-20T10:47:47 | 2018-08-20T10:47:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.conf.urls import url
from api.views import user_api
urlpatterns = [
url(r'^group/$', assets_api.group_list),
url(r'^group/(?P<id>[0-9]+)/$',assets_api.group_detail),
url(r'^user/$', user_api.user_list),
url(r'^user/(?P<id>[0-9]+)/$',user_api.user_detail),
] | [
"flc009@163.com"
] | flc009@163.com |
ca8e09851c924ccc0848f7fda9b5884cf3a62e0f | 093549e2949267dddbaabf13c1657e2d9e795858 | /gym_multi_envs/wrappers/multi_monitoring.py | 2db1785297f1ebd1ff4f1157286e2babb78576c0 | [] | no_license | bde-slither/gym_pygame_envs | a67b0f249ff984b5c1fa46261bb9d134c532eb9c | 2dedb5f681c34aa9b6dad93c6bf0dd110e6b8301 | refs/heads/master | 2021-09-13T14:12:16.846194 | 2018-04-16T05:51:45 | 2018-04-16T05:51:45 | 122,126,706 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 728 | py | """This file is a copy of multi_monitering.py from OpenAI MultiAgent compitition repo
Source:
https://github.com/openai/multiagent-competition.git"""
from gym.wrappers import Monitor
class MultiMonitor(Monitor):
def _before_step(self, action):
return
def _after_step(self, observation, reward, done, info):
if not self.enabled: return done
if done[0] and self.env_semantics_autoreset:
# For envs with BlockingReset wrapping VNCEnv, this observation will be the first one of the new episode
self._reset_video_recorder()
self.episode_id += 1
self._flush()
# Record video
self.video_recorder.capture_frame()
return done
| [
"srajanpaliwal@gmail.com"
] | srajanpaliwal@gmail.com |
bec43fe9a76a7e3c073462bb485746b7c24e7709 | 1e911d88cef46e9292ab893cfb61b72a30cc2bbd | /parser/__init__.py | 5f68269635806b782da514cf6d5e539a4865b81a | [
"MIT"
] | permissive | karthikdevel/fit | e7dad82ff626d4213ad6762667c556c87d198aa5 | 0d5e9c87ee34d9ca38f0838777614f5fb0205cb6 | refs/heads/master | 2021-03-12T20:41:29.967828 | 2014-07-20T06:19:03 | 2014-07-20T06:19:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17 | py | import topparser
| [
"karthikdevel@gmail.com"
] | karthikdevel@gmail.com |
22126b447591b464ad5a6d753bb645c15ea5ed06 | 531f8027890188eb037a9dbe68d63882eb2e0ead | /demos/ebeam/flash/flash_mismatch.py | 77b5f709db45ef41f935bc5ad434b0e1d972c21e | [] | no_license | Cartj/desy | 057947dd5e3e4fce085472dc145461cea68be8e9 | 9a1f12e7cf7040e28614e95dc5c49bc10d36b092 | refs/heads/master | 2020-03-21T06:01:54.315274 | 2016-08-16T13:04:56 | 2016-08-16T13:04:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | __author__ = 'Sergey Tomin'
from ocelot import *
from ocelot.gui import *
from pylab import *
exec( open("lattice_FLASH_S2E.py" ))
beam = Beam()
beam.E = 148.3148e-3 #in GeV ?!
beam.beta_x = 14.8821
beam.beta_y = 18.8146
beam.alpha_x = -0.61309
beam.alpha_y = -0.54569
beam.emit_xn = 1.5e-6
beam.emit_yn = 1.5e-6
beam.emit_x = beam.emit_xn / (beam.E / m_e_GeV)
beam.emit_y = beam.emit_yn / (beam.E / m_e_GeV)
beam.tlen=2e-3 # in m
tw0 = Twiss(beam)
lat = MagneticLattice(lattice)
tws_m=twiss(lat, tw0, nPoints=None)
plot_opt_func(lat, tws_m, top_plot = ["Dx", "Dy"], fig_name="optics")
#plt.show()
mx = 1.
my = 1.
Mx_b = []
My_b = []
S = []
for elem, tws in zip(lat.sequence,tws_m[1:]):
dk = 0.
if elem.type == "quadrupole":
dk_k = -0.05
#if elem.id in ["Q8TCOL", "Q2UBC3", "Q6DBC2"]:
# dk_k = np.random.rand()/100.
dk = dk_k*elem.k1
elem.k1 = elem.k1*(1. + dk_k)
mx += 0.5*((dk*elem.l*tws.beta_x*cos(2*tws.mux))**2 + (dk*elem.l*tws.beta_x*sin(2*tws.mux))**2)
my += 0.5*((dk*elem.l*tws.beta_y*cos(2*tws.muy))**2 + (dk*elem.l*tws.beta_y*sin(2*tws.muy))**2)
Mx_b.append(mx)
My_b.append(my)
S.append(tws.s)
lat = MagneticLattice(lattice)
tws_e=twiss(lat, tw0, nPoints=None)
t = tw0
x = linspace(-sqrt(t.beta_x-1e-7), sqrt(t.beta_x-1e-7), num=200)
#print t.beta_x - x*x
x1 = (sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
x2 = (-sqrt(t.beta_x - x*x) - t.alpha_x*x)/t.beta_x
a = sqrt(0.5*((t.beta_x + t.gamma_x) + sqrt((t.beta_x + t.gamma_x)**2 - 4.)))
theta = arctan(-2.*t.alpha_x/(t.beta_x - t.gamma_x))/2.
t = linspace(0, 2*pi, num=100)
xe = a*cos(t)*cos(theta) - 1./a*sin(t)*sin(theta)
ye = a*cos(t)*sin(theta) + 1./a*sin(t)*cos(theta)
plt.plot(x, x1, x, x2)
plt.plot(xe, ye)
plt.show()
Mx = []
My = []
Mx2 = []
My2 = []
for tm, te in zip(tws_m, tws_e):
bx_n = te.beta_x/tm.beta_x
by_n = te.beta_y/tm.beta_y
ax_n = -te.alpha_x + tm.alpha_x*bx_n
ay_n = -te.alpha_y + tm.alpha_y*by_n
gx_n = -2.*te.alpha_x*tm.alpha_x + tm.alpha_x**2*bx_n + tm.beta_x*te.gamma_x
gy_n = -2.*te.alpha_y*tm.alpha_y + tm.alpha_y**2*by_n + tm.beta_y*te.gamma_y
mx = 0.5*(bx_n + gx_n) + sqrt((bx_n + gx_n)**2 - 4.)
#print (by_n + gy_n)**2 - 4.
my = 0.5*(by_n + gy_n) + sqrt((by_n + gy_n)**2 - 4.)
Mx.append(sqrt(mx))
My.append(sqrt(my))
Mx2.append(sqrt(0.5*(tm.beta_x*te.gamma_x - 2.*te.alpha_x*tm.alpha_x + te.beta_x*tm.gamma_x)))
My2.append(sqrt(0.5*(tm.beta_y*te.gamma_y - 2.*te.alpha_y*tm.alpha_y + te.beta_y*tm.gamma_y)))
s = [p.s for p in tws_m]
bx_e = [p.beta_x for p in tws_e]
bx_m = [p.beta_x for p in tws_m]
plt.plot(s, bx_m,"r", s, bx_e, "b")
plt.show()
plt.plot(s, Mx, "r", s, My, "b")
#plt.plot(s, Mx2, "r.", s, My2, "b.")
plt.plot(S, Mx_b, "ro-", S, My_b, "bo-")
plt.show()
| [
"tomin.sergey@gmail.com"
] | tomin.sergey@gmail.com |
1e6316a99db3a784c21de857efb96255ffe33f3e | c49a19fc8846101c9ff6a9a127550311b64b2d47 | /tests/test_InsertStatement.py | 5c6bfbd506c87345f8c902613665bc0a29a5c164 | [] | no_license | EvanGrill/CS457_Database_Management_System_Architecture | 5c201c67d330561c255387ec328ac17e1e248991 | e3f60ca88d4af13162bf608d798c9aa6d8be29af | refs/heads/master | 2020-03-18T16:57:19.296577 | 2018-06-20T02:57:11 | 2018-06-20T02:57:11 | 134,995,679 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py |
import unittest
import config
from InsertStatement import InsertStatement
class TestInsertStatement(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_execute(self):
pass
if __name__ == '__main__':
unittest.main()
| [
"grille@me.com"
] | grille@me.com |
4778c6986b6120a7ef560780ffc43c77d358ed22 | 4c9580b2e09e2b000e27a1c9021b12cf2747f56a | /chapter13/xiaoyu_mall/xiaoyu_mall/apps/areas/migrations/0001_initial.py | 079ebb7f05049decffb2551a21f8dbc383e69e82 | [] | no_license | jzplyy/xiaoyue_mall | 69072c0657a6878a4cf799b8c8218cc7d88c8d12 | 4f9353d6857d1bd7dc54151ca8b34dcb4671b8dc | refs/heads/master | 2023-06-26T02:48:03.103635 | 2021-07-22T15:51:07 | 2021-07-22T15:51:07 | 388,514,311 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # Generated by Django 2.2.3 on 2019-11-15 06:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=20, verbose_name='名称')),
('parent', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='subs', to='areas.Area', verbose_name='上级行政区划')),
],
options={
'verbose_name': '省市区',
'verbose_name_plural': '省市区',
'db_table': 'tb_areas',
},
),
]
| [
"jzplyy@126.com"
] | jzplyy@126.com |
aaa181dee0af914a8a8cbeccec8f6850df142d4a | 93713f46f16f1e29b725f263da164fed24ebf8a8 | /Library/lib/python3.7/site-packages/docutils/writers/html4css1/__init__.py | f87a1055b0c23f9253762d020667a7431458732a | [
"BSD-3-Clause"
] | permissive | holzschu/Carnets | b83d15136d25db640cea023abb5c280b26a9620e | 1ad7ec05fb1e3676ac879585296c513c3ee50ef9 | refs/heads/master | 2023-02-20T12:05:14.980685 | 2023-02-13T15:59:23 | 2023-02-13T15:59:23 | 167,671,526 | 541 | 36 | BSD-3-Clause | 2022-11-29T03:08:22 | 2019-01-26T09:26:46 | Python | UTF-8 | Python | false | false | 33,869 | py | # $Id: __init__.py 8035 2017-02-13 22:01:47Z milde $
# Author: David Goodger
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the XHTML version 1.0 Transitional DTD
(*almost* strict). The output contains a minimum of formatting
information. The cascading style sheet "html4css1.css" is required
for proper viewing with a modern graphical browser.
"""
__docformat__ = 'reStructuredText'
import os.path
import docutils
from docutils import frontend, nodes, writers, io
from docutils.transforms import writer_aux
from docutils.writers import _html_base
class Writer(writers._html_base.Writer):
supported = ('html', 'html4', 'html4css1', 'xhtml', 'xhtml10')
"""Formats this writer supports."""
default_stylesheets = ['html4css1.css']
default_stylesheet_dirs = ['.',
os.path.abspath(os.path.dirname(__file__)),
# for math.css
os.path.abspath(os.path.join(
os.path.dirname(os.path.dirname(__file__)), 'html5_polyglot'))
]
default_template = 'template.txt'
default_template_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), default_template)
settings_spec = (
'HTML-Specific Options',
None,
(('Specify the template file (UTF-8 encoded). Default is "%s".'
% default_template_path,
['--template'],
{'default': default_template_path, 'metavar': '<file>'}),
('Comma separated list of stylesheet URLs. '
'Overrides previous --stylesheet and --stylesheet-path settings.',
['--stylesheet'],
{'metavar': '<URL[,URL,...]>', 'overrides': 'stylesheet_path',
'validator': frontend.validate_comma_separated_list}),
('Comma separated list of stylesheet paths. '
'Relative paths are expanded if a matching file is found in '
'the --stylesheet-dirs. With --link-stylesheet, '
'the path is rewritten relative to the output HTML file. '
'Default: "%s"' % ','.join(default_stylesheets),
['--stylesheet-path'],
{'metavar': '<file[,file,...]>', 'overrides': 'stylesheet',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheets}),
('Embed the stylesheet(s) in the output HTML file. The stylesheet '
'files must be accessible during processing. This is the default.',
['--embed-stylesheet'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Link to the stylesheet(s) in the output HTML file. '
'Default: embed stylesheets.',
['--link-stylesheet'],
{'dest': 'embed_stylesheet', 'action': 'store_false'}),
('Comma-separated list of directories where stylesheets are found. '
'Used by --stylesheet-path when expanding relative path arguments. '
'Default: "%s"' % default_stylesheet_dirs,
['--stylesheet-dirs'],
{'metavar': '<dir[,dir,...]>',
'validator': frontend.validate_comma_separated_list,
'default': default_stylesheet_dirs}),
('Specify the initial header level. Default is 1 for "<h1>". '
'Does not affect document title & subtitle (see --no-doc-title).',
['--initial-header-level'],
{'choices': '1 2 3 4 5 6'.split(), 'default': '1',
'metavar': '<level>'}),
('Specify the maximum width (in characters) for one-column field '
'names. Longer field names will span an entire row of the table '
'used to render the field list. Default is 14 characters. '
'Use 0 for "no limit".',
['--field-name-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Specify the maximum width (in characters) for options in option '
'lists. Longer options will span an entire row of the table used '
'to render the option list. Default is 14 characters. '
'Use 0 for "no limit".',
['--option-limit'],
{'default': 14, 'metavar': '<level>',
'validator': frontend.validate_nonnegative_int}),
('Format for footnote references: one of "superscript" or '
'"brackets". Default is "brackets".',
['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>',
'overrides': 'trim_footnote_reference_space'}),
('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'],
{'choices': ['dash', 'parentheses', 'parens', 'none'],
'default': 'dash', 'metavar': '<format>'}),
('Remove extra vertical whitespace between items of "simple" bullet '
'lists and enumerated lists. Default: enabled.',
['--compact-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple bullet and enumerated lists.',
['--no-compact-lists'],
{'dest': 'compact_lists', 'action': 'store_false'}),
('Remove extra vertical whitespace between items of simple field '
'lists. Default: enabled.',
['--compact-field-lists'],
{'default': 1, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Disable compact simple field lists.',
['--no-compact-field-lists'],
{'dest': 'compact_field_lists', 'action': 'store_false'}),
('Added to standard table classes. '
'Defined styles: "borderless". Default: ""',
['--table-style'],
{'default': ''}),
('Math output format, one of "MathML", "HTML", "MathJax" '
'or "LaTeX". Default: "HTML math.css"',
['--math-output'],
{'default': 'HTML math.css'}),
('Omit the XML declaration. Use with caution.',
['--no-xml-declaration'],
{'dest': 'xml_declaration', 'default': 1, 'action': 'store_false',
'validator': frontend.validate_boolean}),
('Obfuscate email addresses to confuse harvesters while still '
'keeping email links usable with standards-compliant browsers.',
['--cloak-email-addresses'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),))
config_section = 'html4css1 writer'
def __init__(self):
self.parts = {}
self.translator_class = HTMLTranslator
class HTMLTranslator(writers._html_base.HTMLTranslator):
"""
The html4css1 writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in older browsers (although they really
shouldn't).
The html5_polyglot writer solves this using CSS2.
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respectively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
# The following definitions are required for display in browsers limited
# to CSS1 or backwards compatible behaviour of the writer:
doctype = (
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
content_type = ('<meta http-equiv="Content-Type"'
' content="text/html; charset=%s" />\n')
content_type_mathml = ('<meta http-equiv="Content-Type"'
' content="application/xhtml+xml; charset=%s" />\n')
# encode also non-breaking space
special_characters = dict(_html_base.HTMLTranslator.special_characters)
special_characters[0xa0] = ' '
# use character reference for dash (not valid in HTML5)
attribution_formats = {'dash': ('—', ''),
'parentheses': ('(', ')'),
'parens': ('(', ')'),
'none': ('', '')}
# ersatz for first/last pseudo-classes missing in CSS1
def set_first_last(self, node):
self.set_class_on_child(node, 'first', 0)
self.set_class_on_child(node, 'last', -1)
# add newline after opening tag
def visit_address(self, node):
self.visit_docinfo_item(node, 'address', meta=False)
self.body.append(self.starttag(node, 'pre', CLASS='address'))
# ersatz for first/last pseudo-classes
def visit_admonition(self, node):
node['classes'].insert(0, 'admonition')
self.body.append(self.starttag(node, 'div'))
self.set_first_last(node)
# author, authors: use <br> instead of paragraphs
def visit_author(self, node):
if isinstance(node.parent, nodes.authors):
if self.author_in_authors:
self.body.append('\n<br />')
else:
self.visit_docinfo_item(node, 'author')
def depart_author(self, node):
if isinstance(node.parent, nodes.authors):
self.author_in_authors = True
else:
self.depart_docinfo_item()
def visit_authors(self, node):
self.visit_docinfo_item(node, 'authors')
self.author_in_authors = False # initialize
def depart_authors(self, node):
self.depart_docinfo_item()
# use "width" argument insted of "style: 'width'":
def visit_colspec(self, node):
self.colspecs.append(node)
# "stubs" list is an attribute of the tgroup element:
node.parent.stubs.append(node.attributes.get('stub'))
#
def depart_colspec(self, node):
# write out <colgroup> when all colspecs are processed
if isinstance(node.next_node(descend=False, siblings=True),
nodes.colspec):
return
if 'colwidths-auto' in node.parent.parent['classes'] or (
'colwidths-auto' in self.settings.table_style and
('colwidths-given' not in node.parent.parent['classes'])):
return
total_width = sum(node['colwidth'] for node in self.colspecs)
self.body.append(self.starttag(node, 'colgroup'))
for node in self.colspecs:
colwidth = int(node['colwidth'] * 100.0 / total_width + 0.5)
self.body.append(self.emptytag(node, 'col',
width='%i%%' % colwidth))
self.body.append('</colgroup>\n')
# Compact lists:
# exclude definition lists and field lists (non-compact by default)
def is_compactable(self, node):
return ('compact' in node['classes']
or (self.settings.compact_lists
and 'open' not in node['classes']
and (self.compact_simple
or self.topic_classes == ['contents']
# TODO: self.in_contents
or self.check_simple_list(node))))
# citations: Use table for bibliographic references.
def visit_citation(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils citation',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def depart_citation(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
# insert classifier-delimiter (not required with CSS2)
def visit_classifier(self, node):
self.body.append(' <span class="classifier-delimiter">:</span> ')
self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))
# ersatz for first/last pseudo-classes
def visit_definition(self, node):
self.body.append('</dt>\n')
self.body.append(self.starttag(node, 'dd', ''))
self.set_first_last(node)
# don't add "simple" class value
def visit_definition_list(self, node):
self.body.append(self.starttag(node, 'dl', CLASS='docutils'))
# use a table for description lists
def visit_description(self, node):
self.body.append(self.starttag(node, 'td', ''))
self.set_first_last(node)
def depart_description(self, node):
self.body.append('</td>')
# use table for docinfo
def visit_docinfo(self, node):
self.context.append(len(self.body))
self.body.append(self.starttag(node, 'table',
CLASS='docinfo',
frame="void", rules="none"))
self.body.append('<col class="docinfo-name" />\n'
'<col class="docinfo-content" />\n'
'<tbody valign="top">\n')
self.in_docinfo = True
def depart_docinfo(self, node):
self.body.append('</tbody>\n</table>\n')
self.in_docinfo = False
start = self.context.pop()
self.docinfo = self.body[start:]
self.body = []
def visit_docinfo_item(self, node, name, meta=True):
if meta:
meta_tag = '<meta name="%s" content="%s" />\n' \
% (name, self.attval(node.astext()))
self.add_meta(meta_tag)
self.body.append(self.starttag(node, 'tr', ''))
self.body.append('<th class="docinfo-name">%s:</th>\n<td>'
% self.language.labels[name])
if len(node):
if isinstance(node[0], nodes.Element):
node[0]['classes'].append('first')
if isinstance(node[-1], nodes.Element):
node[-1]['classes'].append('last')
def depart_docinfo_item(self):
self.body.append('</td></tr>\n')
# add newline after opening tag
def visit_doctest_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))
# insert an NBSP into empty cells, ersatz for first/last
def visit_entry(self, node):
writers._html_base.HTMLTranslator.visit_entry(self, node)
if len(node) == 0: # empty cell
self.body.append(' ')
self.set_first_last(node)
# ersatz for first/last pseudo-classes
def visit_enumerated_list(self, node):
"""
The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
cannot be emulated in CSS1 (HTML 5 reincludes it).
"""
atts = {}
if 'start' in node:
atts['start'] = node['start']
if 'enumtype' in node:
atts['class'] = node['enumtype']
# @@@ To do: prefix, suffix. How? Change prefix/suffix to a
# single "format" attribute? Use CSS2?
old_compact_simple = self.compact_simple
self.context.append((self.compact_simple, self.compact_p))
self.compact_p = None
self.compact_simple = self.is_compactable(node)
if self.compact_simple and not old_compact_simple:
atts['class'] = (atts.get('class', '') + ' simple').strip()
self.body.append(self.starttag(node, 'ol', **atts))
def depart_enumerated_list(self, node):
self.compact_simple, self.compact_p = self.context.pop()
self.body.append('</ol>\n')
# use table for field-list:
def visit_field(self, node):
self.body.append(self.starttag(node, 'tr', '', CLASS='field'))
def depart_field(self, node):
self.body.append('</tr>\n')
def visit_field_body(self, node):
self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
self.set_class_on_child(node, 'first', 0)
field = node.parent
if (self.compact_field_list or
isinstance(field.parent, nodes.docinfo) or
field.parent.index(field) == len(field.parent) - 1):
# If we are in a compact list, the docinfo, or if this is
# the last field of the field list, do not add vertical
# space after last element.
self.set_class_on_child(node, 'last', -1)
def depart_field_body(self, node):
self.body.append('</td>\n')
def visit_field_list(self, node):
self.context.append((self.compact_field_list, self.compact_p))
self.compact_p = None
if 'compact' in node['classes']:
self.compact_field_list = True
elif (self.settings.compact_field_lists
and 'open' not in node['classes']):
self.compact_field_list = True
if self.compact_field_list:
for field in node:
field_body = field[-1]
assert isinstance(field_body, nodes.field_body)
children = [n for n in field_body
if not isinstance(n, nodes.Invisible)]
if not (len(children) == 0 or
len(children) == 1 and
isinstance(children[0],
(nodes.paragraph, nodes.line_block))):
self.compact_field_list = False
break
self.body.append(self.starttag(node, 'table', frame='void',
rules='none',
CLASS='docutils field-list'))
self.body.append('<col class="field-name" />\n'
'<col class="field-body" />\n'
'<tbody valign="top">\n')
def depart_field_list(self, node):
self.body.append('</tbody>\n</table>\n')
self.compact_field_list, self.compact_p = self.context.pop()
def visit_field_name(self, node):
atts = {}
if self.in_docinfo:
atts['class'] = 'docinfo-name'
else:
atts['class'] = 'field-name'
if ( self.settings.field_name_limit
and len(node.astext()) > self.settings.field_name_limit):
atts['colspan'] = 2
self.context.append('</tr>\n'
+ self.starttag(node.parent, 'tr', '',
CLASS='field')
+ '<td> </td>')
else:
self.context.append('')
self.body.append(self.starttag(node, 'th', '', **atts))
def depart_field_name(self, node):
self.body.append(':</th>')
self.body.append(self.context.pop())
# use table for footnote text
def visit_footnote(self, node):
self.body.append(self.starttag(node, 'table',
CLASS='docutils footnote',
frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<tbody valign="top">\n'
'<tr>')
self.footnote_backrefs(node)
def footnote_backrefs(self, node):
backlinks = []
backrefs = node['backrefs']
if self.settings.footnote_backlinks and backrefs:
if len(backrefs) == 1:
self.context.append('')
self.context.append('</a>')
self.context.append('<a class="fn-backref" href="#%s">'
% backrefs[0])
else:
# Python 2.4 fails with enumerate(backrefs, 1)
for (i, backref) in enumerate(backrefs):
backlinks.append('<a class="fn-backref" href="#%s">%s</a>'
% (backref, i+1))
self.context.append('<em>(%s)</em> ' % ', '.join(backlinks))
self.context += ['', '']
else:
self.context.append('')
self.context += ['', '']
# If the node does not only consist of a label.
if len(node) > 1:
# If there are preceding backlinks, we do not set class
# 'first', because we need to retain the top-margin.
if not backlinks:
node[1]['classes'].append('first')
node[-1]['classes'].append('last')
def depart_footnote(self, node):
self.body.append('</td></tr>\n'
'</tbody>\n</table>\n')
# insert markers in text as pseudo-classes are not supported in CSS1:
def visit_footnote_reference(self, node):
href = '#' + node['refid']
format = self.settings.footnote_references
if format == 'brackets':
suffix = '['
self.context.append(']')
else:
assert format == 'superscript'
suffix = '<sup>'
self.context.append('</sup>')
self.body.append(self.starttag(node, 'a', suffix,
CLASS='footnote-reference', href=href))
def depart_footnote_reference(self, node):
self.body.append(self.context.pop() + '</a>')
# just pass on generated text
def visit_generated(self, node):
pass
# Image types to place in an <object> element
# SVG not supported by IE up to version 8
# (html4css1 strives for IE6 compatibility)
object_image_types = {'.svg': 'image/svg+xml',
'.swf': 'application/x-shockwave-flash'}
# use table for footnote text,
# context added in footnote_backrefs.
def visit_label(self, node):
self.body.append(self.starttag(node, 'td', '%s[' % self.context.pop(),
CLASS='label'))
def depart_label(self, node):
self.body.append(']%s</td><td>%s' % (self.context.pop(), self.context.pop()))
# ersatz for first/last pseudo-classes
def visit_list_item(self, node):
self.body.append(self.starttag(node, 'li', ''))
if len(node):
node[0]['classes'].append('first')
# use <tt> (not supported by HTML5),
# cater for limited styling options in CSS1 using hard-coded NBSPs
def visit_literal(self, node):
# special case: "code" role
classes = node.get('classes', [])
if 'code' in classes:
# filter 'code' from class arguments
node['classes'] = [cls for cls in classes if cls != 'code']
self.body.append(self.starttag(node, 'code', ''))
return
self.body.append(
self.starttag(node, 'tt', '', CLASS='docutils literal'))
text = node.astext()
for token in self.words_and_spaces.findall(text):
if token.strip():
# Protect text like "--an-option" and the regular expression
# ``[+]?(\d+(\.\d*)?|\.\d+)`` from bad line wrapping
if self.in_word_wrap_point.search(token):
self.body.append('<span class="pre">%s</span>'
% self.encode(token))
else:
self.body.append(self.encode(token))
elif token in ('\n', ' '):
# Allow breaks at whitespace:
self.body.append(token)
else:
# Protect runs of multiple spaces; the last space can wrap:
self.body.append(' ' * (len(token) - 1) + ' ')
self.body.append('</tt>')
# Content already processed:
raise nodes.SkipNode
# add newline after opening tag, don't use <code> for code
def visit_literal_block(self, node):
self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))
# add newline
def depart_literal_block(self, node):
self.body.append('\n</pre>\n')
# use table for option list
def visit_option_group(self, node):
atts = {}
if ( self.settings.option_limit
and len(node.astext()) > self.settings.option_limit):
atts['colspan'] = 2
self.context.append('</tr>\n<tr><td> </td>')
else:
self.context.append('')
self.body.append(
self.starttag(node, 'td', CLASS='option-group', **atts))
self.body.append('<kbd>')
self.context.append(0) # count number of options
def depart_option_group(self, node):
self.context.pop()
self.body.append('</kbd></td>\n')
self.body.append(self.context.pop())
def visit_option_list(self, node):
self.body.append(
self.starttag(node, 'table', CLASS='docutils option-list',
frame="void", rules="none"))
self.body.append('<col class="option" />\n'
'<col class="description" />\n'
'<tbody valign="top">\n')
def depart_option_list(self, node):
self.body.append('</tbody>\n</table>\n')
def visit_option_list_item(self, node):
self.body.append(self.starttag(node, 'tr', ''))
def depart_option_list_item(self, node):
self.body.append('</tr>\n')
# Omit <p> tags to produce visually compact lists (less vertical
# whitespace) as CSS styling requires CSS2.
def should_be_compact_paragraph(self, node):
"""
Determine if the <p> tags around paragraph ``node`` can be omitted.
"""
if (isinstance(node.parent, nodes.document) or
isinstance(node.parent, nodes.compound)):
# Never compact paragraphs in document or compound.
return False
for key, value in node.attlist():
if (node.is_not_default(key) and
not (key == 'classes' and value in
([], ['first'], ['last'], ['first', 'last']))):
# Attribute which needs to survive.
return False
first = isinstance(node.parent[0], nodes.label) # skip label
for child in node.parent.children[first:]:
# only first paragraph can be compact
if isinstance(child, nodes.Invisible):
continue
if child is node:
break
return False
parent_length = len([n for n in node.parent if not isinstance(
n, (nodes.Invisible, nodes.label))])
if ( self.compact_simple
or self.compact_field_list
or self.compact_p and parent_length == 1):
return True
return False
def visit_paragraph(self, node):
if self.should_be_compact_paragraph(node):
self.context.append('')
else:
self.body.append(self.starttag(node, 'p', ''))
self.context.append('</p>\n')
def depart_paragraph(self, node):
self.body.append(self.context.pop())
# ersatz for first/last pseudo-classes
def visit_sidebar(self, node):
self.body.append(
self.starttag(node, 'div', CLASS='sidebar'))
self.set_first_last(node)
self.in_sidebar = True
# <sub> not allowed in <pre>
def visit_subscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append(self.starttag(node, 'span', '',
CLASS='subscript'))
else:
self.body.append(self.starttag(node, 'sub', ''))
def depart_subscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append('</span>')
else:
self.body.append('</sub>')
# Use <h*> for subtitles (deprecated in HTML 5)
def visit_subtitle(self, node):
if isinstance(node.parent, nodes.sidebar):
self.body.append(self.starttag(node, 'p', '',
CLASS='sidebar-subtitle'))
self.context.append('</p>\n')
elif isinstance(node.parent, nodes.document):
self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
self.context.append('</h2>\n')
self.in_document_title = len(self.body)
elif isinstance(node.parent, nodes.section):
tag = 'h%s' % (self.section_level + self.initial_header_level - 1)
self.body.append(
self.starttag(node, tag, '', CLASS='section-subtitle') +
self.starttag({}, 'span', '', CLASS='section-subtitle'))
self.context.append('</span></%s>\n' % tag)
def depart_subtitle(self, node):
self.body.append(self.context.pop())
if self.in_document_title:
self.subtitle = self.body[self.in_document_title:-1]
self.in_document_title = 0
self.body_pre_docinfo.extend(self.body)
self.html_subtitle.extend(self.body)
del self.body[:]
# <sup> not allowed in <pre> in HTML 4
def visit_superscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append(self.starttag(node, 'span', '',
CLASS='superscript'))
else:
self.body.append(self.starttag(node, 'sup', ''))
def depart_superscript(self, node):
if isinstance(node.parent, nodes.literal_block):
self.body.append('</span>')
else:
self.body.append('</sup>')
# <tt> element deprecated in HTML 5
def visit_system_message(self, node):
self.body.append(self.starttag(node, 'div', CLASS='system-message'))
self.body.append('<p class="system-message-title">')
backref_text = ''
if len(node['backrefs']):
backrefs = node['backrefs']
if len(backrefs) == 1:
backref_text = ('; <em><a href="#%s">backlink</a></em>'
% backrefs[0])
else:
i = 1
backlinks = []
for backref in backrefs:
backlinks.append('<a href="#%s">%s</a>' % (backref, i))
i += 1
backref_text = ('; <em>backlinks: %s</em>'
% ', '.join(backlinks))
if node.hasattr('line'):
line = ', line %s' % node['line']
else:
line = ''
self.body.append('System Message: %s/%s '
'(<tt class="docutils">%s</tt>%s)%s</p>\n'
% (node['type'], node['level'],
self.encode(node['source']), line, backref_text))
# "hard coded" border setting
    def visit_table(self, node):
        """Open a <table>; border="1" is hard-coded for this writer."""
        # Save the current compact_p flag; it is restored in depart_table.
        self.context.append(self.compact_p)
        self.compact_p = True
        classes = ['docutils', self.settings.table_style]
        if 'align' in node:
            classes.append('align-%s' % node['align'])
        self.body.append(
            self.starttag(node, 'table', CLASS=' '.join(classes), border="1"))
    def depart_table(self, node):
        """Close the <table> and restore the saved compact_p flag."""
        self.compact_p = self.context.pop()
        self.body.append('</table>\n')
    # hard-coded vertical alignment
    def visit_tbody(self, node):
        """Open a <tbody>, top-aligned (hard-coded)."""
        self.body.append(self.starttag(node, 'tbody', valign='top'))
    #
    def depart_tbody(self, node):
        self.body.append('</tbody>\n')
    # hard-coded vertical alignment
    def visit_thead(self, node):
        """Open a <thead>, bottom-aligned (hard-coded)."""
        self.body.append(self.starttag(node, 'thead', valign='bottom'))
    #
    def depart_thead(self, node):
        self.body.append('</thead>\n')
class SimpleListChecker(writers._html_base.SimpleListChecker):

    """
    Raise `nodes.NodeFound` if non-simple list item is encountered.

    Here "simple" means a list item containing nothing other than a single
    paragraph, a simple list, or a paragraph followed by a simple list.
    """

    def visit_list_item(self, node):
        """Accept a single paragraph, optionally followed by a nested list."""
        # Invisible nodes (comments, targets, ...) never make an item complex.
        children = [child for child in node.children
                    if not isinstance(child, nodes.Invisible)]
        # A trailing bullet/enumerated list after a leading paragraph is
        # allowed — drop it before counting.
        if (children and isinstance(children[0], nodes.paragraph)
            and (isinstance(children[-1], nodes.bullet_list)
                 or isinstance(children[-1], nodes.enumerated_list))):
            children.pop()
        if len(children) <= 1:
            return
        else:
            raise nodes.NodeFound

    # def visit_bullet_list(self, node):
    #     pass

    # def visit_enumerated_list(self, node):
    #     pass

    # def visit_paragraph(self, node):
    #     raise nodes.SkipNode

    # Bug fix: visit_definition_list was defined twice with identical
    # bodies; the redundant second definition has been removed.
    def visit_definition_list(self, node):
        """Definition lists are never considered simple."""
        raise nodes.NodeFound

    def visit_docinfo(self, node):
        """Docinfo fields are never considered simple."""
        raise nodes.NodeFound
| [
"nicolas.holzschuch@inria.fr"
] | nicolas.holzschuch@inria.fr |
9351dffdb51450ee034544e7f3c1dbe72392fda7 | 2ba99b4bd9f1b97babfc8fc303b7c47f7fc52b47 | /prob4.py | 76f6c71353fd3129dd845305677a6b1fc87183dd | [] | no_license | nsheahan/euler | ffa76e686ca7a587f812b79741c0eae0c2056560 | 0fec3e7f054c627ff5ba235917179ea6d4a0b1a4 | refs/heads/master | 2020-04-10T13:57:18.211631 | 2013-08-26T18:45:21 | 2013-08-26T18:45:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | #http://projecteuler.net/problem=4
def ispalindrome(num):
    """Return True if the decimal representation of *num* is a palindrome."""
    strnum = str(num)
    # A string is a palindrome exactly when it equals its own reverse;
    # return the comparison directly instead of if/else True/False.
    return strnum == strnum[::-1]
# Project Euler 4: largest palindrome that is a product of two 3-digit numbers.
largest = 0
for i in range(100, 1000):
    # Start the inner loop at i: multiplication is commutative, so (i, j)
    # and (j, i) yield the same product — skipping the mirrored pairs
    # halves the work without changing the result.
    for j in range(i, 1000):
        product = i * j
        if ispalindrome(product) and product > largest:
            largest = product
print(largest)
"nsheahan2@gmail.com"
] | nsheahan2@gmail.com |
c83837ef391d94746e419ded7e6e8d6c9ecdac9e | bcfefb13038793c2b7554379e1ee083aba1c5469 | /ArticleSpider/ArticleSpider/spiders/lagou.py | 46cd88a0dcacec8bd35826f96a21625fa3220368 | [] | no_license | zf54274/SpiderDemo | 332081dbdd3f6a0282fc3ad8d33362cf7922463f | b2563f1fe38e8ad6b90c853884fdcb8b18b0036e | refs/heads/master | 2020-04-06T17:06:00.703124 | 2018-11-27T15:11:29 | 2018-11-27T15:11:29 | 157,640,952 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,137 | py | # -*- coding: utf-8 -*-
import scrapy
from datetime import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import LagouJobItemLoader, LagouJobItem
from ..utils.common import get_md5
class LagouSpider(CrawlSpider):
    """CrawlSpider for lagou.com job postings.

    Follows category/company/campus listing pages and extracts one
    ``LagouJobItem`` from every job-detail page (``jobs/<id>.html``).
    """
    name = 'lagou'
    allowed_domains = ['www.lagou.com']
    start_urls = ['https://www.lagou.com/']
    rules = (
        # Category, company and campus-job listing pages: follow links only.
        Rule(LinkExtractor(allow=r'zhaopin/.*'), follow=True),
        Rule(LinkExtractor(allow=r'gongsi/j\d+.html'), follow=True),
        Rule(LinkExtractor(allow=r'jobs/.*[?]isSchoolJob=1'), follow=True),
        # Job-detail pages: parse with parse_job and keep following.
        Rule(LinkExtractor(allow=r'jobs/\d+.html'), callback='parse_job', follow=True),
    )
    def parse_job(self, response):
        # Parse a lagou.com job-detail page into a LagouJobItem.
        item_loader = LagouJobItemLoader(item=LagouJobItem(), response=response)
        item_loader.add_css("title", ".job-name::attr(title)")
        item_loader.add_value("url", response.url)
        item_loader.add_value("url_object_id", get_md5(response.url))
        # salary_min and salary_max share one selector; presumably the item's
        # input processors split the "10k-20k" range — confirm in items.py.
        item_loader.add_css("salary_min", ".job_request .salary::text")
        item_loader.add_css("salary_max", ".job_request .salary::text")
        item_loader.add_xpath("job_city", "//*[@class='job_request']/p/span[2]/text()")
        item_loader.add_xpath("work_years", "//*[@class='job_request']/p/span[3]/text()")
        item_loader.add_xpath("degree_need", "//*[@class='job_request']/p/span[4]/text()")
        item_loader.add_xpath("job_type", "//*[@class='job_request']/p/span[5]/text()")
        item_loader.add_css('tags', '.position-label li::text')
        item_loader.add_css("publish_time", ".publish_time::text")
        item_loader.add_css("job_advantage", ".job-advantage p::text")
        item_loader.add_css("job_desc", ".job_bt div")
        item_loader.add_css("job_addr", ".work_addr")
        item_loader.add_css("company_name", "#job_company dt a img::attr(alt)")
        item_loader.add_css("company_url", "#job_company dt a::attr(href)")
        item_loader.add_value("crawl_time", datetime.now())
        job_item = item_loader.load_item()
        return job_item
| [
"gowther1@sina.com"
] | gowther1@sina.com |
c5e60a89ed2a73c9c155f1c67d66ad55d13bc4ba | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /17/addons/plugin.video.ukturk/resources/lib/scraper2.py | 0c1a6e03d1453afd6847bd928d43d611c2b92671 | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,184 | py | # coding: UTF-8
import sys
l111ll1llUK_Turk_No1 = sys.version_info [0] == 2
l11l1l11lUK_Turk_No1 = 2048
l111llll1UK_Turk_No1 = 7
def l11l1lUK_Turk_No1 (l1llll1lUK_Turk_No1):
    # Deobfuscation helper for this machine-obfuscated module.  The last
    # character of each blob is a per-string seed; every remaining character
    # is shifted back by 2048 (l11l1l11lUK_Turk_No1) plus a position-dependent
    # offset modulo 7 (l111llll1UK_Turk_No1), and the decoded text — which is
    # itself a Python string literal — is eval()'d.
    # NOTE(review): eval() on decoded data is inherently unsafe; tolerated
    # here only because all payloads are baked into this file.
    global l1l1ll1llUK_Turk_No1
    l11lllll1UK_Turk_No1 = ord (l1llll1lUK_Turk_No1 [-1])
    l11l111llUK_Turk_No1 = l1llll1lUK_Turk_No1 [:-1]
    l1lll1lllUK_Turk_No1 = l11lllll1UK_Turk_No1 % len (l11l111llUK_Turk_No1)
    # NOTE(review): this slice-and-reconcatenate rebuilds the same string
    # unchanged — it appears to be a no-op left over from the obfuscator.
    l1l11llllUK_Turk_No1 = l11l111llUK_Turk_No1 [:l1lll1lllUK_Turk_No1] + l11l111llUK_Turk_No1 [l1lll1lllUK_Turk_No1:]
    if l111ll1llUK_Turk_No1:
        # Python 2 path: build a unicode string with unichr().
        l1ll1llUK_Turk_No1 = unicode () .join ([unichr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
    else:
        # Python 3 path: chr() already produces unicode.
        l1ll1llUK_Turk_No1 = str () .join ([chr (ord (char) - l11l1l11lUK_Turk_No1 - (l11lllUK_Turk_No1 + l11lllll1UK_Turk_No1) % l111llll1UK_Turk_No1) for l11lllUK_Turk_No1, char in enumerate (l1l11llllUK_Turk_No1)])
    return eval (l1ll1llUK_Turk_No1)
import urllib,urllib2,re,os
def l11lll11l1UK_Turk_No1():
string=l11l1lUK_Turk_No1 (u"ࠨࠩැ")
link=l1llll111UK_Turk_No1(l11l1lUK_Turk_No1 (u"ࠤࡸࡹࡶ࠺࠰࠱ࡦࡶࡩࡦࡳࡧࡨ࠲ࡸࡩࡧࡱࡲࡸࡧࡧࡻ࠭ࡴࡶࡵࡩࡦࡳࠢෑ"))
events=re.compile(l11l1lUK_Turk_No1 (u"ࠪࡀࡹࡪ࠾࠽ࡵࡳࡥࡳࠦࡣࡣࡶࡷࡂࠨࡳࡱࡱࡵࡸ࠲ࡣࡰࡰࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡶࡃ࠭ි"),re.DOTALL).findall(link)
for event in events:
l11lll111lUK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠫࡁࡺࡤࠪ࠱ࡄ࠼ࡣࡴࠫ࠲࠰ࡅࠩ࠽࠱ࡷࡨࡃ࠭ී")).findall(event)
for day,date in l11lll111lUK_Turk_No1:
day=l11l1lUK_Turk_No1 (u"ࠬࡡࡃࡐࡎࡒࡖࠥࡦࡠࠫු")+day+l11l1lUK_Turk_No1 (u"࡛࠭࠰ࡅࡒࡐࡔࡘࠨ")
date=date.replace(l11l1lUK_Turk_No1 (u"ࠧࠩූ"),l11l1lUK_Turk_No1 (u"ࠨࠩ"))
time=re.compile(l11l1lUK_Turk_No1 (u"ࠩࡸࡩࠦࡣࡣࡶࡷࡂࠨࡢࡶࡦࡹࡦࠤࠣࡷࡹࡿࡦࠥࡧࡴࡲࡳ࠼ࠦ࠹࠹࠻࠴࠶࠶࠾ࡪࡴࡴࡴࡹࡨࡴ࠻ࡤࡲࡰࡩࡁࡦࡰࡰࡷ࠱ࡸࡺࡦ࠼ࠣ࠽ࡵࡾࠢࠪ࠱ࡄ࠼࠰ࡶࡧࡂࠬෘ")).findall(event)[0]
time=l11l1lUK_Turk_No1 (u"ࠪࡈࡕࡌࡐࡔࠣࡦࡱࡻࡥ࡞ࠪࠪෙ")+time+l11l1lUK_Turk_No1 (u"ࠫࡡࡄࡑࡏࡓࡗࡣࠧේ")
l11lll1l11UK_Turk_No1=re.compile(l11l1lUK_Turk_No1 (u"ࠬࡂࡡࠡࡵࡷࡽࡱ࠽ࠣࡶࡨࡼࡹ࠳ࡤࡦࡥࡲࡶࡦࡺࡩࡰࡰ࠽ࡲࡴࡴࡥࠡࠣࡱࡵࡵࡲࡵࡣࡱࡸࡀࡩࡱࡵ࠾ࠨ࠻࠴࠶࠶࠸࠸ࡀࠨࠠࡩࡴࡨࡪࡂࠨࠨ࠭ࡂ࠭ࠧࠦࡴࡢࡴࡪࡩࡹࡃࠢࡠࡤࡥࡳࡱࠢࠪ࠱ࡄ࠼࠰ࡣࡁࡀ࠴ࡺࡤࠩෛ")).findall(event)
for url,l11lll11llUK_Turk_No1 in l11lll1l11UK_Turk_No1:
url=url
l11lll11llUK_Turk_No1=l11lll11llUK_Turk_No1
string=string+l11l1lUK_Turk_No1 (u"࠭࠾ࡸࡪࡳ࠾ࡰࡸࡺࡦࡀࠨࡷࡁ࠵ࡴࡪࡶࡩࡃࡢ࠽ࡵࡳࡳࡷࡺࡳࡥࡧࡹࡱࡄࠥࡴ࠾࠲ࡷࡵࡵࡲࡵࡵࡧࡩࡻ࡞ࡱࠫො")%(day+l11l1lUK_Turk_No1 (u"ࠧࠡࠩෝ")+time+l11l1lUK_Turk_No1 (u"ࠨࠢ࠰ࠤࠬෞ")+l11lll11llUK_Turk_No1,url)
string=string+l11l1lUK_Turk_No1 (u"ࠩࡸࡻࡣࡰࡤࡱࡄࡉࡣࡪࡩࡍࡲࡦ࠾࠲ࡸࡻࡣࡰࡤࡱࡄ࠾ࡩࡥࡳࡧࡲࡵࡀࡩࡥࡳࡧࡲࡵ࠾࠲ࡪࡦࡴࡡࡳࡶࡁࡠࡳࡂࡪࡶࡨࡱࡃࡢࠨෟ")
return string
def l1llll111UK_Turk_No1(url):
req = urllib2.Request(url)
req.add_header(l11l1lUK_Turk_No1 (u"࡙ࠪࡸࡲࡃࡪࡩࡳࡺࠧ"), l11l1lUK_Turk_No1 (u"ࠫࡒࡵࡺࡪࡥ࠴࠻࠱࡛ࠢࠫࡴࡤࡰࡹࡶࠤࡓ࡚ࠠ࠲࠲࠱࠴ࠦࡁࡱࡲࡩࡢࡌࡷ࠳࠺࠹࠷࠵࠹ࠤ࠭ࡑࡈࡕࡏࡏ࠰ࠥࡲࡩࡧࠣࡋࡪࡩࡰࠫࠣࡇࡸࡧ࠲࠹࠹࠴࠰࠴࠻࠸࠵࠴࠷࠲ࠢࡖࡥࡧࡲࡪ࠱࠸࠷࠼࠴࠳࠷ࠩ"))
response = urllib2.urlopen(req)
link=response.read()
return link | [
"esc0rtd3w@gmail.com"
] | esc0rtd3w@gmail.com |
427b6397c36c24e7539cdd30c899041cb84e0990 | 767a11794e16cd9ae162d0405f1320188736011b | /uploadfile.py | 35a76924dd8aa6799207b2a131dde00be15463b9 | [] | no_license | assilos/Angular-Flask-Test | 89cf24838615bae560aa92683bc042d28171472e | e6d7fdd5ec30c3edec97864d8209ae79046c1ef0 | refs/heads/master | 2022-12-09T12:10:53.594593 | 2020-09-05T00:28:42 | 2020-09-05T00:28:42 | 292,971,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | import os
import urllib.request
from uploadapp import app
from flask import Flask, request, redirect, jsonify
from werkzeug.utils import secure_filename
# Extensions accepted for upload (lower-case).  Set literal instead of
# set([...]) — same contents, idiomatic form.
ALLOWED_EXTENSIONS = {'txt', 'pdf'}
def allowed_file(filename):
    """Return True if *filename* has an allowed extension.

    The name must contain a dot; the extension after the last dot is
    compared case-insensitively against ALLOWED_EXTENSIONS.
    """
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def upload_file(file):
    """Handle a file-upload POST request and return a JSON Flask response.

    NOTE(review): the *file* argument is unused — it is immediately shadowed
    by ``request.files['file']``.  The signature is kept unchanged for
    backward compatibility with existing callers.

    :returns: a ``jsonify`` response — 201 on success, 400 on any failure
        (missing file part, empty filename, disallowed extension).
    """
    # check if the post request has the file part
    if 'file' not in request.files:
        resp = jsonify({'message' : 'No file part in the request'})
        resp.status_code = 400
        return resp
    file = request.files['file']
    if file.filename == '':
        resp = jsonify({'message' : 'No file selected for uploading'})
        resp.status_code = 400
        return resp
    if file and allowed_file(file.filename):
        # Sanitize the client-supplied name before touching the filesystem.
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        resp = jsonify({'message' : 'File successfully uploaded'})
        resp.status_code = 201
        return resp
    else:
        # Bug fix: the rejection message previously claimed png/jpg/jpeg/gif
        # were accepted, contradicting ALLOWED_EXTENSIONS (txt and pdf only).
        # Derive the list from the single source of truth instead.
        resp = jsonify({'message' : 'Allowed file types are %s'
                                    % ', '.join(sorted(ALLOWED_EXTENSIONS))})
        resp.status_code = 400
        return resp
| [
"noreply@github.com"
] | assilos.noreply@github.com |
16a563e4fe219f362ae2e8ff0746138a4700b5dc | 9d769574d51cd7fb0bdebe556cccfbffdd2846e1 | /Guess_Who/envs/__init__.py | f5cace9ed4661ecd0242a8bb1ccbae7cc40b5c57 | [] | no_license | alexfallin/Guess-Who | d037368980c35a3bca3b40a231a097cb44a64154 | e01dd51247c3889a7c435183c80a1c5a14e4ac71 | refs/heads/master | 2023-03-22T12:04:07.902277 | 2022-08-15T07:18:29 | 2022-08-15T07:18:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | from gym_foo.envs.guesswho_env import GuesswhoEnv | [
"cruleis@gmail.com"
] | cruleis@gmail.com |
fcec77cce9623016e66a0e472c9c2c74d8ebb661 | e5517f22fbdd8873b2a7515d38084370bc578c97 | /While/while.py | efa9859b98d2828a25759bb3598d00d5789d672d | [] | no_license | jhollis67/Python-Masterclass | 047b580e8d7adec06d1dc4d6c46e05f6924a963e | 7f0672e8318c2485389b70da81be8a1e9a25560f | refs/heads/master | 2020-03-13T18:08:40.804302 | 2018-05-16T10:47:42 | 2018-05-16T10:47:42 | 131,230,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 912 | py |
# for i in range(10):
# print("i is now {}".format(i))
# i = 0
# while i < 10:
# print("i is now {}".format(i))
# i += 1
#
# availableExits = ["east", "north east", "south"]
#
# chosenExit = ""
# while chosenExit not in availableExits:
# chosenExit = input("Please choose a direction: ")
# if chosenExit == "quit":
# print("Game Over")
# break
#
# else:
# print("Aren't you glad you got out of there?")
import random
# Number-guessing game: the player narrows in on a random value in 1..highest.
# Bug fix: a stray bare `i` statement stood here in the original, which
# raised NameError the moment the script ran; it has been removed.
highest = 10
answer = random.randint(1, highest)
print("Please guess a number between 1 and {}".format(highest))
guess = 0 # initialize to any number outside of the valid range
while guess != answer:
    guess = int(input())
    if guess < answer:
        print("Please guess higher")
    elif guess > answer: # guess must be greater than random number
        print("Please guess lower")
    else:
        print("Well done, you've guessed it")
"jhollis67@me.com"
] | jhollis67@me.com |
94ed5e380f49bf3d497d587c95ec1d3ec6e65bad | dcbedd4c06aa0cf78cf1d881a61f2a0cdb06005a | /(Keras) IMDB Dataset.py | 756f84210ce7f7a14cdf371a8ffa4145def4e726 | [] | no_license | KevinHooah/recurrent-dropout-experiments | 064243f403687a7e063a6464ce015d282a8a0dfb | 96b2aa2478fb46a252251c0b49354a2de40c7684 | refs/heads/master | 2020-08-29T23:43:01.440740 | 2019-08-07T03:43:23 | 2019-08-07T03:43:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,816 | py |
# coding: utf-8
# # (Keras) IMDB Dataset
# In[1]:
import numpy as np
from tensorflow.contrib.keras.python.keras.optimizers import SGD, RMSprop, Adagrad
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers.core import Dense, Dropout
from tensorflow.contrib.keras.python.keras.layers.embeddings import Embedding
from tensorflow.contrib.keras.python.keras.layers.recurrent import LSTM, GRU, SimpleRNN
from tensorflow.contrib.keras.python.keras.regularizers import l2
from tensorflow.contrib.keras.python.keras.optimizers import Adam
from tensorflow.contrib.keras.python.keras.preprocessing import sequence
from tensorflow.contrib.keras.python.keras.datasets import imdb
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from yaringal_callbacks import ModelTest
from yaringal_dataset import loader
get_ipython().magic('matplotlib inline')
plt.style.use('fivethirtyeight')
plt.rcParams["figure.figsize"] = (8, 5)
# Global params:
NB_WORDS = 20000        # vocabulary size passed to imdb.load_data
SKIP_TOP = 0            # unused in the visible code — kept as configured
TEST_SPLIT = 0.2        # unused in the visible code — kept as configured
INIT_SEED = 2017        # unused in the visible code — kept as configured
GLOBAL_SEED = 2018      # unused in the visible code — kept as configured
MAXLEN = 80             # pad/truncate every review to this many tokens
BATCH_SIZE = 128        # training batch size
TEST_BATCH_SIZE = 512   # batch size used by ModelTest evaluation
WEIGHT_DECAY = 1e-4     # default L2 regularization strength in get_model
# In[2]:
# Fix the NumPy RNG so runs are repeatable.
np.random.seed(100)
# In[3]:
# Load the IMDB sentiment dataset (top NB_WORDS most frequent words) and
# pad/truncate every review to MAXLEN tokens.
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=NB_WORDS)
print(len(X_train), 'train sequences')
print(len(X_test), 'test sequences')
print('Pad sequences (samples x time)')
X_train = sequence.pad_sequences(X_train, maxlen=MAXLEN)
X_test = sequence.pad_sequences(X_test, maxlen=MAXLEN)
print('x_train shape:', X_train.shape)
print('x_test shape:', X_test.shape)
# In[4]:
def get_model(idrop=0.2, edrop=0.1, odrop=0.25, rdrop=0.2, weight_decay=WEIGHT_DECAY):
    """Build and compile the IMDB sentiment LSTM classifier.

    :param idrop: LSTM input dropout rate
    :param edrop: dropout rate after the embedding layer (0 omits the layer)
    :param odrop: dropout rate before the output layer (0 omits the layer)
    :param rdrop: LSTM recurrent (hidden-to-hidden) dropout rate
    :param weight_decay: L2 regularization applied to all weight matrices
    :returns: a compiled Keras ``Sequential`` binary classifier (sigmoid
        output, binary cross-entropy, Adam at lr=1e-3)
    """
    model = Sequential()
    model.add(Embedding(NB_WORDS, 128, embeddings_regularizer=l2(weight_decay),
                        input_length=MAXLEN))  # , batch_input_shape=(batch_size, maxlen)))
    if edrop:
        model.add(Dropout(edrop))
    model.add(LSTM(128, kernel_regularizer=l2(weight_decay), recurrent_regularizer=l2(weight_decay),
                   bias_regularizer=l2(weight_decay), dropout=idrop, recurrent_dropout=rdrop))
    if odrop:
        model.add(Dropout(odrop))
    model.add(Dense(1, kernel_regularizer=l2(weight_decay),
                    bias_regularizer=l2(weight_decay), activation='sigmoid'))
    optimizer = Adam(1e-3)
    model.compile(loss='binary_crossentropy', metrics=["binary_accuracy"], optimizer=optimizer)
    return model
# ## Normal Variational LSTM (w/o Embedding Dropout)
# All models in this notebook do not have embedding dropout as Keras does not have such layer.
# Experiment 1: variational (Yarin Gal style) dropout on input/recurrent/output.
# In[5]:
print('Build model...')
model = get_model(idrop=0.25, edrop=0, odrop=0.25, rdrop=0.25, weight_decay=1e-4)
# In[6]:
# ModelTest evaluates the test set after every epoch, both with dropout
# approximated off ("std") and via MC sampling ("mc").
modeltest_1 = ModelTest(X_test, Yt=Y_test,
                        test_every_X_epochs=1, verbose=0,
                        loss='binary', batch_size=TEST_BATCH_SIZE)
# In[7]:
history_1 = model.fit(
    X_train, Y_train,
    verbose=2,
    shuffle=True,
    # validation_data=[X_test, Y_test],
    batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_1])
# In[11]:
# Each history entry appears to be (std loss, mc loss, std acc, mc acc) —
# inferred from the plot labels below; the [:18] slice looks like a manual
# truncation of this notebook run — TODO confirm.
best_epoch = np.argmin([x[1] for x in modeltest_1.history[:18]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
    modeltest_1.history[best_epoch-1][1],
    modeltest_1.history[best_epoch-1][3] * 100,
    best_epoch
))
# In[12]:
plt.title("Log Loss Comparison")
plt.plot(np.arange(len(modeltest_1.history)), [x[0] for x in modeltest_1.history], label="std")
plt.plot(np.arange(len(modeltest_1.history)), [x[1] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# In[13]:
plt.title("Accuracy Comparison")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[2] for x in modeltest_1.history], label="std")
plt.plot(np.arange(0, len(modeltest_1.history)), [x[3] for x in modeltest_1.history], "g-", label="mc")
plt.legend(loc='best')
# ## Standard LSTM
# I choose to keep a very low weight decay because assigning zero seems to cause some problems.
# Experiment 2: baseline with all dropout rates at zero.
# In[14]:
print('Build model...')
model = get_model(edrop=0, rdrop=0, odrop=0, idrop=0, weight_decay=1e-10)
# In[15]:
# T=1: a single forward pass — no MC sampling needed without dropout.
modeltest_2 = ModelTest(X_test, Yt=Y_test,
                        test_every_X_epochs=1, verbose=0, T=1,
                        loss='binary', batch_size=TEST_BATCH_SIZE)
# In[17]:
history_2 = model.fit(
    X_train, Y_train,
    verbose=2,
    shuffle=True,
    # validation_data=[X_test, Y_test],
    batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_2])
# In[25]:
best_epoch = np.argmin([x[1] for x in modeltest_2.history]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
    modeltest_2.history[best_epoch-1][1],
    modeltest_2.history[best_epoch-1][3] * 100,
    best_epoch
))
# ## LSTM with Standard Dropout (different mask at different time steps)
# Experiment 3: "naive" dropout layers around the LSTM, none inside it.
# In[20]:
print('Build model...')
model = get_model(edrop=0.25, rdrop=0, odrop=0.25, idrop=0, weight_decay=1e-4)
# In[21]:
modeltest_3 = ModelTest(X_test, Yt=Y_test,
                        test_every_X_epochs=1, verbose=0, T=10,
                        loss='binary', batch_size=TEST_BATCH_SIZE)
# In[22]:
history_3 =model.fit(
    X_train, Y_train,
    verbose=2,
    shuffle=True,
    # validation_data=[X_test, Y_test],
    batch_size=BATCH_SIZE, epochs=20, callbacks=[modeltest_3])
# In[24]:
best_epoch = np.argmin([x[1] for x in modeltest_3.history[:19]]) + 1
print("Best Loss: {:.4f} Acc: {:.2f}% Best Epoch: {}".format(
    modeltest_3.history[best_epoch-1][1],
    modeltest_3.history[best_epoch-1][3] * 100,
    best_epoch
))
# ## Visualizations
# Compare the three runs: training accuracy, MC-vs-approx loss histogram,
# and validation loss/accuracy curves.
# NOTE(review): the "varational"/"navie" plot labels below are typos in
# runtime strings; left unchanged here to keep behavior identical.
# In[40]:
bins = np.arange(-0.1, 0.035, 0.01)
# In[53]:
len(history_2.history["binary_accuracy"])
# In[54]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Accuracy Comparison - Training Set")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
         np.array(history_1.history["binary_accuracy"][:20]) * 100, label="variational")
plt.plot(np.arange(len(history_2.history["binary_accuracy"])),
         np.array(history_2.history["binary_accuracy"]) * 100, "g-", label="no dropout")
plt.plot(np.arange(len(history_3.history["binary_accuracy"])),
         np.array(history_3.history["binary_accuracy"]) * 100, "y-", label="naive dropout")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy")
plt.subplot(1, 2, 2)
plt.title("(MC - Approx) Histogram")
plt.hist([x[1] - x[0] for x in modeltest_1.history[:17]], bins=bins, alpha=0.5, label="varational")
plt.hist([x[1] - x[0] for x in modeltest_3.history[:17]], bins=bins, alpha=0.5, label="navie dropout")
plt.legend(loc='best')
plt.xlabel("Difference in Loss")
plt.ylabel("Count")
plt.xticks(fontsize=8, rotation=0)
# In[60]:
plt.figure(figsize=(12, 4))
plt.subplot(1, 2, 1)
plt.title("Log Loss Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[1] for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[1] for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Log Loss")
plt.subplot(1, 2, 2)
plt.title("Accuracy Comparison - Validation Set")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_1.history[:20]], "b-", label="variational(mc)")
plt.plot(np.arange(len(modeltest_2.history)), [x[3] * 100 for x in modeltest_2.history], "g-", label="no dropout")
plt.plot(np.arange(len(modeltest_3.history)), [x[3] * 100 for x in modeltest_3.history], "y-", label="naive dropout(mc)")
plt.legend(loc='best')
plt.xlabel("epochs")
plt.ylabel("Accuracy (%)")
# In[ ]:
| [
"shuanck@gmail.com"
] | shuanck@gmail.com |
87a745f6ea4d61a77ceb94697256d9d98ab1870d | f8b2d2d5e97ddfaa38862ed1c5af8f3a7680a3d0 | /points.py | 3dfe2716c108a622307d02931ec20860ef1542af | [] | no_license | mikronavt/study | 7a82343bdc130144c9d8c05b2c79cea12b348bfa | 1c28dfe2d90aca815e1988b34693ff2c275c9db5 | refs/heads/master | 2020-12-24T09:53:44.061442 | 2016-11-09T07:15:44 | 2016-11-09T07:15:44 | 73,260,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py |
from collections import namedtuple
# make a basic Point class
# Lightweight immutable lat/lon pair.
Point = namedtuple('Point', ["lat", "lon"])
# Sample points used by the demo call at the bottom of the file.
points = [Point(1,2),
          Point(3,4),
          Point(5,6)]
# implement the function gmaps_img(points) that returns the google maps image
# for a map with the points passed in. A example valid response looks like
# this:
#
# http://maps.googleapis.com/maps/api/staticmap?size=380x263&sensor=false&markers=1,2&markers=3,4
#
# Note that you should be able to get the first and second part of an individual Point p with
# p.lat and p.lon, respectively, based on the above code. For example, points[0].lat would
# return 1, while points[2].lon would return 6.
GMAPS_URL = "http://maps.googleapis.com/maps/api/staticmap?size=380x263&sensor=false&"
def gmaps_img(points):
    """Return a Google static-map URL with one marker per point.

    :param points: iterable of objects with ``lat`` and ``lon`` attributes
        (e.g. the Point namedtuple defined above)
    :returns: e.g. ``...&markers=1,2&markers=3,4`` for Points (1,2), (3,4)
    """
    # Join markers with '&' instead of appending '&' after each one and
    # slicing the trailing separator off afterwards — same URL, no string
    # surgery, no quadratic concatenation.
    markers = "&".join("markers={0},{1}".format(p.lat, p.lon) for p in points)
    # With no markers the base URL's own trailing '&' is stripped, matching
    # the original implementation's [:-1] behavior.
    return GMAPS_URL + markers if markers else GMAPS_URL.rstrip("&")
# Demo: render the map URL for the sample points defined above.
print(gmaps_img(points))
# Unrelated string-slicing scratch code left in by the author.
s= "fjfjfjf"
print(len(s))
print(s[1:-2])  # drops the first char and the last two -> "jfjf"
| [
"chgb-tol@ya.ru"
] | chgb-tol@ya.ru |
7d8f8ad2798d084d3fefc7341b7cb6421986ee29 | e73121fcfcc4df2e7092a82f4810ce9615e9dd83 | /Codeforces/Juggling Characters.py | 962c9d9a3ff799b5ab4959335989f0a821453db5 | [] | no_license | Redwanuzzaman/Online-Judge-Problem-Solutions | 1aba5eda26a03ed8cafaf6281618bf13bea7699b | f2f4ccac708bd49e825f2788da886bf434523d3c | refs/heads/master | 2022-08-29T04:10:31.084874 | 2022-08-14T18:20:30 | 2022-08-14T18:20:30 | 142,601,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | for cases in range(int(input())):
n = int(input())
characters = {}
for strings in range(n):
string = input()
for i in string:
characters[i] = characters.get(i, 0) + 1
status = True
for value in characters.values():
if value % n != 0:
status = False
break
if status:
print("YES")
else:
print("NO")
| [
"noreply@github.com"
] | Redwanuzzaman.noreply@github.com |
5acc75c1e95ffbca7fcf9961714a7d331e6f2873 | 9259c17e559cebe4e82e60471f77b2ec757efc82 | /MaskOperation/MaskOper.py | 4675303854a899796893f296c3c194f3023d41fc | [] | no_license | LPSYSY/DIP | ba6a6ab54049b7ac97e42661cd753932ee1b1ba6 | c0da24cf8349788d68a2a1390eb5911a85c91ba7 | refs/heads/master | 2023-01-06T17:20:47.735296 | 2020-11-03T11:59:47 | 2020-11-03T11:59:47 | 305,015,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,650 | py | import cv2 as cv
import numpy as np
def cv_show(name, img):
    '''
    Display *img* in a window titled *name*, blocking until a key is
    pressed, then close all OpenCV windows.
    '''
    cv.imshow(name, img)
    cv.waitKey(0)
    cv.destroyAllWindows()
def showTwoPics(imgOrigin, imgResult):
    '''
    Place two images side by side (horizontally) for visual comparison.
    '''
    side_by_side = np.hstack((imgOrigin, imgResult))
    return side_by_side
def ImageSmooth(img, fileterType, kernel=3):
    """Smooth *img* and display it next to the original.

    :param img: input image (as returned by cv.imread)
    :param fileterType: 'average' (mean/box blur) or 'median' (median blur);
        any other value silently does nothing — NOTE(review): consider
        whether an error would be more appropriate.  The misspelled
        parameter name is kept for backward compatibility.
    :param kernel: filter aperture size
    """
    imgOrigin = img
    # Mean (box) filter
    if fileterType == 'average':
        imgResult = cv.blur(imgOrigin, (kernel, kernel))
        cv_show('res', showTwoPics(imgOrigin, imgResult))
    # Median filter
    elif fileterType == 'median':
        imgResult = cv.medianBlur(imgOrigin, kernel)
        cv_show('res', showTwoPics(imgOrigin, imgResult))
def ImageSharpen(img, filerType, kernel=3):
    """Compute an edge/sharpening response of *img* and display it next to
    the original.

    :param img: input image (as returned by cv.imread)
    :param filerType: 'Sobel' (combined |dx| + |dy| gradients, weighted
        50/50) or 'Laplace' (Laplacian); any other value silently does
        nothing.  The misspelled parameter name is kept for compatibility.
    :param kernel: aperture size for the Sobel operator
    """
    imgOrigin = img
    if filerType == 'Sobel':
        # Sobel operator: take x and y gradients separately (CV_64F keeps
        # negative slopes), convert to absolute 8-bit, then blend 50/50.
        imgResultX = cv.Sobel(imgOrigin, cv.CV_64F, 1, 0, ksize=kernel)
        imgResultY = cv.Sobel(imgOrigin, cv.CV_64F, 0, 1, ksize=kernel)
        imgResultX = cv.convertScaleAbs(imgResultX)
        imgResultY = cv.convertScaleAbs(imgResultY)
        imgResult = cv.addWeighted(imgResultX, 0.5, imgResultY, 0.5, 0)
        cv_show('res', showTwoPics(imgOrigin, imgResult))
    elif filerType == 'Laplace':
        # Laplacian second-derivative operator, then absolute 8-bit.
        imgResult = cv.Laplacian(img, cv.CV_64F)
        imgResult = cv.convertScaleAbs(imgResult)
        cv_show('res', showTwoPics(imgOrigin, imgResult))
if __name__ == "__main__":
    # Load the test image in grayscale (flag 0 = cv.IMREAD_GRAYSCALE).
    imgOrigin = cv.imread('pictures/lena.jpg', 0)
    # ImageSmooth(imgOrigin, 'average', 5)
    # ImageSmooth(imgOrigin, 'median', 5)
    # ImageSharpen(imgOrigin, 'Sobel', 3)
    ImageSharpen(imgOrigin, 'Laplace', 3)
"www.1215178414@qq.com"
] | www.1215178414@qq.com |
58e67e60e1de0ddb45d1f19ae0b484c6e81d10a9 | 422d46ecde4b29ee2c34f1a2a30ba0417a33220f | /catkin_ws/build/catkin_generated/stamps/Project/_setup_util.py.stamp | d600a9524c26916a3b7555a59db9496caaa16a6c | [] | no_license | biniamzerai/BusBot | 68cf39f947a468b02b08ed1baad5afaf838a61e9 | 8522ba21d98f909ca29ddfd41b6047acc7f97691 | refs/heads/master | 2021-01-07T16:18:04.682683 | 2020-01-26T01:33:58 | 2020-01-26T01:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,359 | stamp | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
# Marker file whose presence identifies a directory as a catkin workspace.
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
    # while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
    # since Windows finds dll's via the PATH variable, prepend it with path to lib
    PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
# Maps environment-variable name -> subfolder (str) or list of subfolders
# that each workspace contributes to that variable.
ENV_VAR_SUBFOLDERS = {
    'CMAKE_PREFIX_PATH': '',
    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
    'PATH': PATH_TO_ADD_SUFFIX,
    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
    'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
    '''
    Generate shell code to reset environment variables
    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
    This does not cover modifications performed by environment hooks.

    :param environ: environment mapping; updated in place with the
        rolled-back values
    :param env_var_subfolders: dict of variable name -> subfolder(s)
    :returns: list of shell assignment lines (empty if nothing changed)
    '''
    lines = []
    unmodified_environ = copy.copy(environ)
    for key in sorted(env_var_subfolders.keys()):
        subfolders = env_var_subfolders[key]
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        # None means the variable did not contain any workspace-derived
        # entries and needs no reset.
        value = _rollback_env_variable(unmodified_environ, key, subfolders)
        if value is not None:
            environ[key] = value
            lines.append(assignment(key, value))
    if lines:
        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
    return lines
def _rollback_env_variable(environ, name, subfolders):
    '''
    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.

    :param subfolders: list of str '' or subfoldername that may start with '/'
    :returns: the updated value of the environment variable,
        or None if the variable was not modified.
    '''
    value = environ[name] if name in environ else ''
    env_paths = [path for path in value.split(os.pathsep) if path]
    value_modified = False
    for subfolder in subfolders:
        if subfolder:
            # Normalize: strip a single leading/trailing path separator
            # so joining with the workspace path behaves predictably.
            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
                subfolder = subfolder[1:]
            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
                subfolder = subfolder[:-1]
        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
            path_to_remove = None
            for env_path in env_paths:
                # Compare ignoring one trailing separator on the env entry.
                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
                if env_path_clean == path_to_find:
                    path_to_remove = env_path
                    break
            if path_to_remove:
                env_paths.remove(path_to_remove)
                value_modified = True
    new_value = os.pathsep.join(env_paths)
    return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
    '''
    Based on CMAKE_PREFIX_PATH return all catkin workspaces.

    A path counts as a workspace when it contains the CATKIN_MARKER_FILE
    ('.catkin'), optionally also when it starts with '/opt/ros/fuerte' or
    does not exist on disk (see the two flags).

    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
    '''
    # get all cmake prefix paths
    env_name = 'CMAKE_PREFIX_PATH'
    value = environ[env_name] if env_name in environ else ''
    paths = [path for path in value.split(os.pathsep) if path]
    # remove non-workspace paths
    workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
    return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
    '''
    Generate shell code to prepend environment variables
    for the all workspaces.

    :param environ: current environment mapping
    :param env_var_subfolders: dict of variable name -> subfolder(s)
    :param workspaces: os.pathsep-separated string of workspace roots
    :returns: list of shell code lines
    '''
    lines = []
    lines.append(comment('prepend folders of workspaces to environment variables'))
    paths = [path for path in workspaces.split(os.pathsep) if path]
    # CMAKE_PREFIX_PATH is handled first: the workspace roots themselves
    # (empty subfolder) are prepended.
    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
    for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
        subfolder = env_var_subfolders[key]
        prefix = _prefix_env_variable(environ, key, paths, subfolder)
        lines.append(prepend(environ, key, prefix))
    return lines
def _prefix_env_variable(environ, name, paths, subfolders):
    '''
    Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.

    Only paths that exist on disk and are not already present in the
    variable are included; a trailing os.pathsep is appended when the
    variable already has content so the prefix can be concatenated directly.
    '''
    value = environ[name] if name in environ else ''
    environ_paths = [path for path in value.split(os.pathsep) if path]
    checked_paths = []
    for path in paths:
        if not isinstance(subfolders, list):
            subfolders = [subfolders]
        for subfolder in subfolders:
            path_tmp = path
            if subfolder:
                path_tmp = os.path.join(path_tmp, subfolder)
            # skip nonexistent paths
            if not os.path.exists(path_tmp):
                continue
            # exclude any path already in env and any path we already added
            if path_tmp not in environ_paths and path_tmp not in checked_paths:
                checked_paths.append(path_tmp)
    prefix_str = os.pathsep.join(checked_paths)
    if prefix_str != '' and environ_paths:
        prefix_str += os.pathsep
    return prefix_str
def assignment(key, value):
    """Shell code that assigns *value* to environment variable *key*."""
    template = 'set %s=%s' if IS_WINDOWS else 'export %s="%s"'
    return template % (key, value)


def comment(msg):
    """Shell comment line containing *msg* for the target shell."""
    return ('REM %s' if IS_WINDOWS else '# %s') % msg


def prepend(environ, key, prefix):
    """Shell code that prepends *prefix* to *key*.

    Falls back to a plain assignment when the variable is unset or empty.
    """
    # .get() is falsy both when the key is missing and when its value is
    # empty — identical to `key not in environ or not environ[key]`.
    if not environ.get(key):
        return assignment(key, prefix)
    if IS_WINDOWS:
        return 'set %s=%s%%%s%%' % (key, prefix, key)
    return 'export %s="%s$%s"' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
    '''
    Generate shell code with found environment hooks
    for the all workspaces.

    Scans each workspace listed in ``cmake_prefix_path`` for hook scripts
    under ``etc/catkin/profile.d`` and emits assignments enumerating them.
    Hooks from earlier (higher-priority) workspaces replace same-named
    hooks from later ones.
    '''
    lines = []
    lines.append(comment('found environment hooks in workspaces'))
    generic_env_hooks = []
    generic_env_hooks_workspace = []
    specific_env_hooks = []
    specific_env_hooks_workspace = []
    generic_env_hooks_by_filename = {}
    specific_env_hooks_by_filename = {}
    # generic hooks are .sh (.bat on Windows); shell-specific hooks use the
    # extension of the invoking shell taken from CATKIN_SHELL (e.g. .bash)
    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
    # remove non-workspace paths
    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
    # iterate in reverse so hooks from higher-priority (earlier) workspaces
    # end up replacing same-named hooks found in lower-priority ones
    for workspace in reversed(workspaces):
        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
        if os.path.isdir(env_hook_dir):
            for filename in sorted(os.listdir(env_hook_dir)):
                if filename.endswith('.%s' % generic_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in generic_env_hooks_by_filename:
                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
                        generic_env_hooks.pop(i)
                        generic_env_hooks_workspace.pop(i)
                    # append env hook
                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
                    generic_env_hooks_workspace.append(workspace)
                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
                    # remove previous env hook with same name if present
                    if filename in specific_env_hooks_by_filename:
                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
                        specific_env_hooks.pop(i)
                        specific_env_hooks_workspace.pop(i)
                    # append env hook
                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
                    specific_env_hooks_workspace.append(workspace)
                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
    # generic hooks are sourced before shell-specific ones
    env_hooks = generic_env_hooks + specific_env_hooks
    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
    count = len(env_hooks)
    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
    for i in range(count):
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
    return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
    try:
        try:
            args = _parse_arguments()
        except Exception as e:
            print(e, file=sys.stderr)
            sys.exit(1)
        if not args.local:
            # environment at generation time
            CMAKE_PREFIX_PATH = '/home/jacob/catkin_ws/devel;/opt/ros/kinetic'.split(';')
        else:
            # don't consider any other prefix path than this one
            CMAKE_PREFIX_PATH = []
        # prepend current workspace if not already part of CPP
        base_path = os.path.dirname(__file__)
        # CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
        # base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
        if os.path.sep != '/':
            base_path = base_path.replace(os.path.sep, '/')
        if base_path not in CMAKE_PREFIX_PATH:
            CMAKE_PREFIX_PATH.insert(0, base_path)
        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
        environ = dict(os.environ)
        lines = []
        # rollback_env_variables / ENV_VAR_SUBFOLDERS are defined earlier in
        # this generated script (outside this excerpt).
        if not args.extend:
            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
        print('\n'.join(lines))
        # need to explicitly flush the output
        sys.stdout.flush()
    except IOError as e:
        # and catch potential "broken pipe" if stdout is not writable
        # which can happen when piping the output to a file but the disk is full
        if e.errno == errno.EPIPE:
            print(e, file=sys.stderr)
            sys.exit(2)
        raise
    sys.exit(0)
| [
"reed.jacobp@gmail.com"
] | reed.jacobp@gmail.com |
edc2f22a48bc2753d69d353303835fb0c08e54e7 | 1bda09bc8fbf74548d1ce888df90866c23946941 | /looting_art/looting_art/asgi.py | bd678432c2252cf264887bfc486f5d9a6fa066cc | [] | no_license | parisdata/2021GLAMHACK | b2fd6fd1324d7a3053c9732a15d58413b0145fc6 | 2591cac9f08d7222b8adecb56dad3071dacd86d2 | refs/heads/main | 2023-04-09T13:51:34.525229 | 2021-04-17T14:24:22 | 2021-04-17T14:25:12 | 358,595,756 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | """
ASGI config for looting_art project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default to the project's settings module unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'looting_art.settings')
# Module-level ASGI callable picked up by ASGI servers (daphne, uvicorn, ...).
application = get_asgi_application()
| [
"daisywheel22@gmail.com"
] | daisywheel22@gmail.com |
e925f4d15ce49f28fb5541824110fbe83b64f481 | fe0e34526b1470134b83fd98931ddfb6a83eb14b | /work_with_database/phones/migrations/0001_initial.py | dae16a2be8f8758a1ebffd2ed52eea81ee932ea9 | [] | no_license | Alexklai92/django_2 | 494c3e072558ea3a49388458ee599d92025ccee0 | f4b0def72105806f33d236406f73415693fc7084 | refs/heads/master | 2022-12-12T11:16:09.474443 | 2019-09-18T06:01:33 | 2019-09-18T06:01:33 | 199,710,403 | 0 | 0 | null | 2022-12-08T05:57:51 | 2019-07-30T18:57:21 | Python | UTF-8 | Python | false | false | 902 | py | # Generated by Django 2.0.5 on 2019-08-06 22:06
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64, verbose_name='Имя')),
('price', models.IntegerField(verbose_name='Цена')),
('image', models.CharField(max_length=128, verbose_name='Изображение')),
('release_date', models.DateField(verbose_name='Дата релиза')),
('lte_exists', models.BooleanField(verbose_name='LTE')),
('slug', models.CharField(max_length=70)),
],
),
]
| [
"aklai@inbox.ru"
] | aklai@inbox.ru |
2ba795bc87ecfec801fbc0b79121ab8944f8a22c | 32bedd47b66e228f957ec76c051851107cebdb50 | /src/base_will.py | 7d7fab787d0dbf880944b251b51c82866c6219a2 | [] | no_license | caleb-and-will/HashCode2018 | d900b0c9827b7481dff642541a693d867ef9da81 | 80deefbc1b8b4a91acb219df9b693e3478aa551a | refs/heads/master | 2021-01-25T12:24:04.963987 | 2018-03-01T21:37:14 | 2018-03-01T21:37:14 | 123,469,864 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,631 | py | """
"""
# Classes
class City:
    """ Represents a city in an input file.
    Properties:
        grid (int, int): (number of rows, number of columns)
        vehicles (list of Vehicle): list of all available vehicles
        rides (list of Ride): list of all rides
        ride_num: number of rides
        bonus: per-ride bonus for starting ride on time
        step_num: number of steps in the simulation
    """
    def __init__(self, file):
        # The first line of the file holds the global problem parameters:
        # rows, columns, vehicles, rides, bonus, steps.
        with open(file) as f:
            line = f.readline()
            values = line.strip('\n').split(' ')
            self.grid = (int(values[0]), int(values[1]))
            self.ride_num = int(values[3])
            self.bonus = int(values[4])
            self.step_num = int(values[5])
            self.vehicles = self.get_vehicles(int(values[2]))
            self.rides = self.get_rides(file)
    def __repr__(self):
        return ('grid: ' + str(self.grid) +
                '\nnumber of vehicles: ' + str(len(self.vehicles)) +
                '\nnumber of rides: ' + str(self.ride_num) +
                '\nper-ride bonus: ' + str(self.bonus) +
                '\nnumber of steps: ' + str(self.step_num)
                )
    def get_rides(self, file):
        """Parse every line after the header into a Ride object."""
        rides = []
        with open(file) as f:
            next(f)
            cur_ride = 0
            for line in f:
                values = line.split(' ')
                # Bug fix: the original used values[-1][-2] to drop the
                # trailing newline, which truncates any multi-digit final
                # number (e.g. '10\n' became '0').  strip() removes the
                # newline without losing digits.
                values[-1] = values[-1].strip()
                r = Ride(cur_ride,
                         (int(values[0]), int(values[1])),
                         (int(values[2]), int(values[3])),
                         int(values[4]),
                         int(values[5])
                         )
                rides.append(r)
                cur_ride += 1
        return rides
    def get_vehicles(self, n):
        """Create n vehicles, all starting at the origin (0, 0)."""
        vehicles = []
        for i in range(0, n):
            vehicles.append(Vehicle(i))
        return vehicles
    def get_free_vehicles(self, current_step):
        """Vehicles whose current assignment ends at or before current_step."""
        free = []
        for v in self.vehicles:
            if (v.step_busy_until <= current_step):
                free.append(v)
        return free
    def get_waiting_rides(self):
        """Rides that have not yet been assigned to any vehicle."""
        waiting = []
        for r in self.rides:
            if (not r.is_taken):
                waiting.append(r)
        return waiting
class Ride:
    """ Represents a requested ride in the input file.
    Properties:
        start_intersection (int, int): (row, column)
        finish_intersection (int, int): (row, column)
        earliest_start (int): earliest time ride may start
        latest_finish (int): earliest time ride may finish
        is_taken (boolean): false if the journey has not yet been taken, true
            otherwise
    """
    def __init__(self, r_id, start_intersection, finish_intersection,
                 earliest_start, latest_finish):
        self.id = r_id
        self.start_intersection = start_intersection
        self.finish_intersection = finish_intersection
        self.earliest_start = earliest_start
        self.latest_finish = latest_finish
        self.distance = get_distance_between_points(start_intersection,
                                                    finish_intersection)
        self.is_taken = False
    def __repr__(self):
        parts = [
            'id: ' + str(self.id),
            'start intersection: ' + str(self.start_intersection),
            'finish intersection: ' + str(self.finish_intersection),
            'earliest start: ' + str(self.earliest_start),
            'latest finish: ' + str(self.latest_finish),
            'distance: ' + str(self.distance),
            'has been taken: ' + str(self.is_taken),
        ]
        return '\n'.join(parts)
class Vehicle:
    """ Represents a vehicle in the input file.
    Properties:
        current_position (int, int): current position of the vehicle
        ride (Ride): ride object assigned to this vehicle
        step_busy_until (int): first step at which the vehicle is free again
    """
    def __init__(self, v_id):
        self.id = v_id
        self.current_position = (0, 0)
        self.ride = None
        self.step_busy_until = 0
    def __repr__(self):
        return '[{}, {}, {}]'.format(self.id, self.current_position, self.ride)
# Functions
def get_distance_between_points(pos1, pos2):
    """Manhattan distance between two (row, column) grid positions."""
    row_delta = abs(pos1[0] - pos2[0])
    col_delta = abs(pos1[1] - pos2[1])
    return row_delta + col_delta
def create_matrix(r, c):
    """Return an r-by-c matrix (list of independent row lists) of zeros."""
    return [[0] * c for _ in range(r)]
def print_file_info(file):
    """Print a summary of the city followed by each ride request."""
    city = City(file)
    print(city)
    print('\n---\n')
    for ride in city.rides:
        print(ride, '\n')
| [
"williamthomson97@gmail.com"
] | williamthomson97@gmail.com |
52b24c2dfbc7080ab4e480f25278b8892b86b30d | dd8dccf07f7a2f46912409f76a5342c2e1b63df8 | /app.py | 360337c1a90a6af0f6afd56b1c399b21bdac92cc | [
"Apache-2.0"
] | permissive | ejolly/paperwiki | 54b1dc3e0f16a14989ddc7e4b64878cf668b1c4a | 653f8f042d54f9596fa82ee066414b57c8f50627 | refs/heads/master | 2020-04-28T13:16:24.757451 | 2018-08-16T23:46:08 | 2018-08-16T23:46:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,744 | py | # App from sanic
from flask import Flask, render_template, request, jsonify
from flask_wtf import FlaskForm
from flask_pagedown.fields import PageDownField
from wtforms.fields import SubmitField
from flask_pagedown import PageDown
# for markdown
import markdown
from flask import Markup
from crossref.restful import Works
class PageDownFormExample(FlaskForm):
    """Markdown editor form: a Flask-PageDown textarea plus a submit button."""
    pagedown = PageDownField('Enter your markdown')
    submit = SubmitField('Submit')
# Handle mongo queries async style
from motor.motor_asyncio import AsyncIOMotorClient
from flask_pymongo import PyMongo
# Template rendering
# from jinja2 import DictLoader
# import jinja2_sanic as j2s
# External async connections
import asyncio
import uvloop
# Utils
# from sanic.response import json
import os, subprocess, threading
import json
from datetime import datetime
# Scholar searcher.
from scholar import SearchScholarQuery, ScholarQuerier, SearchScholarQuery,ScholarSettings
# Create app
app = Flask(__name__)
pagedown = PageDown(app)
app.config.from_pyfile('./config.py')
# NOTE(review): secret keys are hard-coded here - move them into config.py
# or environment variables before deploying.
app.config.update(dict(
    SECRET_KEY="powerful secretkey yes!",
    WTF_CSRF_SECRET_KEY="a csrf secret key"
))
# app.db = AsyncIOMotorClient(app.config['MONGOURI'])['paperwiki']
app.config["MONGO_URI"] = app.config['MONGOURI']
mongo = PyMongo(app)
# session = {}
# @app.middleware('request')
# def add_session(request):
#     request['session'] = session
# Configure templates
# template_dict = {}
# template_dict['home'] = open('./templates/home.html').read()
# template_dict['see_wiki'] = open('./templates/see_wiki.html').read()
# template_dict['create_wiki'] = open('./templates/create_wiki.html').read()
# j2s.setup(app,loader=DictLoader(template_dict))
# Create async mongo connection
# Make motor-mongo use the same event loop as sanic
# @app.listener('before_server_start')
# def setup_db(app,loop):
#     app.db = AsyncIOMotorClient(app.config['MONGOURI'])['paperwiki']
@app.route("/", methods=['GET', 'POST'])
def home():
resp = render_template("home.html")
return resp
async def do_find_one(clusterID):
    # NOTE(review): relies on app.db, which is only assigned by the
    # commented-out setup_db listener above - calling this as-is would
    # raise AttributeError; confirm whether this helper is still used.
    document = await app.db.paperwiki.find_one({'clusterID': clusterID})
    return document
@app.route("/search", methods=['GET','POST'])
def search():
"""
Uses scholar.py to read documents from google search.
"""
queries = {}
for key in ['author','words']:
val = request.form[key]
if len(val)>0:
queries[key] = request.form[key]
else:
queries[key] = None
works = Works() # init api scraper
articles_q = works.query(title=queries['words'], author=queries['author']).sample(20)
articles = []
for article in articles_q:
articles.append(article)
doi = article['DOI']
search_result = mongo.db.paperwiki.find_one({ "DOI" : doi})
if search_result:
if 'content' in search_result.keys():
article['actionurl'] = "see_wiki?id=" + doi
article['wiki_exists'] = True
else:
article['actionurl'] = "create_wiki?id=" + doi
article['wiki_exists'] = False
else:
insert_id = mongo.db.paperwiki.insert_one(article)
article['actionurl'] = "create_wiki?id=" + doi
article['wiki_exists'] = False
context = {"docs":articles}
resp = render_template("home.html",docs=articles)
return resp
@app.route("/create_wiki", methods=['GET','POST'])
@app.route('/create_wiki/<id>')
def create_wiki(id=None):
"""
Create new wiki page
"""
clusterID = str(request.form['create_wiki'])
submit_url = "submit_wiki?id=" + clusterID
doc = mongo.db.paperwiki.find_one({ "DOI" : clusterID})
print('This is the article ID: ',clusterID)
context={"cluster_id":request.form['create_wiki']}
form = PageDownFormExample()
if form.validate_on_submit():
text = form.pagedown.data
if 'content' not in doc.keys():
doc['content'] = "Add information about article here!"
resp = render_template("create_wiki.html", id = clusterID, submit_url=submit_url, form = form,doc=doc)
return resp
@app.route("/submit_wiki", methods=['GET','POST'])
@app.route('/submit_wiki/<id>')
def submit_wiki(id=None):
"""
Submit new or modified wiki page
"""
# print(request.form['submit_wiki'])
clusterID = str(request.args.get('id'))
response = json.loads(request.form['submit_wiki'])
search_result = mongo.db.paperwiki.find_one({ "DOI" : clusterID})
search_result['content'] = str(response['content'])
insert_id = mongo.db.paperwiki.replace_one({'_id':search_result['_id']},search_result) # mongo
content = Markup(markdown.markdown(search_result['content']))
search_result['actionurl'] = "create_wiki?id=" + clusterID
resp = render_template("see_wiki.html",id=clusterID,doc=search_result,content=content)
return resp
@app.route("/see_wiki", methods=['GET','POST'])
@app.route('/see_wiki/<id>')
def see_wiki(id=None):
"""
See existing wiki page
"""
clusterID = str(request.args.get('id'))
search_result = mongo.db.paperwiki.find_one({ "DOI" : clusterID})
content = Markup(markdown.markdown(search_result['content']))
search_result['actionurl'] = "create_wiki?id=" + clusterID
resp = render_template("see_wiki.html",id=clusterID,doc=search_result,content=content)
return resp
# Choose the port: Heroku supplies it via $PORT, otherwise use 5000 locally.
ON_HEROKU = os.environ.get('ON_HEROKU')
if ON_HEROKU:
    # get the heroku port
    port = int(os.environ.get('PORT', 17995)) # as per OP comments default is 17995
else:
    port = 5000
if __name__ == "__main__":
    # NOTE(review): the message always says 5000 even when running on Heroku
    # with a different port.
    print("Running on Port 5000")
    # Can change workers to num cores for better performance
    app.run(host="0.0.0.0",port=port,debug=True)
| [
"jcheong0428@gmail.com"
] | jcheong0428@gmail.com |
dad329c525ed35aec8936b40d29ead6200cd9f18 | 34a0970b90981fb9e51217590fff3bb49d1287ef | /AttendanceProject/attendance_system/attendance_system/settings.py | 4637ac3eca86472603fa6720eb0c77209e98be66 | [] | no_license | Exceed788/attendanceAPI | 62069463742029539b40b27432e15baa0b963361 | d27434ad2b2ca160d4989be9dc292d3e0e83c0a1 | refs/heads/master | 2023-07-08T09:05:02.393608 | 2021-08-10T02:41:20 | 2021-08-10T02:41:20 | 386,314,918 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,314 | py | """
Django settings for attendance_system project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control - load it from the
# environment before deploying.
SECRET_KEY = 'django-insecure-*)@j-*^g8wmqz6=58_0e^wkaovmjfrv+4=ur&*aupkn=u$hwcw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# NOTE(review): '*' accepts any Host header; restrict this in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'api_basic',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'attendance_system.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'attendance_system.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"exceed830@gmail.com"
] | exceed830@gmail.com |
3a13070e9b6ac5fe1c0034a31b875111ff26f655 | 7302376ef455d7e072181b952e57d11b4c4365e3 | /WebProject/rango/rangoapp/form.py | cf22ddcbf700bcd6165a8a158c2488a297471887 | [] | no_license | BlessKingslayer/StartItFromPython | cb250f68a415e9695878e7e5e5344b6ed0b0cfd3 | 90d758965cb568971f47e297f7959da49b91895f | refs/heads/master | 2022-12-13T05:19:17.340583 | 2019-01-30T10:12:39 | 2019-01-30T10:12:39 | 146,078,235 | 0 | 0 | null | 2022-12-08T02:59:25 | 2018-08-25T08:14:09 | Python | UTF-8 | Python | false | false | 1,944 | py | from django import forms
from rangoapp.models import Category, Page, UserProfile
from django.contrib.auth.models import User
class CategoryForm(forms.ModelForm):
    """Form for creating a Category; views/likes start hidden at zero."""
    name = forms.CharField(max_length=128, help_text='请输入种类名称')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    likes = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    # An inline class to provide additional information on the form.
    class Meta:
        # Provide an association between the ModelForm and a model
        model = Category
        fields = "__all__"
class PageForm(forms.ModelForm):
    """Form for creating a Page; prepends 'http://' to bare URLs on clean."""
    title = forms.CharField(max_length=128, help_text='请输入页面标题')
    url = forms.URLField(max_length=128, help_text='请输入URL')
    views = forms.IntegerField(widget=forms.HiddenInput(), initial=0)
    class Meta:
        model = Page
        fields = ('title', 'url', 'views')
    def clean(self):
        cleaned_data = self.cleaned_data
        url = cleaned_data.get('url')
        # If url is not empty and doesn't start with 'http://', prepend 'http://'.
        # Bug fix: the original called str.startwith (missing 's'), which
        # does not exist and raised AttributeError whenever a URL was given.
        if url and not url.startswith('http://'):
            url = 'http://' + url
            cleaned_data['url'] = url
        return cleaned_data
class UserForm(forms.ModelForm):
    """Registration form for the built-in User model."""
    username = forms.CharField(help_text="Please enter a username.")
    email = forms.CharField(help_text="Please enter your email.")
    # NOTE(review): the password is collected as plain text here; confirm
    # the view calls set_password() before saving the user.
    password = forms.CharField(
        widget=forms.PasswordInput(), help_text="Please enter a password.")
    class Meta:
        model = User
        fields = ('username', 'email', 'password')
class UserProfileForm(forms.ModelForm):
    """Optional extra profile fields (website, avatar) linked to a User."""
    website = forms.URLField(
        help_text="Please enter your website.", required=False)
    picture = forms.ImageField(
        help_text="Select a profile image to upload.", required=False)
    class Meta:
        model = UserProfile
        fields = ('website', 'picture')
"836432552@qq.com"
] | 836432552@qq.com |
9cea2b983e148f75b6c54dc0f00c4461901aa4b4 | 61801ae65e00563c765bdc61b7ed499680cb9d74 | /TrainTicket.py | c5e229b542a122a33abb913276a4c2247a7386cb | [] | no_license | aes421/HackerRankChallenges | 2e183ccdf0989647f3cb38336cb01ec97807a558 | 7b3f3006327d06c921ab43fb6281decd64e269b5 | refs/heads/master | 2021-09-09T11:14:04.264143 | 2018-03-15T13:07:21 | 2018-03-15T13:07:21 | 125,367,458 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | #!/bin/python3
import os
import sys
# Berth labels repeat every 8 seats; the label for seat n is berth[n % 8].
berth = ['SUB', 'LB', 'MB', 'UB', 'LB', 'MB', 'UB', 'SLB']
#
# Complete the berthType function below.
#
def berthType(n):
    """Return the berth label for seat number *n*."""
    index = n % 8
    return berth[index]
if __name__ == '__main__':
    # HackerRank harness: read the seat number from stdin and write the
    # berth label to the file named by OUTPUT_PATH.
    f = open(os.environ['OUTPUT_PATH'], 'w')
    n = int(input())
    result = berthType(n)
    f.write(result + '\n')
    f.close()
| [
"aesdev421@gmail.com"
] | aesdev421@gmail.com |
de9cdc221b466b438e56e604d354af8db1542009 | 3109aaf72df47f11742aca1c5921f71e03eb9917 | /controls/views.py | 17269ee089a01d4a2c5d8d45186ee3903ba26d07 | [
"MIT"
] | permissive | kofi-teddy/accounts | a225f5639ef8993934fe69ec638d2af19d854c2d | 74633ce4038806222048d85ef9dfe97a957a6a71 | refs/heads/master | 2023-02-19T15:10:20.621628 | 2021-01-23T10:30:27 | 2021-01-23T10:30:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,867 | py | import re
from functools import reduce
from itertools import chain, groupby
from accountancy.mixins import (ResponsivePaginationMixin,
SingleObjectAuditDetailViewMixin)
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.mixins import (LoginRequiredMixin,
PermissionRequiredMixin)
from django.contrib.auth.models import Group, User
from django.db import transaction
from django.db.models import prefetch_related_objects
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views.generic import (CreateView, DetailView, ListView,
TemplateView, UpdateView)
from nominals.models import NominalTransaction
from simple_history.utils import (bulk_create_with_history,
bulk_update_with_history)
from users.mixins import LockDuringEditMixin
from users.models import UserSession
from controls.forms import (UI_PERMISSIONS, AdjustFinancialYearFormset,
FinancialYearForm,
FinancialYearInlineFormSetCreate, GroupForm,
ModuleSettingsForm, PeriodForm, UserForm)
from controls.helpers import PermissionUI
from controls.models import FinancialYear, ModuleSettings, Period
from controls.widgets import CheckboxSelectMultipleWithDataAttr
class ControlsView(LoginRequiredMixin, TemplateView):
    """Landing page of the controls (administration) area."""
    template_name = "controls/controls.html"
class GroupsList(LoginRequiredMixin, ResponsivePaginationMixin, ListView):
    """Paginated list of auth groups."""
    paginate_by = 25
    model = Group
    template_name = "controls/group_list.html"
class IndividualMixin:
    """Mixin that exposes the view's ``edit`` flag to the template context."""
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["edit"] = self.edit
        return context
class ReadPermissionsMixin:
    """Mixin that builds the permission table rows for the template.

    Subclasses must implement ``get_perms`` returning the permissions to
    display.
    """
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        perms = self.get_perms()
        perm_ui = PermissionUI(perms)
        # UI_PERMISSIONS() yields every permission shown in the UI; each one
        # is slotted into its display group.
        for perm in UI_PERMISSIONS()():
            perm_ui.add_to_group(perm)
        perm_table_rows = perm_ui.create_table_rows()
        context_data["perm_table_rows"] = perm_table_rows
        return context_data
class GroupDetail(
        LoginRequiredMixin,
        PermissionRequiredMixin,
        SingleObjectAuditDetailViewMixin,
        ReadPermissionsMixin,
        IndividualMixin,
        DetailView):
    """Read-only view of a group and the permissions it grants."""
    model = Group
    template_name = "controls/group_detail.html"
    edit = False
    permission_required = "auth.view_group"
    def get_perms(self):
        # Permissions shown come straight from the group itself.
        return self.object.permissions.all()
class GroupUpdate(
        LoginRequiredMixin,
        PermissionRequiredMixin,
        LockDuringEditMixin,
        SingleObjectAuditDetailViewMixin,
        IndividualMixin,
        UpdateView):
    """Edit a group's name and permissions (record is locked while editing)."""
    model = Group
    template_name = "controls/group_edit.html"
    success_url = reverse_lazy("controls:groups")
    form_class = GroupForm
    edit = True
    permission_required = "auth.change_group"
class GroupCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """Create a new auth group."""
    model = Group
    template_name = "controls/group_edit.html"
    success_url = reverse_lazy("controls:groups")
    form_class = GroupForm
    permission_required = "auth.add_group"
class UsersList(LoginRequiredMixin, ListView):
    """Paginated list of users."""
    paginate_by = 25
    model = User
    template_name = "controls/users_list.html"
"""
The permissions tab in the UI for the user detail and user edit shows BOTH
the permissions of the groups the user belongs to and the permissions for that particular user.
In edit mode the user only has the option to change the latter.
"""
# Subset of auth.User fields surfaced in the audit history UI by the
# user detail and edit views below.
user_fields_to_show_in_audit = [
    'is_superuser',
    'username',
    'first_name',
    'last_name',
    'email',
    'is_active',
]
class UserDetail(
        LoginRequiredMixin,
        PermissionRequiredMixin,
        SingleObjectAuditDetailViewMixin,
        ReadPermissionsMixin,
        DetailView):
    """Read-only view of a user showing direct *and* group permissions."""
    model = User
    template_name = "controls/user_detail.html"
    edit = False
    permission_required = "auth.view_user"
    ui_audit_fields = user_fields_to_show_in_audit
    def get_perms(self):
        """Return the union of the user's own and group permissions.

        Always returns a list.  The original returned ``None`` when the
        user had no permissions at all, which callers (the permission
        table builder) did not guard against.
        """
        user = self.object
        user_perms = user.user_permissions.all()
        prefetch_related_objects([user], "groups__permissions__content_type")
        group_perms = list(chain.from_iterable(
            group.permissions.all() for group in user.groups.all()))
        # set() removes permissions granted both directly and via a group.
        return list(set(chain(user_perms, group_perms)))
class UserEdit(
        LoginRequiredMixin,
        PermissionRequiredMixin,
        LockDuringEditMixin,
        SingleObjectAuditDetailViewMixin,
        IndividualMixin,
        UpdateView):
    """Edit a user.

    The permissions widget displays group-derived permissions alongside the
    user's own; only user-specific permissions are written back on save.
    """
    model = User
    form_class = UserForm
    template_name = "controls/user_edit.html"
    success_url = reverse_lazy("controls:users")
    edit = True
    permission_required = "auth.change_user"
    ui_audit_fields = user_fields_to_show_in_audit
    # because 5 db hits are needed for POST
    @transaction.atomic
    def dispatch(self, request, *args, **kwargs):
        # Run the whole request in one transaction (see comment above).
        return super().dispatch(request, *args, **kwargs)
    def get_form(self):
        # Attach the user's group permissions to the widget so they render
        # as ticked but group-derived checkboxes.
        form = self.form_class(**self.get_form_kwargs())
        user = self.object
        prefetch_related_objects([user], "groups__permissions__content_type")
        group_perms = [group.permissions.all()
                       for group in user.groups.all()] # does hit db again
        group_perms = list(chain(*group_perms)) # does not hit db again
        group_perms = {perm.pk: perm for perm in group_perms}
        self.group_perms = group_perms
        form.fields["user_permissions"].widget.group_perms = group_perms
        return form
    def form_valid(self, form):
        groups = form.cleaned_data.get("groups")
        user_permissions = form.cleaned_data.get("user_permissions")
        # because the group permissions are included in the form i.e. checkboxes are ticked for
        # permissions which belong to only groups and not users, we need to discount all such permissions
        user_permissions = [
            perm for perm in user_permissions if perm.pk not in self.group_perms]
        form.instance.user_permissions.clear() # hit db
        form.instance.user_permissions.add(*user_permissions) # hit db
        form.instance.groups.clear() # hit db
        form.instance.groups.add(*groups) # hit db
        response = super().form_valid(form)
        # this deletes the current user session
        update_session_auth_hash(self.request, self.object)
        # re-create a session record so the edited user remains tracked
        UserSession.objects.create(
            user=self.object, session_id=self.request.session.session_key)
        return response
class UserCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """Create a new user."""
    model = User
    form_class = UserForm
    template_name = "controls/user_edit.html"
    success_url = reverse_lazy("controls:users")
    permission_required = "auth.add_user"
    def get_form(self):
        # NOTE(review): this mutates the *class-level* declared field's
        # widget, so it affects every subsequent use of UserForm (including
        # UserEdit) - confirm this is intended.
        self.form_class.declared_fields["user_permissions"].widget = CheckboxSelectMultipleWithDataAttr(
            attrs={
                "data-option-attrs": [
                    "codename",
                    "content_type__app_label",
                ],
            }
        )
        form = super().get_form()
        return form
class FinancialYearList(ListView):
    """List all financial years.

    NOTE(review): unlike the other views in this module this one has no
    LoginRequiredMixin/PermissionRequiredMixin - confirm it is meant to be
    accessible without authentication.
    """
    model = FinancialYear
    template_name = "controls/fy_list.html"
def convert_month_years_to_full_dates(post_data_copy):
    """Expand every non-empty ``month_start`` value from ``MM-YYYY`` to a
    full ``01-MM-YYYY`` date, in place, and return the mapping.

    Values that do not yield a valid ``01-MM-YYYY`` string are left
    untouched.
    """
    for key, value in post_data_copy.items():
        if not re.search(r"month_start", key) or not value:
            continue
        candidate = "01-" + value
        if re.search(r"01-\d{2}-\d{4}", candidate):
            post_data_copy[key] = candidate
    return post_data_copy
class FinancialYearCreate(LoginRequiredMixin, PermissionRequiredMixin, CreateView):
    """Create a financial year together with its periods (inline formset)."""
    model = FinancialYear
    template_name = 'controls/fy_create.html'
    form_class = FinancialYearForm
    success_url = reverse_lazy("controls:index")
    permission_required = "controls.add_financialyear"
    def get_context_data(self, **kwargs):
        # Bind the inline period formset to POST data (with 'MM-YYYY'
        # inputs expanded to full '01-MM-YYYY' dates) or build an unbound
        # formset for GET.
        context_data = super().get_context_data(**kwargs)
        if self.request.POST:
            d = convert_month_years_to_full_dates(self.request.POST.copy())
            context_data["periods"] = FinancialYearInlineFormSetCreate(
                d, prefix="period")
        else:
            context_data["periods"] = FinancialYearInlineFormSetCreate(
                prefix="period")
        return context_data
    def form_valid(self, form):
        context_data = self.get_context_data()
        periods = context_data["periods"]
        if periods.is_valid():
            fy = form.save()
            self.object = fy
            periods.instance = fy
            periods.save(commit=False)
            period_instances = [p.instance for p in periods]
            period_instances.sort(key=lambda p: p.month_start)
            i = 1
            # Number the periods 01, 02, ... in calendar order and build the
            # combined 'YYYYPP' key used elsewhere.
            for period in period_instances:
                period.fy_and_period = f"{fy.financial_year}{str(i).rjust(2, '0')}"
                period.period = str(i).rjust(2, '0')
                i = i + 1
            bulk_create_with_history(
                [*period_instances],
                Period
            )
            first_period_of_fy = fy.first_period()
            mod_settings = ModuleSettings.objects.first()
            # when a FY is created for the first time we need to set the default
            # posting periods for each posting module in the software
            for setting, period in mod_settings.module_periods().items():
                if not period:
                    setattr(mod_settings, setting, first_period_of_fy)
            mod_settings.save()
            return HttpResponseRedirect(self.get_success_url())
        return self.render_to_response(context_data)
class FinancialYearDetail(LoginRequiredMixin, PermissionRequiredMixin, DetailView):
    """Show a financial year together with all of its periods."""
    model = FinancialYear
    template_name = "controls/fy_detail.html"
    context_object_name = "financial_year"
    permission_required = "controls.view_financialyear"
    def get_context_data(self, **kwargs):
        context_data = super().get_context_data(**kwargs)
        # Expose the year's periods for the template's period table.
        periods = self.object.periods.all()
        context_data["periods"] = periods
        return context_data
class AdjustFinancialYear(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """Edit every Period at once so periods can be moved between financial years.

    Although this subclasses UpdateView, the "form" is really a model formset
    over the whole Period table (see get_object / get_form_kwargs).
    """
    model = Period
    template_name = "controls/fy_adjust.html"
    form_class = AdjustFinancialYearFormset
    success_url = reverse_lazy("controls:fy_list")
    prefix = "period"
    permission_required = "controls.change_fy"
    def get_object(self):
        # form is in fact a formset
        # so every period object can be edited
        return None
    def get_success_url(self):
        return self.success_url
    def get_form_kwargs(self):
        """Swap UpdateView's single-instance kwarg for a formset queryset."""
        kwargs = super().get_form_kwargs()
        kwargs.pop("instance")
        kwargs["queryset"] = Period.objects.all()
        return kwargs
    def form_invalid(self, formset):
        """Flag non-field / non-form errors so the template can render them once."""
        if any([form.non_field_errors() for form in formset]):
            formset.has_non_field_errors = True
        if formset.non_form_errors():
            formset.has_non_field_errors = True
        return super().form_invalid(formset)
    def form_valid(self, formset):
        """Apply the period moves, rolling back b/f postings from the earliest
        affected financial year before any Period rows are rewritten."""
        formset.save(commit=False)
        fy_has_changed = {}  # use dict to avoid recording multiple occurrences of the same
        # FY being affected
        for form in formset:
            if 'fy' in form.changed_data:
                # form.initial holds the FY the period belonged to *before* the
                # edit; resolve it to an object via the form field's queryset.
                fy_id = form.initial.get("fy")
                fy_queryset = form.fields["fy"]._queryset
                fy = next(fy for fy in fy_queryset if fy.pk == fy_id)
                fy_has_changed[fy_id] = fy
        # we need to rollback now to the earliest of the financial years which has changed
        # do this before we make changes to the period objects and FY objects
        fys = [fy for fy in fy_has_changed.values()]
        if fys:
            earliest_fy_affected = min(fys, key=lambda fy: fy.financial_year)
            if earliest_fy_affected:
                # because user may not in fact change anything
                # if the next year after the earliest affected does not exist no exception is thrown
                # the db query just won't delete anything
                NominalTransaction.objects.rollback_fy(
                    earliest_fy_affected.financial_year + 1)
        # now all the bfs have been deleted we can change the period objects
        instances = [form.instance for form in formset]
        fy_period_counts = {}
        # NOTE(review): groupby only groups *consecutive* items — assumes the
        # formset instances arrive ordered by fy_id; confirm upstream ordering.
        for fy_id, periods in groupby(instances, key=lambda p: p.fy_id):
            fy_period_counts[fy_id] = len(list(periods))
        fys = FinancialYear.objects.all()
        for fy in fys:
            fy.number_of_periods = fy_period_counts[fy.pk]
        # no point auditing this
        FinancialYear.objects.bulk_update(fys, ["number_of_periods"])
        bulk_update_with_history(
            instances, Period, ["period", "fy_and_period", "fy"])
        return HttpResponseRedirect(self.get_success_url())
class ModuleSettingsUpdate(
        LoginRequiredMixin,
        PermissionRequiredMixin,
        SingleObjectAuditDetailViewMixin,
        UpdateView):
    """Edit the ModuleSettings record (per-module default posting periods)."""
    model = ModuleSettings
    form_class = ModuleSettingsForm
    template_name = "controls/module_settings.html"
    success_url = reverse_lazy("controls:index")
    permission_required = "controls.change_modulesettings"
    def get_object(self):
        # ModuleSettings is treated as a singleton; always edit the first row.
        return ModuleSettings.objects.first()
| [
"rossm6@googlemail.com"
] | rossm6@googlemail.com |
18d74a25770cf284c63d6d4c3abee3a3a5245c4b | 56f612d1466e25322da2f5d236b036c116a203e6 | /src/portfolio/csv_portfolio.py | 289b104cd9da5f54c021902a921301279907b337 | [] | no_license | nat-leo/trade5 | 5fc419a49fe76b9d74e8a4dc815033aac398f5b2 | 3937d9a151fa32ce0d024b7c46b6470a48537a69 | refs/heads/master | 2023-06-29T14:26:48.905347 | 2021-07-24T02:45:22 | 2021-07-24T02:45:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,315 | py | import csv
import ast
from src.portfolio import naive_portfolio
from src import event
class CsvPortfolio(naive_portfolio.NaivePortfolio):
    """Naive portfolio with stop-loss / take-profit, with holdings persisted
    to ``holdings.csv`` between event batches."""
    def __init__(self, events, equity):
        # events: shared event queue (list used as a FIFO by the engine)
        # equity: starting equity; self.equity tracks the running curve
        self.events = events
        self.updated_list = {}
        self.holdings = {}
        self.history = []
        self.equity = [equity]
    def create_order(self, q_event):
        """
        Append an OrderEvent to the queue (typically after receiving a SignalEvent
        from a strategy object, or after a MarketEvent hits a stoploss or takeprofit).
        """
        self.events.append(event.OrderEvent(
            direction=q_event.get_direction(),
            datetime=q_event.get_candle(),
            ticker=q_event.get_ticker(),
            quantity=1000
        ))
    def create_single_order(self, q_event):
        """
        Get a SignalEvent and enter accordingly, but also get rid of the
        other holdings.
        """
        self.updated_list[q_event.get_ticker()] = {
            'ticker': q_event.get_ticker(),
            'direction': q_event.get_direction(),
            'candle': q_event.get_candle()
        }
        # if we're at the last signal event, process it and update the update_list
        if isinstance(self.events[0], event.SignalEvent) and len(self.events) == 1:
            # for each pair in the updated list, check if it's already in holdings
            # if the pair is in holdings, do nothing.
            # if the pair is not in holdings, open it; holdings not in the
            # updated list are closed out below.
            with open("holdings.csv", 'r', newline='') as file:
                reader = csv.reader(file)
                for row in reader:
                    #print(type(ast.literal_eval(row[2])))
                    self.holdings[row[0]] = {
                        "ticker": row[0],
                        "direction": int(row[1]),
                        "candle": ast.literal_eval(row[2]), # convert row[2] string to dict
                        "quantity": int(row[3]),
                        "price": float(row[4]),
                        "pip_value": float(row[5]),
                        "margin": float(row[6]),
                        "stop_loss": float(row[7]),
                        "take_profit": float(row[8])
                    }
            for pair in self.updated_list:
                if self.updated_list[pair]['ticker'] not in self.holdings:
                    # enter pairs not currently holding:
                    self.events.append(event.OrderEvent(
                        direction=self.updated_list[pair]['direction'],
                        datetime=self.updated_list[pair]['candle'],
                        ticker=self.updated_list[pair]['ticker'],
                        quantity=1000
                    ))
            for h in self.holdings:
                if h not in self.updated_list:
                    # leave pairs that aren't in updated list
                    self.events.append(event.OrderEvent(
                        direction=1 if self.holdings[h]['direction']==-1 else -1,
                        datetime=self.holdings[h]['candle'], # BIG ISSUE: this needs to be the current price, not the price entered at.
                        ticker=self.holdings[h]['ticker'],
                        quantity=self.holdings[h]['quantity']
                    ))
            self.updated_list = {}
    def create_close_order(self, ticker, direction, datetime, price, quantity=1000):
        """For takeprofit / stoploss caused OrderEvents. """
        # The close order is the opposite direction of the open holding.
        print(ticker, 'closed')
        self.events.append(event.OrderEvent(
            direction=direction*-1,
            datetime=datetime,
            ticker=ticker,
            price=price,
            quantity=quantity
        ))
    def update(self, q_event):
        """
        After receiving a FillEvent, update internal data to the FillEvent's
        specifications.
        """
        if q_event.get_ticker() not in self.holdings: # add order to holdings
            # New position: record the fill and derive SL (500 pips) and
            # TP (400 pips) levels from the entry price.
            self.holdings[q_event.get_ticker()] = {
                "ticker": q_event.get_ticker(),
                "direction": q_event.get_direction(),
                "candle": q_event.get_candle(),
                "quantity": q_event.get_quantity(),
                "price": q_event.get_price(),
                "pip_value": q_event.get_pip_val(),
                "margin": q_event.get_margin(),
                "stop_loss": self.set_stop_loss(q_event.get_ticker(), q_event.get_direction(), q_event.get_price(), 500),
                "take_profit": self.set_take_profit(q_event.get_ticker(), q_event.get_direction(), q_event.get_price(), 400)
            }
        else: # if an open order needs to be closed
            #print(self.holdings[q_event.get_ticker()])
            # Closing fill: realise P&L into history and the equity curve.
            holding = self.holdings[q_event.get_ticker()]
            self.history.append({
                'ticker': holding['ticker'],
                'direction': holding['direction'],
                'price': holding['price'],
                'return': self.calculate_return(holding['ticker'], holding['direction'], holding['price'], q_event.get_price(), holding['pip_value']),
                'pip_value': holding['pip_value']
            })
            self.equity.append(self.equity[-1] + self.calculate_return(holding['ticker'], holding['direction'], holding['price'], q_event.get_price(), holding['pip_value']))
            del self.holdings[q_event.get_ticker()]
        # when done with all FILL orders, update holdings.csv to reflect the changes
        if isinstance(self.events[0], event.FillEvent) and len(self.events) == 1:
            with open('holdings.csv', 'w', newline='') as file:
                fields = ["ticker", "direction", "candle", "quantity", "price", "pip_value", "margin", "stop_loss", "take_profit"]
                writer = csv.DictWriter(file, fieldnames=fields)
                for h in self.holdings:
                    writer.writerow({
                        "ticker": self.holdings[h]['ticker'],
                        "direction": self.holdings[h]['direction'],
                        "candle": self.holdings[h]['candle'],
                        "quantity": self.holdings[h]['quantity'],
                        "price": self.holdings[h]['price'],
                        "pip_value": self.holdings[h]['pip_value'],
                        "margin": self.holdings[h]['margin'],
                        "stop_loss": self.holdings[h]['stop_loss'],
                        "take_profit": self.holdings[h]['take_profit'],
                    })
            # In-memory holdings are flushed; holdings.csv is the source of
            # truth between batches.
            self.holdings = {}
    def check_if_close_triggered(self, q_event):
        """Takes a MarketEvent and checks if the candle would have triggered one of the
        holdings to close. """
        if isinstance(q_event, event.MultipleMarketEvent):
            # Reload persisted holdings before scanning the new candles.
            with open("holdings.csv", 'r', newline='') as file:
                reader = csv.reader(file)
                for row in reader:
                    self.holdings[row[0]] = {
                        "ticker": row[0],
                        "direction": int(row[1]),
                        "candle": ast.literal_eval(row[2]), # convert row[2] string to dict
                        "quantity": int(row[3]),
                        "price": float(row[4]),
                        "pip_value": float(row[5]),
                        "margin": float(row[6]),
                        "stop_loss": float(row[7]),
                        "take_profit": float(row[8])
                    }
            for e in q_event.get_market_events():
                if e.get_ticker() in self.holdings:
                    tick = e.get_ticker()
                    _dir = self.holdings[tick]['direction']
                    date = e.get_data()[-1]
                    # NOTE(review): bid/ask are indexed at [1] and [2] below —
                    # presumably candle high/low; confirm against the data handler.
                    bid = e.get_data()[-1]['bid']
                    ask = e.get_data()[-1]['ask']
                    if _dir < 0: # if short (buy ask)
                        if ask[1] >= self.holdings[tick]['stop_loss']:
                            # create an OrderEvent, pop holding out of holdings and into history
                            self.create_close_order(tick, _dir, date, self.holdings[tick]['stop_loss'])
                        elif ask[2] <= self.holdings[tick]['take_profit']:
                            # create an OrderEvent, pop holding out of holdings and into history
                            self.create_close_order(tick, _dir, date, self.holdings[tick]['take_profit'])
                    elif _dir > 0: # if long (sell bid)
                        if bid[2] <= self.holdings[tick]['stop_loss']:
                            self.create_close_order(tick, _dir, date, self.holdings[tick]['stop_loss'])
                        elif bid[1] >= self.holdings[tick]['take_profit']:
                            self.create_close_order(tick, _dir, date, self.holdings[tick]['take_profit'])
            self.holdings = {}
        '''
        elif isinstance(q_event, event.MarketEvent):
            if q_event.get_ticker() in self.holdings:
                tick = q_event.get_ticker()
                _dir = self.holdings[tick]['direction']
                date = q_event.get_data()[-1]
                bid = q_event.get_data()[-1]['bid']
                ask = q_event.get_data()[-1]['ask']
                if _dir < 0: # if short (buy bid)
                    if bid[1] > self.holdings[tick]['stop_loss']:
                        # create an OrderEvent, pop holding out of holdings and into history
                        self.create_close_order(tick, _dir, date, self.holdings[tick]['stop_loss'])
                    elif bid[2] < self.holdings[tick]['take_profit']:
                        # create an OrderEvent, pop holding out of holdings and into history
                        self.create_close_order(tick, _dir, date, self.holdings[tick]['take_profit'])
                elif _dir > 0: # if long (sell ask)
                    if ask[2] < self.holdings[tick]['stop_loss']:
                        self.create_close_order(tick, _dir, date, self.holdings[tick]['stop_loss'])
                    elif ask[1] < self.holdings[tick]['take_profit']:
                        self.create_close_order(tick, _dir, date, self.holdings[tick]['take_profit'])
        '''
    # utility functions
    def set_stop_loss(self, ticker, direction, price, pips):
        """Return the stop-loss level `pips` away from `price` against the trade.

        Pip size is 0.01 for JPY pairs, 0.0001 otherwise.
        NOTE(review): direction == 0 leaves `sl` unbound and raises NameError.
        """
        if ticker.startswith('JPY') or ticker.endswith('JPY'):
            if direction > 0:
                sl = price - 0.01*pips
            elif direction < 0:
                sl = 0.01*pips + price
        else:
            if direction > 0:
                sl = price - 0.0001*pips
            elif direction < 0:
                sl = 0.0001*pips + price
        return sl
    def set_take_profit(self, ticker, direction, price, pips):
        """Return the take-profit level `pips` away from `price` in the trade's favour.

        NOTE(review): for shorts this computes ``pips*rate - price`` (usually a
        negative level) — ``price - pips*rate`` looks intended; confirm.
        Also leaves `tp` unbound when direction == 0.
        """
        if ticker.startswith('JPY') or ticker.endswith('JPY'):
            if direction > 0:
                tp = 0.01*pips + price
            elif direction < 0:
                tp = 0.01*pips - price
        else:
            if direction > 0:
                tp = 0.0001*pips + price
            elif direction < 0:
                tp = 0.0001*pips - price
        return tp
    def calculate_return(self, ticker, direction, price, new_price, pip_value):
        """Realised P&L in account currency: pips moved times pip value.

        Returns None when direction == 0 (falls off the end).
        """
        if ticker.startswith('JPY') or ticker.endswith('JPY'):
            rate = 0.01
        else:
            rate = 0.0001
        if direction > 0: # if we selling a long
            return (new_price - price) / rate * pip_value
        elif direction < 0: # if we covering a short
            return (price - new_price) / rate * pip_value
    #getters and setters
    def get_all_holdings(self):
        return self.holdings
    def get_events(self):
        return self.events
    def get_equity(self):
        return self.equity
    def get_history(self):
        return self.history
    def get_holding(self, ticker):
        return self.holdings[ticker]
    def set_holding(self, ticker, direction, quantity, price, stop_loss, take_profit):
        # Directly installs a holding (used by tests / manual setup); note it
        # omits the candle/pip_value/margin keys the other paths populate.
        self.holdings[ticker] = {
            'ticker': ticker,
            'direction': direction,
            'quantity': quantity,
            'price': price,
            'stop_loss': stop_loss,
            'take_profit': take_profit
        }
| [
"33559832+n8liu@users.noreply.github.com"
] | 33559832+n8liu@users.noreply.github.com |
19f904139fcf516707abb4535dcf87df15244ce4 | 94b0c423a4c9365d022d88e071f05360360009e0 | /resources/ticket.py | a36f54a5052eb837c601e605c4e3ff824251beba | [] | no_license | NikolaStojicic/flask_budget_tracker | 033dbddfd94a66b771f4ca97f6c0b42e907a8968 | d8d865442416250c95836b51287e4688dcefd9c4 | refs/heads/master | 2020-05-21T21:07:44.606752 | 2019-08-23T12:37:32 | 2019-08-23T12:37:32 | 186,147,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,130 | py | from flask_restplus import Resource, Api
from flask_jwt_extended import jwt_required, get_jwt_identity
from db import db
from models.ticket import Ticket
from models.user import User
api = Api()
def create_new_ticket():
    """Persist a Ticket built from the current request payload, owned by the JWT user."""
    payload = api.payload
    ticket = Ticket(
        name=payload['name'],
        price=payload['price'],
        description=payload['description'],
        image=payload['image'],
        user_id=get_jwt_identity(),
    )
    db.session.add(ticket)
    db.session.commit()
class TicketResource(Resource):
    """CRUD endpoints for the authenticated user's tickets (JWT required)."""
    @jwt_required
    def post(self):
        """Create a ticket from the JSON payload; name must be non-empty."""
        if len(api.payload['name']) == 0:
            return {'msg': "Name is mandatory!"}, 400
        create_new_ticket()
        return {'msg': 'New ticket added'}, 200
    @jwt_required
    def get(self, _id=-1):
        """Return all of the user's tickets, or a single one when _id is given."""
        tickets = Ticket.query.filter_by(user_id=get_jwt_identity())
        if _id == -1:
            return {'tickets': [ticket.json() for ticket in tickets]}
        # Linear scan over the user's own tickets; a stranger's id yields 404.
        for ticket in tickets:
            if ticket.id == _id:
                return {'ticket': ticket.json()}
        return {'msg': "No ticket with such ID!"}, 404
    @jwt_required
    def put(self, _id=-1):
        """Overwrite an existing ticket's fields from the JSON payload."""
        if _id == -1:
            # NOTE(review): 401 Unauthorized is returned for a missing id;
            # 400 Bad Request looks intended — confirm the API contract.
            return {"msg": "Bad request!"}, 401
        current_user_id = get_jwt_identity()
        ticket = Ticket.find_by_id(_id, current_user_id)
        if not ticket:
            return {'msg': 'No such Ticket.'}, 404
        ticket.name = api.payload['name']
        ticket.price = api.payload['price']
        ticket.description = api.payload['description']
        ticket.image = api.payload['image']
        ticket.user_id = get_jwt_identity()
        db.session.commit()
        return {'msg': ticket.json()}
    @jwt_required
    def delete(self, _id=-1):
        """Delete one of the user's tickets by id."""
        if _id == -1:
            # NOTE(review): as in put(), 400 looks like the intended status.
            return {"msg": "Bad request!"}, 401
        current_user_id = get_jwt_identity()
        ticket = Ticket.find_by_id(_id, current_user_id)
        if not ticket:
            return {'msg': "Ticket doesn't exist!"}, 404
        db.session.delete(ticket)
        db.session.commit()
        return{'msg': "Ticket successfully deleted!"}, 200
| [
"36972658+NikolaStojicic@users.noreply.github.com"
] | 36972658+NikolaStojicic@users.noreply.github.com |
7aa849f41485efec3852ffd577bf460b229cd1a9 | 58808dac7b87c49412da0bd36ffe6608ae88083a | /venv/bin/flask | 10341b55ad8d57dad858447275d41924f2c5ed4f | [] | no_license | epiczain1234/CRUD-Notes-App | 6525886247ca2419d3db14f958470c7b12008394 | b2ddf1e77a30d6dd18fbdbee6a7624c0b9987e45 | refs/heads/master | 2023-06-10T06:52:58.845414 | 2021-06-29T22:55:31 | 2021-06-29T22:55:31 | 379,985,348 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 243 | #!/Users/gdq5/Desktop/CRUD-Notes-App/venv/bin/python3
# -*- coding: utf-8 -*-
# pip-generated console-script shim for the ``flask`` command: it delegates
# straight to flask.cli.main and exits with its return code.
import re
import sys
from flask.cli import main
if __name__ == '__main__':
    # Windows installs name the script ``flask-script.pyw`` / ``flask.exe``;
    # strip that suffix from argv[0] so the CLI reports its name as ``flask``.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"gdq5@M-C02C11GWLVDL.nordstrom.net"
] | gdq5@M-C02C11GWLVDL.nordstrom.net | |
0d27d8503fd285dd8299cfa4c0ba06c1020aeb46 | 2ccecf897075ad47f465f0ff30aeb74eca15487c | /ros/src/twist_controller/pid.py | 0df64640bad8d20ab5114844dd7b2114ef33e57c | [
"MIT"
] | permissive | croepke/CarND-Capstone | f27db39cbaf5d55634b4ab78d19b1ef530e46cab | 24ec5e121baadc96d087ac5ee9619b7ab86dffb3 | refs/heads/master | 2022-11-29T21:24:10.552677 | 2019-06-15T15:30:57 | 2019-06-15T15:30:57 | 192,094,283 | 0 | 0 | MIT | 2022-11-22T00:22:47 | 2019-06-15T15:28:53 | Jupyter Notebook | UTF-8 | Python | false | false | 895 | py | import rospy
MIN_NUM = float('-inf')
MAX_NUM = float('inf')
class PID(object):
    """Discrete PID controller with output clamping and simple anti-windup.

    The integral term is only committed while the raw output stays inside
    [mn, mx]; once the output saturates, accumulation pauses.
    """
    def __init__(self, kp, ki, kd, mn=MIN_NUM, mx=MAX_NUM):
        self.kp = kp
        self.ki = ki
        self.kd = kd
        self.min = mn
        self.max = mx
        self.int_val = 0.0
        self.last_error = 0.0
    def reset(self):
        """Clear the accumulated integral term (last_error is kept)."""
        self.int_val = 0.0
    def step(self, error, sample_time):
        """Advance one sample of `sample_time` seconds; return the clamped output."""
        integral = self.int_val + error * sample_time
        derivative = (error - self.last_error) / sample_time
        raw = self.kp * error + self.ki * integral + self.kd * derivative
        if raw > self.max:
            out = self.max
        elif raw < self.min:
            out = self.min
        else:
            # Unsaturated: commit the integral (anti-windup guard).
            self.int_val = integral
            out = raw
        self.last_error = error
        return out
| [
"croepke@posteo.de"
] | croepke@posteo.de |
4aafe1f881c5b33b219068a5220f67354a33717f | c72252f96a1021ba3f9b812020b74bda258bf465 | /S12学习/day3/code/configfile.py | 8a00a0bc88d4cfb073f70be09115f0b43d8c233f | [] | no_license | yzwy1988/cloud | 0251af05b8cc2a8fffdc6f739a01ba9383353dc5 | 6e87f26497072f41b20c1b0696e5605a52987c50 | refs/heads/master | 2021-01-17T22:19:52.327370 | 2016-02-22T10:34:59 | 2016-02-22T10:34:59 | 52,455,959 | 2 | 0 | null | 2016-02-24T16:14:50 | 2016-02-24T16:14:49 | null | UTF-8 | Python | false | false | 1,267 | py | # /usr/bin/env python
# -*- coding:utf-8 -*-
# startswith 是否以某个字段开头的
import json
def check(backend):
    """Return the raw lines of the ``backend <name>`` section matching *backend*.

    Scans the ``back`` config file and collects every non-blank line between
    the matching backend header and the next backend header. The header's
    second whitespace-separated token is the backend name (strip drops the
    trailing newline, split drops the spaces).
    """
    collected = []
    in_section = False
    with open('back', 'r') as conf:
        for raw in conf:
            is_header = raw.startswith('backend')
            if is_header and raw.strip().split()[1] == backend:
                in_section = True
                continue
            if in_section:
                if is_header:
                    break
                if raw.strip():
                    collected.append(raw)
    return collected
def add(inp_dic):
    """Format a load-balancer ``server`` line from a parsed record.

    Bug fix: the original body referenced an undefined name (``inp_``) and
    used a malformed %-format string (``% maxconn``), so it raised on every
    call. Assumes *inp_dic* carries 'server', 'weight' and 'maxconn' keys
    (TODO confirm against the menu's input format).
    """
    add_mess = 'server %s weight %s maxconn %s ' % (
        inp_dic['server'], inp_dic['weight'], inp_dic['maxconn'])
    return add_mess
def menu():
    """Print the numbered action menu (runtime text reproduced verbatim)."""
    banner = '''
    ****************
    1 查看数据
    2 添加数据
    3 删除数据
    ****************
    '''
    print(banner)
def main():
    """Interactive entry point: show the menu and dispatch on the chosen action.

    Action '1' looks up a backend section; action '2' parses a record the
    user types in.
    """
    menu()
    action = input('请选择操作序号:')
    if action == '1':
        backend = input('''请按如下格式输入要操作的字段:
        www.oldboy.org
        ''')
        check(backend)
    if action == '2':
        inp_data = input('''
        请按如下格式输入要操作的字段:
        server 100.1.7.9 100.1.7.9 weight 20 maxconn 3000
        ''')
        # Bug fix: json.loads() was called with no argument, raising a
        # TypeError before the user's input was ever parsed.
        inp_dic = json.loads(inp_data)  # TODO: feed inp_dic into add()
# Run the interactive menu only when executed as a script, not on import.
if __name__ == '__main__':
    main()
| [
"80470335@qq.com"
] | 80470335@qq.com |
4795ef435b276bc4184e9744d76255ca6cac078c | 06c496a817652aba3f067f95120a20d3cebded7e | /Coursera/Introduction_to_Data_Science_in_Python/Week+2.py | 59662e53d8b3ecd284316a01120a13830899c127 | [] | no_license | issacamara/Data_Science | 4d668ff2e91a0841a7d8c684e365321a5383f3a3 | 1f79119f9f4620a7d75d36118c468993e19d5046 | refs/heads/master | 2021-04-28T14:50:49.603162 | 2018-09-21T11:15:44 | 2018-09-21T11:15:44 | 121,975,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,271 | py |
# coding: utf-8
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-data-analysis/resources/0dhYG) course resource._
#
# ---
# # The Series Data Structure
# In[1]:
import pandas as pd
get_ipython().magic('pinfo pd.Series')
# In[2]:
animals = ['Tiger', 'Bear', 'Moose']
pd.Series(animals)
# In[3]:
numbers = [1, 2, 3]
pd.Series(numbers)
# In[4]:
animals = ['Tiger', 'Bear', None]
pd.Series(animals)
# In[5]:
numbers = [1, 2, None]
pd.Series(numbers)
# In[6]:
import numpy as np
np.nan == None
# In[7]:
np.nan == np.nan
# In[8]:
np.isnan(np.nan)
# In[9]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
# In[10]:
s.index
# In[11]:
s = pd.Series(['Tiger', 'Bear', 'Moose'], index=['India', 'America', 'Canada'])
s
# In[12]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports, index=['Golf', 'Sumo', 'Hockey'])
s
# # Querying a Series
# In[13]:
sports = {'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'}
s = pd.Series(sports)
s
# In[14]:
s.iloc[3]
# In[15]:
s.loc['Golf']
# In[16]:
s[3]
# In[17]:
s['Golf']
# In[21]:
sports = {99: 'Bhutan',
100: 'Scotland',
101: 'Japan',
102: 'South Korea'}
s = pd.Series(sports)
# In[22]:
s[0] #This won't call s.iloc[0] as one might expect, it generates an error instead
# In[23]:
s = pd.Series([100.00, 120.00, 101.00, 3.00])
s
# In[24]:
total = 0
for item in s:
total+=item
print(total)
# In[25]:
import numpy as np
total = np.sum(s)
print(total)
# In[26]:
#this creates a big series of random numbers
s = pd.Series(np.random.randint(0,1000,10000))
s.head()
# In[27]:
len(s)
# In[28]:
get_ipython().run_cell_magic('timeit', '-n 100', 'summary = 0\nfor item in s:\n summary+=item')
# In[29]:
get_ipython().run_cell_magic('timeit', '-n 100', 'summary = np.sum(s)')
# In[30]:
s+=2 #adds two to each item in s using broadcasting
s.head()
# In[31]:
for label, value in s.iteritems():
s.set_value(label, value+2)
s.head()
# In[32]:
get_ipython().run_cell_magic('timeit', '-n 10', 's = pd.Series(np.random.randint(0,1000,10000))\nfor label, value in s.iteritems():\n s.loc[label]= value+2')
# In[33]:
get_ipython().run_cell_magic('timeit', '-n 10', 's = pd.Series(np.random.randint(0,1000,10000))\ns+=2')
# In[34]:
s = pd.Series([1, 2, 3])
s.loc['Animal'] = 'Bears'
s
# In[35]:
original_sports = pd.Series({'Archery': 'Bhutan',
'Golf': 'Scotland',
'Sumo': 'Japan',
'Taekwondo': 'South Korea'})
cricket_loving_countries = pd.Series(['Australia',
'Barbados',
'Pakistan',
'England'],
index=['Cricket',
'Cricket',
'Cricket',
'Cricket'])
all_countries = original_sports.append(cricket_loving_countries)
# In[36]:
original_sports
# In[37]:
cricket_loving_countries
# In[38]:
all_countries
# In[39]:
all_countries.loc['Cricket']
# # The DataFrame Data Structure
# In[40]:
import pandas as pd
purchase_1 = pd.Series({'Name': 'Chris',
'Item Purchased': 'Dog Food',
'Cost': 22.50})
purchase_2 = pd.Series({'Name': 'Kevyn',
'Item Purchased': 'Kitty Litter',
'Cost': 2.50})
purchase_3 = pd.Series({'Name': 'Vinod',
'Item Purchased': 'Bird Seed',
'Cost': 5.00})
df = pd.DataFrame([purchase_1, purchase_2, purchase_3], index=['Store 1', 'Store 1', 'Store 2'])
df.head()
# In[41]:
df.loc['Store 2']
# In[42]:
type(df.loc['Store 2'])
# In[43]:
df.loc['Store 1']
# In[44]:
df.loc['Store 1', 'Cost']
# In[45]:
df.T
# In[46]:
df.T.loc['Cost']
# In[47]:
df['Cost']
# In[48]:
df.loc['Store 1']['Cost']
# In[49]:
df.loc[:,['Name', 'Cost']]
# In[50]:
df.drop('Store 1')
# In[51]:
df
# In[52]:
copy_df = df.copy()
copy_df = copy_df.drop('Store 1')
copy_df
# In[53]:
get_ipython().magic('pinfo copy_df.drop')
# In[54]:
del copy_df['Name']
copy_df
# In[55]:
df['Location'] = None
df
# # Dataframe Indexing and Loading
# In[56]:
costs = df['Cost']
costs
# In[57]:
costs+=2
costs
# In[ ]:
df
# In[ ]:
get_ipython().system('cat olympics.csv')
# In[7]:
df = pd.read_csv('olympics.csv')
df.head()
# In[13]:
df = pd.read_csv('olympics.csv', index_col = 0, skiprows=1)
df.head()
# In[14]:
df.columns
# In[16]:
for col in df.columns:
if col[:2]=='01':
df.rename(columns={col:'Gold' + col[4:]}, inplace=True)
if col[:2]=='02':
df.rename(columns={col:'Silver' + col[4:]}, inplace=True)
if col[:2]=='03':
df.rename(columns={col:'Bronze' + col[4:]}, inplace=True)
if col[:1]=='№':
df.rename(columns={col:'#' + col[1:]}, inplace=True)
df.head()
# # Querying a DataFrame
# In[17]:
df['Gold'] > 0
# In[ ]:
only_gold = df.where(df['Gold'] > 0)
only_gold.head()
# In[29]:
only_gold['Gold'].count()
# In[ ]:
df['Gold'].count()
# In[ ]:
only_gold = only_gold.dropna()
only_gold.head()
# In[ ]:
only_gold = df[df['Gold'] > 0]
only_gold.head()
# In[ ]:
len(df[(df['Gold'] > 0) | (df['Gold.1'] > 0)])
# In[ ]:
df[(df['Gold.1'] > 0) & (df['Gold'] == 0)]
# # Indexing Dataframes
# In[22]:
df.head()
# In[21]:
df['country'] = df.index
df = df.set_index('Gold')
df.head()
# In[ ]:
df = df.reset_index()
df.head()
# In[23]:
df = pd.read_csv('census.csv')
df.head()
# In[28]:
df['SUMLEV'].unique()
# In[24]:
df=df[df['SUMLEV'] == 50]
df.head()
# In[25]:
columns_to_keep = ['STNAME',
'CTYNAME',
'BIRTHS2010',
'BIRTHS2011',
'BIRTHS2012',
'BIRTHS2013',
'BIRTHS2014',
'BIRTHS2015',
'POPESTIMATE2010',
'POPESTIMATE2011',
'POPESTIMATE2012',
'POPESTIMATE2013',
'POPESTIMATE2014',
'POPESTIMATE2015']
df = df[columns_to_keep]
df.head()
df.index
# In[26]:
df = df.set_index(['STNAME', 'CTYNAME'])
df.head()
# In[27]:
df.loc['Michigan', 'Washtenaw County']
# In[ ]:
df.loc[ [('Michigan', 'Washtenaw County'),
('Michigan', 'Wayne County')] ]
# # Missing values
# In[ ]:
df = pd.read_csv('log.csv')
df
# In[ ]:
get_ipython().magic('pinfo df.fillna')
# In[ ]:
df = df.set_index('time')
df = df.sort_index()
df
# In[ ]:
df = df.reset_index()
df = df.set_index(['time', 'user'])
df
# In[ ]:
df = df.fillna(method='ffill')
df.head()
| [
"isskamara@live.fr"
] | isskamara@live.fr |
f9edc8d9a223c008a70ef3224c3054621286d518 | 12258001571bd504223fbf4587870960fa93a46d | /client/config.py | a629d5d3999e56e775ec3430d476a68ae01ea7a4 | [] | no_license | Nik0las1984/mud-obj | 0bd71e71855a9b0f0d3244dec2c877bd212cdbd2 | 5d74280724ff6c6ac1b2d3a7c86b382e512ecf4d | refs/heads/master | 2023-01-07T04:12:33.472377 | 2019-10-11T09:10:14 | 2019-10-11T09:10:14 | 69,223,190 | 2 | 0 | null | 2022-12-26T20:15:20 | 2016-09-26T07:11:49 | Python | UTF-8 | Python | false | false | 190 | py | # coding=utf-8
# Python 2 client defaults; a developer-local local_config.py may override
# any of these names (its absence is tolerated and merely reported).
auto_login = False
try:
    from local_config import *
except ImportError, e:
    print 'Unable to load local_config.py:', e
# Guarantee `plugins` exists even when local_config did not define it.
if 'plugins' not in locals():
    plugins = []
| [
"kolya.khokhlov@gmail.com"
] | kolya.khokhlov@gmail.com |
6d7330abeb85dd4954ae55bd45295a5be17a49bd | fffb732290af97687ea3221ce4a6ce4d95640aff | /courses/w10_opencv/source/OpenCV_in_Ubuntu/Python/mycam_02.py | a69e21c219e5ed6a45cf86fad76f32c973c641fb | [] | no_license | NamWoo/self_driving_car | 851de73ae909639e03756eea4d49ab663447fc19 | cd5c1142c9e543e607ca9dc258f689de6879d207 | refs/heads/master | 2021-07-24T19:51:54.459485 | 2021-07-06T13:58:19 | 2021-07-06T13:58:19 | 186,267,543 | 9 | 7 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | import numpy as np
import cv2
def receive():
    """Pull an H.264 RTP stream from UDP port 5200 via GStreamer and display it.

    Blocks in the display loop until the user presses 'q' in the preview
    window; failed reads are reported and skipped rather than aborting.
    """
    cap = cv2.VideoCapture('udpsrc port=5200 caps=application/x-rtp,media=(string)video,clock-rate=(int)90000,encoding-name=(string)H264,payload=(int)96!rtph264depay!decodebin!videoconvert!appsink',cv2.CAP_GSTREAMER)
    while True:
        ret,frame = cap.read()
        if not ret:
            print('empty frame')
            continue
        cv2.imshow('receive', frame)
        # waitKey also services the GUI event loop; mask to the low byte
        # for portability before comparing against 'q'.
        if cv2.waitKey(1)&0xFF == ord('q'):
            break
    cap.release()
receive(); | [
"pre3ice@gmail.com"
] | pre3ice@gmail.com |
7bb9131882afa35e0892ce6fd0c9ffc4a461be4d | e561f0532d2559064abae2d39271aa4fc2a34b2a | /Factorial.py | 785d1a6f75de2472f6ddc3cf3a14e5d11da67e99 | [] | no_license | Angel-Saez-Gonzalez/module4 | 09f7c6bd42917f61bc1dbb515ce6608e8f4c2fce | 5ecd77a2fe5def13dd2d59abcce8e0214731d4b9 | refs/heads/main | 2023-08-25T22:25:29.547848 | 2021-10-25T06:56:01 | 2021-10-25T06:56:01 | 420,906,355 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | def factorial(num):
    # Accumulate num! iteratively; for num <= 0 the range is empty and the
    # function returns 1. (The def line sits above this view.)
    are = 1
    for i in range(1, num+1):
        are *= i
    return are
# Script entry: read an integer from stdin and print its factorial.
num = int(input("Input a number: "))
print(factorial(num))
| [
"angelsaez251@gmail.com"
] | angelsaez251@gmail.com |
da183faec87314655b87ce430d6c703df9991366 | 4ef688b93866285bcc27e36add76dc8d4a968387 | /moto/ds/responses.py | 46d204c1e27ec3b9a35fcf38df9cfb7e7319d764 | [
"Apache-2.0"
] | permissive | localstack/moto | cec77352df216cac99d5e0a82d7ada933950a0e6 | b0b2947e98e05d913d7ee2a0379c1bec73f7d0ff | refs/heads/localstack | 2023-09-01T05:18:16.680470 | 2023-07-10T09:00:26 | 2023-08-07T14:10:06 | 118,838,444 | 22 | 42 | Apache-2.0 | 2023-09-07T02:07:17 | 2018-01-25T00:10:03 | Python | UTF-8 | Python | false | false | 6,529 | py | """Handles Directory Service requests, invokes methods, returns responses."""
import json
from moto.core.exceptions import InvalidToken
from moto.core.responses import BaseResponse
from moto.ds.exceptions import InvalidNextTokenException
from moto.ds.models import ds_backends, DirectoryServiceBackend
class DirectoryServiceResponse(BaseResponse):
    """Handler for DirectoryService requests and responses.

    Each action method reads its parameters from the request via
    ``_get_param`` (AWS wire names), delegates to the region backend and
    serialises the result as the JSON body AWS clients expect.
    """
    def __init__(self) -> None:
        super().__init__(service_name="ds")
    @property
    def ds_backend(self) -> DirectoryServiceBackend:
        """Return backend instance specific for this region."""
        return ds_backends[self.current_account][self.region]
    def connect_directory(self) -> str:
        """Create an AD Connector to connect to a self-managed directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        size = self._get_param("Size")
        connect_settings = self._get_param("ConnectSettings")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.connect_directory(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            size=size,
            connect_settings=connect_settings,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})
    def create_directory(self) -> str:
        """Create a Simple AD directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        size = self._get_param("Size")
        vpc_settings = self._get_param("VpcSettings")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.create_directory(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            size=size,
            vpc_settings=vpc_settings,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})
    def create_alias(self) -> str:
        """Create an alias and assign the alias to the directory."""
        directory_id = self._get_param("DirectoryId")
        alias = self._get_param("Alias")
        response = self.ds_backend.create_alias(directory_id, alias)
        return json.dumps(response)
    def create_microsoft_ad(self) -> str:
        """Create a Microsoft AD directory."""
        name = self._get_param("Name")
        short_name = self._get_param("ShortName")
        password = self._get_param("Password")
        description = self._get_param("Description")
        vpc_settings = self._get_param("VpcSettings")
        edition = self._get_param("Edition")
        tags = self._get_param("Tags", [])
        directory_id = self.ds_backend.create_microsoft_ad(
            region=self.region,
            name=name,
            short_name=short_name,
            password=password,
            description=description,
            vpc_settings=vpc_settings,
            edition=edition,
            tags=tags,
        )
        return json.dumps({"DirectoryId": directory_id})
    def delete_directory(self) -> str:
        """Delete a Directory Service directory."""
        directory_id_arg = self._get_param("DirectoryId")
        directory_id = self.ds_backend.delete_directory(directory_id_arg)
        return json.dumps({"DirectoryId": directory_id})
    def describe_directories(self) -> str:
        """Return directory info for the given IDs or all IDs (paginated)."""
        directory_ids = self._get_param("DirectoryIds")
        next_token = self._get_param("NextToken")
        limit = self._get_int_param("Limit")
        try:
            (directories, next_token) = self.ds_backend.describe_directories(
                directory_ids, next_token=next_token, limit=limit
            )
        except InvalidToken as exc:
            # Translate the core pagination error into the DS-specific
            # exception AWS clients expect for a bad NextToken.
            raise InvalidNextTokenException() from exc
        response = {"DirectoryDescriptions": [x.to_dict() for x in directories]}
        if next_token:
            response["NextToken"] = next_token
        return json.dumps(response)
    def disable_sso(self) -> str:
        """Disable single-sign on for a directory."""
        directory_id = self._get_param("DirectoryId")
        username = self._get_param("UserName")
        password = self._get_param("Password")
        self.ds_backend.disable_sso(directory_id, username, password)
        # The real API returns an empty body on success.
        return ""
    def enable_sso(self) -> str:
        """Enable single-sign on for a directory."""
        directory_id = self._get_param("DirectoryId")
        username = self._get_param("UserName")
        password = self._get_param("Password")
        self.ds_backend.enable_sso(directory_id, username, password)
        return ""
    def get_directory_limits(self) -> str:
        """Return directory limit information for the current region."""
        limits = self.ds_backend.get_directory_limits()
        return json.dumps({"DirectoryLimits": limits})
    def add_tags_to_resource(self) -> str:
        """Add or overwrite one or more tags for the specified directory."""
        resource_id = self._get_param("ResourceId")
        tags = self._get_param("Tags")
        self.ds_backend.add_tags_to_resource(resource_id=resource_id, tags=tags)
        return ""
    def remove_tags_from_resource(self) -> str:
        """Removes tags from a directory."""
        resource_id = self._get_param("ResourceId")
        tag_keys = self._get_param("TagKeys")
        self.ds_backend.remove_tags_from_resource(
            resource_id=resource_id, tag_keys=tag_keys
        )
        return ""
    def list_tags_for_resource(self) -> str:
        """Lists all tags on a directory (paginated)."""
        resource_id = self._get_param("ResourceId")
        next_token = self._get_param("NextToken")
        limit = self._get_param("Limit")
        try:
            tags, next_token = self.ds_backend.list_tags_for_resource(
                resource_id=resource_id, next_token=next_token, limit=limit
            )
        except InvalidToken as exc:
            raise InvalidNextTokenException() from exc
        response = {"Tags": tags}
        if next_token:
            response["NextToken"] = next_token
        return json.dumps(response)
| [
"noreply@github.com"
] | localstack.noreply@github.com |
e644b72071f0266f58d01cbde8d635059dae977e | 1fab17f2de087c6da8217ca0bf9530e11167c43f | /ar/autoregressive.py | c412e3ba9a67e91436975788cf4bd44d0a599ad4 | [] | no_license | jiangnanhugo/COMP551-Fall2016-Project4 | 79091c514be3d6716080be07e37835c2b9bd27e4 | 13374b529eab55e8d2c3724c2e66dfc33b68fffd | refs/heads/master | 2021-01-01T06:37:39.428307 | 2016-12-10T05:02:06 | 2016-12-10T05:02:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | import numpy as np
from sklearn import linear_model
# model = linear_model.LinearRegression()
# model = linear_model.Ridge(alpha = 1.0, max_iter = None, tol = 0.001)
model = linear_model.Lasso(alpha=0.1)

if __name__ == "__main__":
    # Toy regression sanity check for the Lasso model above.
    X = [[1, 2], [3, 4], [5, 6]]
    y = [1.5, 3.5, 5.5]
    X_test = [[1, 2], [7, 8], [9, 10]]
    y_test = [1.5, 7.5, 9.5]
    # print() function form: the original Python-2 'print' statements are a
    # SyntaxError on Python 3; this form works on both interpreters.
    print("Here")
    model.fit(X, y)
    print("Square mean error: %s" % np.mean((model.predict(X_test) - y_test) ** 2))
    print(model.coef_)
    print(model.intercept_)
"hptruong93@gmail.com"
] | hptruong93@gmail.com |
2825781128878115e1eede94a23b6e94b3bdedb0 | ff8103f0dc01fe33bc9ebdb90132242d6e34eaf6 | /Sample/Sockets/WebServer1.py | 1572261768ccfc670e330fade93865b9294bfe4e | [] | no_license | KumaKuma0421/PatchWorks | 866aec10e1b04d2d0bda2d8ccd646a31db8e2b35 | 22bd8c0cce0b73ad7c20c2817f734c5cdf54345c | refs/heads/master | 2023-01-06T21:04:25.248769 | 2020-11-03T07:14:14 | 2020-11-03T07:14:14 | 295,703,340 | 0 | 0 | null | 2020-11-03T07:14:15 | 2020-09-15T11:18:42 | Python | UTF-8 | Python | false | false | 252 | py | #
# sa https://qiita.com/__init__/items/5c89fa5b37b8c5ed32a4
#
import http.server
import socketserver
HOST = '127.0.0.1'
PORT = 8000

# Serve files from the current working directory until the process is killed.
request_handler = http.server.SimpleHTTPRequestHandler
with socketserver.TCPServer((HOST, PORT), request_handler) as httpd:
    httpd.serve_forever()
| [
"noreply@github.com"
] | KumaKuma0421.noreply@github.com |
b9876d186919a820991514bb11f9d3620e1f0181 | fab1184022b96ff08276328430055dffba2af4f4 | /practica3_departamentosbd/venv/Lib/site-packages/pyxnat/core/uriutil.py | 48340cb44be8617fc78c6a74821f711043d3b14e | [] | no_license | ALJ00/practica_3_bases_de_datos | a03666834c706be0dcbd58adb37ee79503266986 | 42db1be136dae1b92bc069c79e794568b2591ea6 | refs/heads/master | 2020-05-06T13:34:34.999266 | 2019-04-24T19:03:07 | 2019-04-24T19:03:07 | 180,143,947 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,030 | py | import os
import re
from .schema import rest_translation
# from .schema import resources_types
def translate_uri(uri):
    """Rewrite any REST keyword found in the last two segments of *uri*."""
    tail = uri.split('/')[-2:]
    for old, new in rest_translation.items():
        if old in tail:
            uri = uri.replace(old, new)
    return uri
def inv_translate_uri(uri):
    """Apply the reverse of the REST keyword translation to *uri*."""
    reverse_map = dict(zip(rest_translation.values(), rest_translation.keys()))
    for alias, original in reverse_map.items():
        uri = uri.replace('/%s' % alias, '/%s' % original)
    return uri
def join_uri(uri, *segments):
    """Append *segments* to *uri*, trimming leading slashes on each segment
    and any trailing slash on the result.
    """
    parts = uri.split('/')
    parts.extend(seg.lstrip('/') for seg in segments)
    return '/'.join(parts).rstrip('/')
def uri_last(uri):
    """Return the final '/'-separated segment of *uri*."""
    *_, last = uri.split('/')
    return last
def uri_nextlast(uri):
    """Return the second-to-last segment of *uri*.

    Any URI containing '/files/' maps to 'files', so files nested at any
    depth in a hierarchy are still recognised as file resources.
    """
    is_file_uri = '/files/' in uri
    return 'files' if is_file_uri else uri.split('/')[-2]
def uri_parent(uri):
    """Return the parent URI.

    File URIs are first collapsed so that everything below '/files/' is
    dropped except the '/files/' node itself.
    """
    marker = uri.find('/files/')
    if marker >= 0:
        # Keep the URI up to and including '/files/' (len('/files/') == 7).
        uri = uri[:marker + 7]
    return uri.rsplit('/', 1)[0]
def uri_grandparent(uri):
    """Return the parent of the parent of *uri*."""
    parent = uri_parent(uri)
    return uri_parent(parent)
def uri_split(uri):
    """Split *uri* into [everything-before-last-slash, last-segment].

    When *uri* contains no slash the single-element list [uri] is returned.
    """
    return uri.rsplit('/', maxsplit=1)
def uri_segment(uri, start=None, end=None):
    """Return the sub-URI covering segments [start:end].

    With no bounds the URI is returned unchanged; otherwise the selected
    segments are re-joined and prefixed with '/'.
    """
    if start is None and end is None:
        return uri
    # Python slicing treats a None bound as open-ended, which reproduces the
    # separate start-only / end-only / both cases in one expression.
    return '/' + '/'.join(uri.split('/')[start:end])
def uri_shape(uri):
    """Return a generalized 'shape' of *uri*.

    Within each resource ID, purely numeric chunks are replaced by '*' and the
    chunk separators by '?', then the result is reassembled with make_uri().
    """
    parts = uri.split('/')
    kwid_map = dict(zip(parts[1::2], parts[2::2]))
    shapes = {}
    for kw, ident in kwid_map.items():
        # Strip alphanumerics so only the ID's separator characters remain.
        seps = ident
        for char in re.findall('[a-zA-Z0-9]', seps):
            seps = seps.replace(char, '')
        chunks = []
        for chunk in re.split('|'.join(seps), ident):
            try:
                float(chunk)
                chunk = '*'
            except ValueError:
                # Non-numeric chunk: keep it verbatim. (float() on a str can
                # only raise ValueError; the old bare except hid real bugs.)
                pass
            chunks.append(chunk)
        shapes[kw] = '?'.join(chunks)
    return make_uri(shapes)
def make_uri(_dict):
    """Build a URI from the resource levels present in *_dict*.

    Levels are emitted in the canonical REST hierarchy order; missing keys
    are simply skipped.
    """
    uri = ''
    kws = ['projects', 'subjects', 'experiments', 'assessors',
           'reconstructions', 'scans', 'resources', 'in_resources',
           'out_resources', 'files', 'in_files', 'out_files']
    for kw in kws:
        # dict.has_key() was removed in Python 3; membership test replaces it.
        if kw in _dict:
            uri += '/%s/%s' % (kw, _dict.get(kw))
    return uri
def check_entry(func):
    """Decorator that resolves the interface entry point before calling *func*.

    Assumes the wrapped callable is a method whose first argument (self)
    exposes an ``_intf`` interface object.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's name/docstring/signature
    def inner(*args, **kwargs):
        # args[0] is 'self'; make sure the server entry point is known first.
        args[0]._intf._get_entry_point()
        return func(*args, **kwargs)
    return inner
def extract_uri(uri):
    """Destructure a REST uri into (project, subject, experiment).

    Returns None unless the uri names all three levels, i.e. looks like
    '/data/projects/<p>/subjects/<s>/experiments/<e>/scans' — which always
    splits on '/' into exactly 9 pieces.
    """
    parts = uri.split('/')
    if len(parts) != 9:
        return None
    return (parts[3], parts[5], parts[7])
def file_path(uri):
    """Return the file's relative path from *uri*.

    For uri = '/.../files/a/b/c' the result is 'a/b/c'. Raises ValueError
    (via str.index) when '/files/' does not occur in the URI.
    """
    marker = '/files/'
    offset = uri.index(marker) + len(marker)
    return uri[offset:]
| [
"armasjose1980@gmail.com"
] | armasjose1980@gmail.com |
6413eb2dadd5b93ba6b9eebbdfd48076c2f8043b | 8cbc374010bc409d77db46dc7765e8e947a1a785 | /discord_bot_template/src/generic_key_retriever.py | 6aa6ee1bc2f6346ae8b661f6675bc84f27c72790 | [] | no_license | jzcdx/discord_bot_template | 0b4c5e49cc266e7455ce52171c7e9152cea17c88 | 1855c4a2286f2a06ed410d479e4a129df5b25632 | refs/heads/master | 2023-03-08T00:09:09.224130 | 2020-05-01T20:02:34 | 2020-05-01T20:02:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | """
===========================================
Author: Codiacs
Github: github.com/MicroOptimization
===========================================
"""
def get_key():
    """Return the Discord bot API key (placeholder until filled in)."""
    return "<Insert key here>"
"jackiezhen538@gmail.com"
] | jackiezhen538@gmail.com |
7bd0b6ac23924825a6c0ab2796909df84a0aa519 | dbc1695a046e9f2f431ff05b268716c239f3ee7b | /utils/util.py | fa7fd4e17be80db353753f3435b96cb87e33a502 | [
"MIT"
] | permissive | huiwy/UNO-Agents | 8f6302fabdd1a6d376cd527a74ca3c851959ddb7 | e6ede8e66309beb3eae7848cdfed9dc0b6f89d09 | refs/heads/main | 2023-02-18T21:43:42.479224 | 2021-01-23T10:42:22 | 2021-01-23T10:42:22 | 318,509,465 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import numpy as np
import copy
from random import shuffle
from utils import constants
def initialize_deck(current_hand, shuff=True):
    """Build the remaining draw deck given cards already dealt.

    current_hand -- per-card-index counts of cards already in hand.
    shuff -- whether to shuffle the deck before returning it. (Previously this
             flag was accepted but silently ignored; it is now honored. The
             default True preserves the old behavior.)
    """
    deck = [constants.CARD2INT[c] for c in constants.DECK]
    if shuff:
        shuffle(deck)
    # Remove one copy of card 'card_index' from the deck for each copy held.
    for card_index, count in enumerate(current_hand):
        for _ in range(count):
            deck.remove(card_index)
    return deck
| [
"huiwy@shanghaitech.edu.cn"
] | huiwy@shanghaitech.edu.cn |
71dd5d4ae9054c7327937c6f24d5a798d48cd041 | 0a29ee10157c189bf351f4ebff315e490f1ad58a | /manage.py | 0f73675ba92bd94cbb8ca4baaec83d2d368b87a8 | [] | no_license | PolarisStar/servidor | 57dd0ec4316c2fb2ccbd56e44e217e8de8e4dbc3 | 6df54ebb36cec0b9d6c8dbd702df1e3e77d4a781 | refs/heads/master | 2020-04-08T22:40:51.887533 | 2018-11-30T18:09:18 | 2018-11-30T18:09:18 | 159,788,914 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings before importing anything that
    # requires configuration.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MisperrisApi.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| [
"d.arandap@alumnos.duoc.cl"
] | d.arandap@alumnos.duoc.cl |
49919addd199e8a7aff5d7ceb03465d0ee8fa6c8 | 3da6b8a0c049a403374e787149d9523012a1f0fc | /网易云课堂/Python办公自动化实战/01_开启自动化人生/batch_docs.py | d407f2929fd181400dee176ff02cc8571a3889b9 | [] | no_license | AndersonHJB/PyCharm_Coder | d65250d943e84b523f022f65ef74b13e7c5bc348 | 32f2866f68cc3a391795247d6aba69a7156e6196 | refs/heads/master | 2022-07-25T11:43:58.057376 | 2021-08-03T02:50:01 | 2021-08-03T02:50:01 | 348,922,058 | 3 | 3 | null | 2021-09-05T02:20:10 | 2021-03-18T02:57:16 | Python | UTF-8 | Python | false | false | 790 | py | # -*- coding: utf-8 -*-
# @Time : 2021/5/6 8:22 下午
# @Author : AI悦创
# @FileName: batch_docs.py.py
# @Software: PyCharm
# @Blog :http://www.aiyc.top
# @公众号 :AI悦创
from docx import Document # 创建文档
from docx.oxml.ns import qn # 中文
from docx.enum.text import WD_PARAGRAPH_ALIGNMENT # 段落
from docx.shared import Pt, RGBColor, Mm, Cm # 大小磅数/字号
import random
import qrcode
from openpyxl import load_workbook
import xlrd
def qr_code():
    """Generate a random 4-digit sign-in code, save its QR image under qr/,
    and return the image filename.
    """
    code = random.randint(1000, 9999)
    filename = '%s.png' % code
    image = qrcode.make('%s' % code)
    image.save('qr/%s' % filename)
    return filename
def excel_read():
file = xlrd.open_workbook('students.xlsx')
sheet = file.sheet_by_name(file.sheet_names()[0]) | [
"1432803776@qq.com"
] | 1432803776@qq.com |
7a9a72dc5b1d6ef4d3750f1c1424749265b51a1f | 9ce16cc0c5962159677dc87366a64a6a673e6bc6 | /applicant/forms.py | 8bc9c27791f7a6dbf31386c61d8f053bee68db0d | [] | no_license | Code414/Admission-Portal | 8a2dca2c2ef4ffdbe9db2d6ad751ee139f53ebd0 | b6332eb213bd6b1bdb272847cfea9f149405dc24 | refs/heads/master | 2023-06-02T12:21:26.075901 | 2021-06-21T13:19:10 | 2021-06-21T13:19:10 | 378,937,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | from django import forms
from django.forms import ModelForm
from django.forms.models import inlineformset_factory
from .models import ApplicantPrevEducation, ApplicantProfile
from django.contrib.auth import get_user_model
class DateInput(forms.DateInput):
    # Render as an HTML5 <input type="date"> instead of the default "text",
    # so browsers show a native date picker.
    input_type = 'date'
class ApplicantProfileForm(ModelForm):
    """Form for creating/editing an applicant profile (every field but owner)."""
    # Override the address fields with compact textareas.
    present_address = forms.CharField(
        widget=forms.Textarea(attrs={'rows': 3, 'cols': 40}))
    permanent_address = forms.CharField(
        widget=forms.Textarea(attrs={'rows': 3, 'cols': 40}))
    class Meta:
        model = ApplicantProfile
        # 'owner' is set from the request user, not by the applicant.
        exclude = ('owner',)
        widgets = {
            'birth_date': DateInput(),
        }
class ApplicantPrevEducationForm(ModelForm):
    """ModelForm exposing every field of ApplicantPrevEducation."""
    class Meta:
        model = ApplicantPrevEducation
        fields = '__all__'
# Inline formset: edit an applicant's previous-education rows together with the
# parent ApplicantProfile; extra=1 renders one blank row for a new entry.
ApplicantPrevEducationFormSet = inlineformset_factory(
    ApplicantProfile, ApplicantPrevEducation, form=ApplicantPrevEducationForm, extra=1)
| [
"lit2019030@iiitl.ac.in"
] | lit2019030@iiitl.ac.in |
7ca5400535245515d6542cb21cabe3ef93f2b327 | c66366bb0013f41d7265ca152fbd15e74c1b7a1c | /datasets/environment/__init__.py | 28997794639e288e26c65bb5389f2bb9a3dde9de | [] | no_license | minhanp/bidireaction-trajectory-prediction | 404877d3f31720075809699fa917c70f93549250 | 296a50126cd50a1d4a0395696a0567575c4d4df8 | refs/heads/main | 2023-08-11T20:21:38.457470 | 2021-02-06T02:44:05 | 2021-02-06T02:44:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 335 | py | from .data_structures import RingBuffer, SingleHeaderNumpyArray, DoubleHeaderNumpyArray
from .scene import Scene
from .node import Node
from .scene_graph import TemporalSceneGraph, SceneGraph
from .environment import Environment
from .node_type import NodeTypeEnum
from .data_utils import derivative_of
# from .map import GeometricMap
| [
"brianyao@bane.engin.umich.edu"
] | brianyao@bane.engin.umich.edu |
350af117e6724fea079a34082b71c480c9815c5e | ade5e03f09f61be83380997532cdcfb93ac34fc8 | /blog/urls.py | 7e3e41f1635811aa60289832d9c79942815646a9 | [] | no_license | kwonnaseong/Food-calorie-calculation-app | 0400bde5514ebe3df8b895d628b8313cbabd0377 | a81977754b6ffaaef0605670f82556e55a24b00b | refs/heads/main | 2023-08-26T02:22:04.278178 | 2021-11-04T10:25:58 | 2021-11-04T10:25:58 | 384,023,141 | 2 | 2 | null | 2021-07-15T09:23:21 | 2021-07-08T06:24:57 | Python | UTF-8 | Python | false | false | 290 | py | from django.urls import path
from . import views
# Routes for the blog app. The first three routes are unnamed, so they can
# only be reversed by path, not by name.
urlpatterns = [
    path('', views.blog),
    path('post_list/', views.post_list),
    path('randomrecipe/', views.recipe),
    path('create/', views.create, name='create'),
    path('detail/<int:pk>/', views.detail, name='detail'),
]
| [
"rhkd865@naver.com"
] | rhkd865@naver.com |
142d52ca9c1eefcf1920bcf440428ffc4f039da6 | e9c9e38ed91969df78bbd7f9ca2a0fdb264d8ddb | /lib/python3.8/site-packages/ansible_collections/fortinet/fortimanager/plugins/modules/fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist.py | 92edc1ea33c0ac79f071983a1fb2e9e4be4ab7a5 | [] | no_license | Arceusir/PRELIM_SKILLS_EXAM | 882fcf2868926f0bbfe1fb18d50e5fe165936c02 | b685c5b28d058f59de2875c7579739c545df2e0c | refs/heads/master | 2023-08-15T07:30:42.303283 | 2021-10-09T01:27:19 | 2021-10-09T01:27:19 | 415,167,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,077 | py | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
# Module maturity/support metadata consumed by Ansible tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist
short_description: Advertised prefix list.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
- Normally, running one module can fail when a non-zero rc is returned. you can also override
the conditions to fail or succeed with parameters rc_failed and rc_succeeded
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
description: the rc codes list with which the conditions to succeed will be overriden
type: list
required: false
rc_failed:
description: the rc codes list with which the conditions to fail will be overriden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
vlan:
description: the parameter (vlan) in requested url
type: str
required: true
dynamic_mapping:
description: the parameter (dynamic_mapping) in requested url
type: str
required: true
fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
description: the top level parameters set
required: false
type: dict
suboptions:
autonomous-flag:
type: str
description: no description
choices:
- 'disable'
- 'enable'
dnssl:
description: no description
type: str
onlink-flag:
type: str
description: no description
choices:
- 'disable'
- 'enable'
preferred-life-time:
type: int
description: no description
prefix:
type: str
description: no description
rdnss:
description: no description
type: str
valid-life-time:
type: int
description: no description
'''
EXAMPLES = '''
- hosts: fortimanager-inventory
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Advertised prefix list.
fmgr_fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
bypass_validation: False
workspace_locking_adom: <value in [global, custom adom including root]>
workspace_locking_timeout: 300
rc_succeeded: [0, -2, -3, ...]
rc_failed: [-2, -3, ...]
adom: <your own value>
vlan: <your own value>
dynamic_mapping: <your own value>
state: <value in [present, absent]>
fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist:
autonomous-flag: <value in [disable, enable]>
dnssl: <value of string>
onlink-flag: <value in [disable, enable]>
preferred-life-time: <value of integer>
prefix: <value of string>
rdnss: <value of string>
valid-life-time: <value of integer>
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
    """Entry point: build the argument spec, validate parameters, and run the
    CRUD request against the FortiManager ip6-prefix-list endpoint.
    """
    # Collection-level URLs (global vs. per-ADOM).
    jrpc_urls = [
        '/pm/config/global/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list',
        '/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list'
    ]

    # Per-object URLs used for targeted get/update/delete.
    perobject_jrpc_urls = [
        '/pm/config/global/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list/{ip6-prefix-list}',
        '/pm/config/adom/{adom}/obj/fsp/vlan/{vlan}/dynamic_mapping/{dynamic_mapping}/interface/ipv6/ip6-prefix-list/{ip6-prefix-list}'
    ]

    url_params = ['adom', 'vlan', 'dynamic_mapping']
    module_primary_key = None

    # Every option of this module is supported by the same set of FortiManager
    # revisions, so build each per-option 'revision' map from one helper
    # instead of repeating the literal eight times. A fresh dict is returned
    # on every call so no option shares (and could mutate) another's map.
    def supported_revisions():
        """Return a fresh revision-support map."""
        return {
            '6.2.3': True,
            '6.2.5': True,
            '6.4.0': True,
            '6.4.2': True,
            '6.4.5': True,
            '7.0.0': True
        }

    module_arg_spec = {
        'enable_log': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'proposed_method': {
            'type': 'str',
            'required': False,
            'choices': [
                'set',
                'update',
                'add'
            ]
        },
        'bypass_validation': {
            'type': 'bool',
            'required': False,
            'default': False
        },
        'workspace_locking_adom': {
            'type': 'str',
            'required': False
        },
        'workspace_locking_timeout': {
            'type': 'int',
            'required': False,
            'default': 300
        },
        'rc_succeeded': {
            'required': False,
            'type': 'list'
        },
        'rc_failed': {
            'required': False,
            'type': 'list'
        },
        'state': {
            'type': 'str',
            'required': True,
            'choices': [
                'present',
                'absent'
            ]
        },
        'adom': {
            'required': True,
            'type': 'str'
        },
        'vlan': {
            'required': True,
            'type': 'str'
        },
        'dynamic_mapping': {
            'required': True,
            'type': 'str'
        },
        'fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist': {
            'required': False,
            'type': 'dict',
            'revision': supported_revisions(),
            'options': {
                'autonomous-flag': {
                    'required': False,
                    'revision': supported_revisions(),
                    'choices': [
                        'disable',
                        'enable'
                    ],
                    'type': 'str'
                },
                'dnssl': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'str'
                },
                'onlink-flag': {
                    'required': False,
                    'revision': supported_revisions(),
                    'choices': [
                        'disable',
                        'enable'
                    ],
                    'type': 'str'
                },
                'preferred-life-time': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'int'
                },
                'prefix': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'str'
                },
                'rdnss': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'str'
                },
                'valid-life-time': {
                    'required': False,
                    'revision': supported_revisions(),
                    'type': 'int'
                }
            }
        }
    }

    params_validation_blob = []
    check_galaxy_version(module_arg_spec)
    module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'fsp_vlan_dynamicmapping_interface_ipv6_ip6prefixlist'),
                           supports_check_mode=False)

    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        # Propagate the logging switch to the connection before any request.
        connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
        fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
        fmgr.validate_parameters(params_validation_blob)
        fmgr.process_curd(argument_specs=module_arg_spec)
    else:
        module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
    module.exit_json(meta=module.params)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| [
"aaronchristopher.dalmacio@gmail.com"
] | aaronchristopher.dalmacio@gmail.com |
908b171f04e0be993584b3a8f894a92461006315 | 29c36a3c89ee2e407135bcac2bcd10a60bc7cead | /sales/urls.py | b7799317b806ed670c9bbf5e8bfaa7fc3e9985e5 | [] | no_license | thiagomarcal1984/reports_proj | 230003569064c6aebba4821227004391f80ecb73 | 080edcc91cc237b00a7882ee89845229a4f0cd86 | refs/heads/master | 2023-04-19T13:00:25.720286 | 2021-05-07T01:14:16 | 2021-05-07T01:14:16 | 360,244,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | from django.urls import path
from .views import (
SalesListView,
SalesDetailView,
home_view,
)
# Namespace for URL reversing, e.g. reverse('sales:list').
app_name = 'sales'
urlpatterns = [
    path('', home_view, name='home'),
    path('sales/', SalesListView.as_view(), name='list'),
    # NOTE(review): '<pk>' has no path converter, so any string matches —
    # presumably '<int:pk>' was intended; confirm before changing.
    path('sales/<pk>', SalesDetailView.as_view(), name='detail'),
]
"thiagomarcal1984@gmail.com"
] | thiagomarcal1984@gmail.com |
94efea366ba733d9f51675b107757d1b5dd5a454 | 08c29b5f496127a48c5479a6f9323bd11213f5eb | /ReTraining.py | 4b6f9c6230cc7562b9be1857bd564dafaeb34f11 | [] | no_license | Amol2709/Dispatcher | 6edcdb25983b647c29fc205b2fe978d40112afe5 | a55395b879a3b1d1963aa3885cd617e1dd1ffb78 | refs/heads/main | 2023-03-21T21:19:02.341988 | 2021-03-07T17:38:34 | 2021-03-07T17:38:34 | 344,072,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,853 | py | # import warnings
# warnings.filterwarnings('ignore')
import tensorflow as tf
import numpy as np
import pandas as pd
import re
from bs4 import BeautifulSoup
from sklearn import preprocessing
from tqdm import tqdm
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from datetime import date
from datetime import datetime
from CustomCallBack import MyCallback
###################################################################################################################################
import tensorflow_hub as hub
import matplotlib.pyplot as plt
from sklearn.utils import class_weight
#####################################################################################################################################3
#warnings.filterwarnings("ignore", category=DeprecationWarning)
class ReTraining:
    """Retrain a previously saved Keras text-classification model.

    The constructor prepares one-hot labels and balanced class weights from a
    pandas DataFrame that has a free-text 'desc' column and a categorical
    label column named by *status*. Load*Model() reloads a saved .h5 model and
    ModelTraining() fits it on the prepared data and saves the result.
    """

    def __init__(self,df,status):
        # df: pandas DataFrame; status: name of the label column to train on.
        self.df = df
        self.status = status
        print("*"*100)
        print('Data Preparing Started')
        # Keep only the text column and the label column.
        self.df = self.df[["desc",self.status]]
        # Label value -> frequency; the keys enumerate the distinct classes.
        self.A=dict(self.df[self.status].value_counts())
        self.x=list(self.A.keys()) # number of new tags
        self.y= list(self.A.values())
        # Encode class names as integer ids.
        self.le = preprocessing.LabelEncoder()
        self.le.fit(self.x)
        self.Labels=self.le.transform(list(self.le.classes_))
        # Build matrix b whose row k is the one-hot vector for class id k.
        a =self.Labels.copy()
        b = np.zeros((a.size, a.max()+1))
        b[np.arange(a.size),a] = 1
        # One-hot label matrix, one row per training sample.
        self.train_label = np.zeros((self.df.count()[0],len(self.Labels)))
        for i in range(0,self.df.count()[0]):
            # Row for sample i = one-hot vector of its class.
            Index=list(self.le.classes_).index(self.df[self.status][i])
            self.train_label[i,:] = b[Index,:]
        # Training texts (assumed already cleaned upstream -- TODO confirm).
        self.clean_train_desc = list(self.df['desc'])
        # Map class name -> integer id, then derive balanced class weights to
        # counter label imbalance during fit().
        self.DICT ={}
        for i in range(0,len(self.x)):
            self.DICT[list(self.le.classes_)[i]] = list(self.Labels)[i]
        y_helper = []
        T=list(self.df[self.status])
        for i in range(0,len(T)):
            y_helper.append(self.DICT[T[i]])
        # NOTE(review): newer scikit-learn releases require keyword arguments
        # for compute_class_weight; verify against the pinned version.
        class_weights = class_weight.compute_class_weight('balanced',self.Labels,y_helper)
        self.class_weights = dict(enumerate(class_weights))
        print("*"*100)
        print('Data Preparing Finished')

    def LoadAGModel(self,trained_model):
        # Load a saved Assignment Group model (.h5) for retraining. The
        # custom_objects mapping is required because the model embeds a
        # TensorFlow Hub KerasLayer.
        print("*"*100)
        print('Loading Old Assignnment Group Model From Disk For ReTraining ............')
        print("*"*100)
        self.trained_model = trained_model
        self.model = tf.keras.models.load_model(self.trained_model,custom_objects={'KerasLayer': hub.KerasLayer})
        self.model.summary()

    def LoadTAGModel(self,trained_model):
        # Load a saved ML Tag model (.h5) for retraining; same Hub-layer note
        # as LoadAGModel().
        print("*"*100)
        print('Loading Old ML Tag Model From Disk For ReTraining ............')
        self.trained_model = trained_model
        self.model = tf.keras.models.load_model(self.trained_model,custom_objects={'KerasLayer': hub.KerasLayer})
        self.model.summary()

    def ModelTraining(self,name,epoch=1):
        # Fit the loaded model on the prepared texts/labels, plot the training
        # curves, report training accuracy, and save the model as '<name>.h5'.
        # A Load*Model() call must have set self.model before this runs.
        print("*"*100)
        print('ReTraining Started............')
        self.num_epochs = epoch
        self.name=name
        callbacks = MyCallback()
        self.history = self.model.fit(np.array(self.clean_train_desc), self.train_label, epochs=self.num_epochs,callbacks=[callbacks],class_weight=self.class_weights,shuffle=True)
        # NOTE(review): evaluating on the training set measures fit, not
        # generalization.
        scores = self.model.evaluate(np.array(self.clean_train_desc),self.train_label)
        # Plot accuracy and loss per epoch and save the figure to disk.
        plt.plot(self.history.history['accuracy'])
        plt.plot(self.history.history['loss'])
        plt.title('model Detail: {}'.format(self.status))
        plt.ylabel('accuracy')
        plt.xlabel('epoch')
        plt.legend(['accuracy', 'loss'], loc='upper left')
        plt.savefig('model Detail_{}.jpg'.format(self.status))
        # scores[1] is the accuracy metric returned by evaluate().
        print("Accuracy: %.2f%%" % (scores[1]*100))
        self.model.save(self.name+'.h5')
        print('model save succesfully')
| [
"noreply@github.com"
] | Amol2709.noreply@github.com |
8c743604f3458dd657f2da1517973d146cf1f937 | 8f3e2b6c8c03886c5f6cb15e4f9be67c01e0f222 | /local_main.py | 5c40f12ce554fcd3bf1e7c570a0e4574006cc1f9 | [] | no_license | stantonius/jetson-nano | 0b1b528c69a075b15bf0938ebc6364d5af1cdb5a | 60ac3c20061708d52375cf64474bf28d4395f4ed | refs/heads/master | 2023-05-02T11:15:29.167718 | 2021-05-22T01:08:40 | 2021-05-22T01:08:40 | 365,602,543 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # import numpy as np
# import cv2
# def run():
# def dummy_inference(x):
# """invert an image"""
# return 255-x
# cap = cv2.VideoCapture(0) # your webcam
# while True: # forever
# ret, frame_in = cap.read() # 1. read frame
# frame_out = dummy_inference(frame_in) # 2. process frame
# cv2.imshow('frame', frame_out) # 3. display frame
# # logic for conditional termination of the loop...
# if __name__ == "__main__":
# run()
import cv2
# Live webcam preview: show a half-size mirror of camera 0 until ESC is pressed.
cap = cv2.VideoCapture(0)
# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")
while True:
    # Grab one frame per iteration; `ret` (unused) reports capture success.
    ret, frame = cap.read()
    # Downscale to 50% in each dimension; INTER_AREA suits shrinking.
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA)
    cv2.imshow('Input', frame)
    # waitKey(1) pumps the GUI event loop; 27 is the ESC key code.
    c = cv2.waitKey(1)
    if c == 27:
        break
cap.release()
cv2.destroyAllWindows()
"craig.stanton2@gmail.com"
] | craig.stanton2@gmail.com |
d27848d978fa34a0399ffb0f4f5a2df26acee3b6 | a8042cb7f6a4daec26b8cea6b7da2cb7cb880a84 | /970_PowerfulIntegers.py | 2a275f3c79e67bea51ef91ddb05ba8ebb968d662 | [] | no_license | renukadeshmukh/Leetcode_Solutions | 0108edf6c5849946623a75c2dfd57cbf9bb338e4 | 1211eac167f33084f536007468ea10c1a0ceab08 | refs/heads/master | 2022-11-10T20:48:42.108834 | 2022-10-18T07:24:36 | 2022-10-18T07:24:36 | 80,702,452 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,695 | py | '''
970. Powerful Integers
Given two positive integers x and y, an integer is powerful if it is equal to
x^i + y^j for some integers i >= 0 and j >= 0.
Return a list of all powerful integers that have value less than or equal to bound.
You may return the answer in any order. In your answer, each value should occur
at most once.
Example 1:
Input: x = 2, y = 3, bound = 10
Output: [2,3,4,5,7,9,10]
Explanation:
2 = 2^0 + 3^0
3 = 2^1 + 3^0
4 = 2^0 + 3^1
5 = 2^1 + 3^1
7 = 2^2 + 3^1
9 = 2^3 + 3^0
10 = 2^0 + 3^2
Example 2:
Input: x = 3, y = 5, bound = 15
Output: [2,4,6,8,10,14]
Note:
1 <= x <= 100
1 <= y <= 100
0 <= bound <= 10^6
'''
'''
ALGORITHM:
BRUTE FORCE
1. x_pow_arr = Find all powers of x <= bound
2. y_pow_arr = Find all powers of y <= bound
3. Check all (a,b) sums in x_pow_arr and y_pow_arr <= bound
RUNTIME COMPLEXITY: O(log^2 bound)
SPACE COMPLEXITY: O(log^2 bound)
'''
class Solution(object):
    """LeetCode 970: enumerate every value x**i + y**j that is <= bound."""

    def getPowerArray(self, z, bound):
        """Return [z**0, z**1, ...] up to (and one past) `bound`.

        For z == 1 only [1] is returned, since higher powers repeat.
        The list deliberately includes the first power exceeding `bound`,
        matching the original enumeration; callers filter on the sum.
        """
        powers = [1]
        if z == 1:
            return powers
        current = 1
        while current <= bound:
            current *= z
            powers.append(current)
        return powers

    def powerfulIntegers(self, x, y, bound):
        """
        :type x: int
        :type y: int
        :type bound: int
        :rtype: List[int]

        Cross every power of x with every power of y; a set removes
        duplicate sums.  The inner loop breaks early because y's powers
        are ascending, so once a sum exceeds `bound` the rest will too.
        """
        x_powers = self.getPowerArray(x, bound)
        y_powers = self.getPowerArray(y, bound)
        found = set()
        for xp in x_powers:
            for yp in y_powers:
                total = xp + yp
                if total > bound:
                    break
                found.add(total)
        return list(found)
| [
"redeshmu@cisco.com"
] | redeshmu@cisco.com |
5168915945ee69cfb69c0258530fae44d0b9b359 | be21d84dbbc42277008bac4d679cba7407e21601 | /awwards/tests.py | 2c72c44967d09ef4c3d7b0793a201046bd7efa12 | [
"MIT"
] | permissive | omukankurunziza/awwarda-app | 255846c64c4c4d029602a716dbac7098320f5ad4 | 563be1dde2d3e48628ac1be28b33537bb90ef927 | refs/heads/master | 2020-05-03T20:50:26.568887 | 2019-04-05T11:20:14 | 2019-04-05T11:20:14 | 178,811,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | from django.test import TestCase
from .models import Project,Profile,Rating
class ProjectTestClass(TestCase):
    """Unit tests for the Project model."""

    def setUp(self):
        # A fresh, unsaved Project for every test case.
        self.gallery = Project()

    def test_instance(self):
        """setUp must produce a genuine Project instance."""
        self.assertTrue(isinstance(self.gallery, Project))

    def test_save_method(self):
        """save_image() must persist the project to the database."""
        self.gallery.save_image()
        saved = Project.objects.all()
        self.assertTrue(len(saved) > 0)
class RatingTestClass(TestCase):
    """Unit tests for the Rating model."""

    def setUp(self):
        # A fresh, unsaved Rating for every test case.
        self.gallery = Rating()

    def test_instance(self):
        """setUp must produce a genuine Rating instance."""
        self.assertTrue(isinstance(self.gallery, Rating))
class ProfileTestClass(TestCase):
    """Unit tests for the Profile model."""

    def setUp(self):
        # A fresh, unsaved Profile for every test case.
        self.gallery = Profile()

    def test_instance(self):
        """setUp must produce a genuine Profile instance."""
        self.assertTrue(isinstance(self.gallery, Profile))
| [
"nshutioppo@yahoo.fr"
] | nshutioppo@yahoo.fr |
71120eeeae3421385975f9514e2c692f63c876e8 | f1513510612b21aba6e689e0d1e8a37839eb6b08 | /www/CMFBData.py | 71428fb886b1f65f7e0b2519657d1c7337378f50 | [] | no_license | baibaizhang/awesome-python3-webapp | e36fc26911323ba764bcb5d229570e40e1006034 | a8cf26f910f5a7d15fb947c58a754322968a18a9 | refs/heads/master | 2021-01-26T00:54:09.238911 | 2019-09-25T00:47:40 | 2019-09-25T00:47:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,285 | py | #!/usr/bin/env python3
#coding:utf-8
'''''
@author: steve
获取筹码分布数据
'''
import re,time,random
import pandas as pd
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support import expected_conditions as EC
from fake_useragent import UserAgent
import pyautogui
from operator import itemgetter
from itertools import groupby
class CMFBData(object):
    """Scrape chip-distribution ("筹码分布") data for a stock from the
    Eastmoney quote page using a Selenium-driven Chrome browser.

    Lifetime: the Chrome instance is created in __init__ and closed in
    __del__, so keep the object alive for the whole scraping session.
    """
    def __init__(self, load_parameter='silent'):
        """Start Chrome.  `load_parameter='silent'` runs headless;
        any other value shows (and maximises) the browser window —
        required for get_history, which drives the real mouse cursor.
        """
        # print(self.__class__.__name__+" __init__")
        # Configure browser options.
        option = webdriver.ChromeOptions()
        # Hide the "Chrome is being controlled by automated software" banner.
        option.add_argument('disable-infobars')
        # Headless by default; pass anything else to run in the foreground.
        if load_parameter == 'silent':
            option.add_argument("headless")
        # Randomise the user-agent to look less like a bot.
        option.add_argument(UserAgent().random)
        self.browser = webdriver.Chrome(options=option)
        if not (load_parameter == 'silent'):
            # Maximise the visible browser window.
            self.browser.maximize_window()
        self.timeoutCodeList = []
        # NOTE(review): retrytime is never read — _load_web hard-codes its
        # own retry limit of 3; consider unifying.
        self.retrytime = 3
    def __del__(self):
        # Close the browser when the scraper is garbage-collected.
        self.browser.quit()
        # print(self.__class__.__name__+" __del__")
    # Parse the loaded page and return the chip-distribution values.
    def _parse_page(self):
        """Return a dict mapping the eight Chinese column labels (date,
        profit ratio, loss ratio, average cost, 90%/70% cost bands and
        their concentrations) to the text of the first eight <span>s
        inside the __emchatrs3_cmfb container.  Relies on the spans
        appearing in exactly this order on the page.
        """
        COLUMN_LIST = ('日期','获利比例','亏损比例','平均成本','90%成本','90成本集中度','70%成本','70成本集中度')
        count = len(COLUMN_LIST)
        data = {}
        index = 0
        # Parse the current page source.
        soup = BeautifulSoup(self.browser.page_source, 'lxml')
        for span in soup.find(class_="__emchatrs3_cmfb").find_all('span'):
            if index >= count:
                break
            data[COLUMN_LIST[index]] = span.contents[0]
            index = index + 1
        # print(data)
        return data
    def _get_url(self, code):
        """Build the Eastmoney concept-page URL for a stock code.

        Codes starting with '60' map to the Shanghai ('sh') exchange,
        '00'/'30' to Shenzhen ('sz').  Returns None (implicitly) for
        any other prefix or unsupported `code` type.
        """
        code_map = {'60': 'sh', '00':'sz', '30':'sz'}
        code_str = ''
        if isinstance(code, int):
            code_str = str(code)
        elif isinstance(code, float):
            code_str = str(code)
        elif isinstance(code, str):
            code_str = code
        for item in code_map:
            if code_str.startswith(item):
                return "http://quote.eastmoney.com/concept/" + code_map[item] + code_str + ".html"
    # Open and load the page, retrying on failure (up to 4 attempts total).
    def _load_web(self, url):
        """Navigate to `url`, click the "筹码分布" (chip distribution) tab
        and wait until its data panel renders.  Returns True on success;
        returns None (falsy) after exhausting retries — callers test
        truthiness, so this works as a failure signal.
        """
        retry_time = 0
        # NOTE(review): `<= 3` gives 4 attempts, not the 3 the original
        # comment promised — confirm which was intended.
        while retry_time <= 3:
            try:
                browser = self.browser
                browser.get(url)
                # Locate the "chip distribution" tab button via XPath.
                btn_cmfb_xpath = "//a[text()='筹码分布']"
                # Wait for the page (and the button) to be present.
                wait = WebDriverWait(browser, 10)
                wait.until(EC.presence_of_element_located((By.XPATH, btn_cmfb_xpath)))
                # Find the target button...
                btn_cmfb = browser.find_element_by_xpath(btn_cmfb_xpath)
                # ...and click it to open the chip-distribution panel.
                btn_cmfb.click()
                # Wait until the panel's data is rendered, otherwise
                # _parse_page would see an empty container.
                wait = WebDriverWait(browser, 10)
                # wait.until(EC.presence_of_all_elements_located((By.XPATH, "//div[@class='__emchatrs3_cmfb']" )))
                # wait.until(EC.visibility_of_element_located((By.XPATH, "//div[@class='__emchatrs3_cmfb']" )))
                wait.until(EC.text_to_be_present_in_element((By.XPATH,"//div[@class='__emchatrs3_cmfb']"),u'集中度'))
                return True
            except Exception as e:
                print("[INFO] %s%s" % (e,url))
                retry_time = retry_time + 1
    def get_current(self,code):
        """Return today's chip-distribution dict for `code`
        (empty dict if the page could not be loaded)."""
        # data_list = []
        data={}
        url = self._get_url(code)
        # Bail out immediately if the page failed to load.
        if not self._load_web(url):
            print("网页加载失败: " + url)
            return data
        data = self._parse_page()
        return data
        # data_list.append(data)
        # print(data_list)
        # return data_list
    def get_history(self,code):
        """Scrape historical chip distribution by sweeping the real mouse
        cursor across the chart (each x position reveals one day's data).

        Requires a visible, maximised browser (load_parameter !=
        'silent') and only supports 1366- or 1920-pixel-wide screens,
        whose chart pixel coordinates are hard-coded below.
        Returns a list of dicts, de-duplicated and sorted by date.
        """
        data_list = []
        url = self._get_url(code)
        # Bail out immediately if the page failed to load.
        if not self._load_web(url):
            print("网页加载失败: " + url)
            return data_list
        browser = self.browser
        # Scroll the chart container into the visible area (usually to
        # the top of the viewport) so screen coordinates are predictable.
        target = browser.find_element_by_xpath("//div[@class='kr-box']")
        # target = browser.find_element_by_xpath(btn_cmfb_xpath)
        browser.execute_script("arguments[0].scrollIntoView();", target)
        time.sleep(2)
        # Pick the sweep start/end/step in *screen* pixels per resolution.
        START_X = 0
        START_Y = 0
        END_X = 0
        MOVE_X = 0
        screenWidth,screenHeight = pyautogui.size()
        print("screenWidth : " + str(screenWidth))
        if screenWidth == 1366 :
            START_X = 350
            END_X = 996
            START_Y = 485
            MOVE_X = 8
        elif screenWidth == 1920:
            START_X = 544
            END_X = 1350
            START_Y = 666
            MOVE_X = 10
        else:
            print("不能匹配到屏幕尺寸,请增加")
            return
        currentX= START_X
        currentY= START_Y
        pyautogui.moveTo(START_X, START_Y)
        time.sleep(2)
        while currentX < END_X:
            data = self._parse_page()
            data_list.append(data)
            # # Move the mouse MOVE_X pixels to the right.
            currentX = currentX + MOVE_X
            pyautogui.moveTo(currentX, currentY)
            # Wait for the panel to refresh for the new cursor position,
            # otherwise the next parse would read stale/empty data.
            wait = WebDriverWait(browser, 10)
            # wait.until(EC.presence_of_all_elements_located((By.XPATH, "//div[@class='__emchatrs3_cmfb']" )))
            # wait.until(EC.visibility_of_element_located((By.XPATH, "//div[@class='__emchatrs3_cmfb']" )))
            wait.until(EC.text_to_be_present_in_element((By.XPATH,"//div[@class='__emchatrs3_cmfb']"),u'集中度'))
        # data_list needs de-duplication and sorting (adjacent sweep
        # positions can land on the same trading day).
        print(data_list)
        print(len(data_list))
        data_list = self._distinct(data_list, '日期')
        print(len(data_list))
        print(data_list)
        return data_list
    # Sort a list of dicts by `key` and drop duplicates of that key.
    def _distinct(self, items,key, reverse=False):
        """Return `items` sorted by dict field `key`, keeping only the
        first entry of each group of equal keys."""
        key = itemgetter(key)
        items = sorted(items, key=key, reverse=reverse)
        return [next(v) for _, v in groupby(items, key=key)]
return [next(v) for _, v in groupby(items, key=key)]
def main():
    """Ad-hoc demo entry point for the scraper."""
    # http://quote.eastmoney.com/concept/sz000002.html
    # test = CMBFData()
    # test.get_data_current('000002')
    # History scraping drives the real mouse, so the browser must be
    # visible (any non-'silent' parameter works).
    test = CMFBData('show_browser')
    test.get_history('000002')
    # test.getData('000002')
    # test.getData('601318')
    # test.getData('300002')
if __name__ == '__main__':
    main()
| [
"linux_wang@hotmail.com"
] | linux_wang@hotmail.com |
07a9fc7adfc59bf87b0afadb1f76cb8901a55350 | c7eb867e0a6d00a319941164e6c90497f37016f9 | /Buyer/views.py | 6e1a474ad9c0b5be88186519c216d408d90e7e1e | [] | no_license | d107286/E-shop_project | 3dc408171509ca3f68ae3b86fe03f4f0ab4fa775 | 27cc04d70e3c0c49fefa4ef1a9a0ef16166fd02e | refs/heads/master | 2022-12-15T02:23:36.201145 | 2019-10-08T09:17:29 | 2019-10-08T09:17:29 | 213,475,506 | 0 | 0 | null | 2022-12-04T15:01:57 | 2019-10-07T20:02:51 | CSS | UTF-8 | Python | false | false | 8,254 | py | import hashlib
import time,datetime
from Buyer.models import *
from Seller.models import *
from alipay import AliPay
from Seller.views import setPassword
from django.http import JsonResponse
from django.shortcuts import render,HttpResponseRedirect,HttpResponse
from Qshop.settings import alipay_public_key_string,alipay_private_key_string
def LoginValid(fun):
    """Decorator that only admits authenticated buyers.

    A request counts as logged in when the `username` cookie matches the
    `username` stored in the server-side session; anything else is
    redirected to the buyer login page.
    """
    def inner(request, *args, **kwargs):
        cookie_user = request.COOKIES.get("username")
        session_user = request.session.get("username")
        if cookie_user and session_user and cookie_user == session_user:
            return fun(request, *args, **kwargs)
        else:
            # BUG FIX: the target must be absolute.  The old relative
            # "Buyer/login/" resolved against the current path (e.g.
            # /Buyer/index/ -> /Buyer/Buyer/login/); every other view in
            # this file uses the absolute '/Buyer/login/'.
            return HttpResponseRedirect("/Buyer/login/")
    return inner
def register(request):
    """Buyer sign-up: render the form on GET, create the account on POST.

    The password is stored hashed via setPassword.  On a password
    mismatch the function falls through and re-renders the form.
    """
    if request.method == 'POST':
        username = request.POST.get('user_name')
        password = request.POST.get('pwd')
        email = request.POST.get('email')
        db_password = request.POST.get('cpwd')
        # Only create the account when both password fields agree.
        if password == db_password:
            user = LoginUser(
                username=username,
                password=setPassword(password),
                email=email,
            )
            user.save()
            return HttpResponseRedirect('/Buyer/login/')
    return render(request, 'buyer/register.html', locals())
def login(request):
    """Buyer login: verify the hashed password, then set the auth cookies
    and session entry that LoginValid cross-checks."""
    if request.method == "POST":
        password = request.POST.get("pwd")
        email = request.POST.get("email")
        user = LoginUser.objects.filter(email=email).first()
        if user:
            db_password = user.password
            # Hash the submitted password the same way register() did.
            password = setPassword(password)
            if db_password == password:
                response = HttpResponseRedirect('/Buyer/index/')
                # Cookies mirror the session so LoginValid can compare them.
                for key, value in (("username", user.username),
                                   ("user_id", user.id),
                                   ("email", user.email)):
                    response.set_cookie(key, value)
                request.session['username'] = user.username
                return response
    return render(request, 'buyer/login.html', locals())
def index(request):
    """Buyer landing page: for every goods type, show up to the four
    newest products."""
    goods_type = GoodsType.objects.all()
    result = []
    for ty in goods_type:
        # Newest products first; cap each category at four items.
        goods = ty.goods_set.order_by("-goods_pro_time")
        if len(goods) > 4:
            goods = goods[:4]
        result.append({"type": ty, "goods_list": goods})
    # (removed a leftover debug print(result) that spammed stdout on
    # every page view)
    return render(request, 'buyer/index.html', locals())
def goods_list(request):
    """Search/browse page.

    `type=t` browses by category (keywords carries a GoodsType id);
    `type=k` searches goods names by substring.  The first fifth
    (rounded up) of the results is surfaced as `recommend`.
    """
    request_type = request.GET.get("type")
    keyword = request.GET.get("keywords")
    goods_list = []
    if request_type == "t" and keyword:
        # Browse by category: keyword is the GoodsType primary key.
        id = int(keyword)
        goods_type = GoodsType.objects.get(id=id)
        goods_list = goods_type.goods_set.order_by("-goods_pro_time")
    elif request_type == "k" and keyword:
        # Keyword search on the goods name, newest first.
        goods_list = Goods.objects.filter(goods_name__contains=keyword).order_by("-goods_pro_time")
    # Buyer-page recommendation: first ceil(n/5) of the results.
    if goods_list:
        lenth = (len(goods_list) + 4) // 5
        recommend = goods_list[:lenth]
    return render(request, "buyer/goods_list.html", locals())
def goods_detail(request,id):
    """Detail page for a single product, looked up by primary key."""
    goods = Goods.objects.get(id = int(id))
    return render(request,"buyer/detail.html",locals())
@LoginValid
def user_info(request):
    """Buyer profile page; login required (enforced by @LoginValid)."""
    return render(request,"buyer/user_info.html",locals())
@LoginValid
def add_cart(request):
    """AJAX endpoint: put `count` units of a product into the buyer's cart.

    Returns JSON {"code": 200, "data": ...} on success, code 500 for a
    non-POST request.  The cart row snapshots the product's current
    name, price and picture.
    """
    result = {"code": 200, "data": ""}
    if request.method != "POST":
        # Only POST is accepted for mutations.
        result["code"] = 500
        result["data"] = "请求方式错误"
        return JsonResponse(result)
    id = int(request.POST.get("goods_id"))
    count = int(request.POST.get("count", 1))
    goods = Goods.objects.get(id=id)  # product being added
    cart = Cart(
        goods_name=goods.goods_name,
        goods_number=count,
        goods_price=goods.goods_price,
        goods_picture=goods.picture,
        goods_total=goods.goods_price * count,
        goods_id=id,
        cart_user=request.COOKIES.get("user_id"),
    )
    cart.save()
    result["data"] = "加入购物车成功"
    return JsonResponse(result)
def cart(request):
    """Render the buyer's cart page together with the item count."""
    user_id = request.COOKIES.get("user_id")
    goods = Cart.objects.filter(cart_user=int(user_id))
    count = goods.count()
    return render(request,"buyer/cart.html",locals())
@LoginValid
def pay_order(request):
    """Create a PayOrder with one OrderInfo line for a single product,
    then render the payment page.

    Mirrors pay_order_more, which handles multi-item checkout.
    """
    goods_id = request.GET.get("goods_id")
    count = request.GET.get("count")
    if goods_id and count:
        # Order header first; the total is filled in once the line is priced.
        order = PayOrder()
        order.order_number = str(time.time()).replace(".", "")
        order.order_data = datetime.datetime.now()
        order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))  # the buyer
        order.save()
        # Order line, snapshotting the product's current price/picture.
        goods = Goods.objects.get(id=int(goods_id))
        order_info = OrderInfo()
        # BUG FIX: link the line to its order — pay_order_more sets
        # order_id, but this single-item path never did, leaving the
        # OrderInfo row orphaned.
        order_info.order_id = order
        order_info.goods_id = goods_id
        order_info.goods_picture = goods.picture
        order_info.goods_name = goods.goods_name
        order_info.goods_count = int(count)
        order_info.goods_price = goods.goods_price
        order_info.goods_total_price = goods.goods_price * int(count)
        order_info.store_id = goods.goods_store
        order_info.save()
        order.order_total = order_info.goods_total_price
        order.save()
    return render(request, "buyer/pay_order.html", locals())
@LoginValid
def pay_order_more(request):
    """Multi-item checkout from the cart.

    The cart form submits pairs of GET parameters: `check_<goods_id>`
    (the selected checkbox) and `count_<goods_id>` (quantity).  One
    PayOrder is created with one OrderInfo line per selected product,
    and the order total is the sum of the line totals.
    """
    data = request.GET
    data_item = data.items()
    request_data = []
    for key, value in data_item:
        if key.startswith("check_"):
            # "check_42" -> goods id 42; pair it with "count_42".
            goods_id = key.split("_", 1)[1]
            count = data.get("count_" + goods_id)
            request_data.append((int(goods_id), int(count)))
    if request_data:
        # Order header first; the total is accumulated below.
        order = PayOrder()
        order.order_number = str(time.time()).replace(".", "")
        order.order_data = datetime.datetime.now()
        order.order_user = LoginUser.objects.get(id=int(request.COOKIES.get("user_id")))
        order.save()
        # One OrderInfo line per selected product, snapshotting price/picture.
        order_total = 0
        for goods_id, count in request_data:
            # (removed a leftover debug print(goods_id, count))
            goods = Goods.objects.get(id=int(goods_id))
            order_info = OrderInfo()
            order_info.order_id = order
            order_info.goods_id = goods_id
            order_info.goods_picture = goods.picture
            order_info.goods_name = goods.goods_name
            order_info.goods_count = int(count)
            order_info.goods_price = goods.goods_price
            order_info.goods_total_price = goods.goods_price*int(count)
            order_info.store_id = goods.goods_store  # the seller of this product
            order_info.save()
            order_total += order_info.goods_total_price
        order.order_total = order_total
        order.save()
    return render(request, 'buyer/pay_order.html', locals())
def AliPayViews(request):
    """Redirect the buyer to the Alipay gateway to pay an order.

    NOTE(review): the appid and the openapi.alipaydev.com host are the
    Alipay *sandbox*; both are hard-coded — confirm before production.
    """
    order_number = request.GET.get("order_number")
    order_total = request.GET.get("order_total")
    # Instantiate the Alipay client (keys come from settings).
    alipay = AliPay(
        appid="2016101200667752",
        app_notify_url=None,
        app_private_key_string=alipay_private_key_string,
        alipay_public_key_string=alipay_public_key_string,
        sign_type="RSA2"
    )
    # Build the signed page-pay order string.
    order_string = alipay.api_alipay_trade_page_pay(
        out_trade_no=order_number, # our order number
        total_amount=str(order_total), # amount, must be a string
        subject="生鲜交易", # payment subject shown to the user
        return_url="http://127.0.0.1:8000/Buyer/pay_result/",
        notify_url="http://127.0.0.1:8000/Buyer/pay_result/"
    ) # web page-pay order
    result = "https://openapi.alipaydev.com/gateway.do?" + order_string
    return HttpResponseRedirect(result)
def pay_result(request):
    """Alipay return/notify landing page: mark the order paid (status 1)
    and render the result.

    NOTE(review): assumes Alipay only redirects here on success and does
    not verify the gateway signature — confirm before production.
    """
    out_trade_no = request.GET.get("out_trade_no")
    if out_trade_no:
        order = PayOrder.objects.get(order_number=out_trade_no)
        order.order_status = 1
        order.save()
    return render(request, 'buyer/pay_result.html', locals())
# Create your views here.
| [
"d1072@qq.com"
] | d1072@qq.com |
b8cc3ec6b1a4e85d4427b52f7d26759a67d215e6 | a5f733362ced4fad887cc500e0b264d01830e10b | /image_reader.py | 58bb6b83244258f42ea7490aab0d8defffd962b5 | [] | no_license | zenglh666/SparseNet | 84ce117a54fcebe2eca79a90b9dc39353bb18a6b | 8a0586815fccfff9a2d8fa0a94e4343869dff38d | refs/heads/master | 2021-09-14T20:04:54.375416 | 2018-05-18T15:17:31 | 2018-05-18T15:17:31 | 125,369,453 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,958 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_integer('num_preprocess_threads', 16,
"""Number of preprocessing threads per tower. """
"""Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 8,
"""Number of parallel readers during train.""")
tf.app.flags.DEFINE_string('mean_file', 'F:/data/imagenet_mean.npy',
"""Path to the imagenet data directory.""")
tf.app.flags.DEFINE_boolean('distort_color',False,
'''If we distort color''')
def create_data_batch(dataset, batch_size, num_preprocess_threads=None):
  """Build the input pipeline for `dataset` and return (images, labels).

  Args:
    dataset: instance of the Dataset class; its `subset` attribute must
      be 'train' or 'validation'.
    batch_size: integer batch size.
    num_preprocess_threads: optional integer; defaults inside
      batch_inputs to FLAGS.num_preprocess_threads.

  Returns:
    images: 4-D float Tensor batch, labels: 1-D int Tensor [batch_size].

  Raises:
    ValueError: if dataset.subset is neither 'train' nor 'validation'.
  """
  # The two original branches were identical apart from the train flag;
  # an unknown subset now fails loudly instead of raising a NameError.
  if dataset.subset == 'train':
    train = True
  elif dataset.subset == 'validation':
    train = False
  else:
    raise ValueError('Unknown dataset subset: %s' % dataset.subset)
  images, labels = batch_inputs(
      dataset, batch_size, train=train,
      num_preprocess_threads=num_preprocess_threads,
      num_readers=FLAGS.num_readers)
  return images, labels
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
                 num_readers=None):
  """Contruct batches of training or evaluation examples from the image dataset.

  Pipeline: filename queue -> (one or more) record readers feeding an
  examples queue (shuffling for train, FIFO for eval) -> per-thread
  parse + preprocess -> batch_join.

  Args:
    dataset: instance of Dataset class specifying the dataset.
      See dataset.py for details.
    batch_size: integer
    train: boolean
    num_preprocess_threads: integer, total number of preprocessing threads
    num_readers: integer, number of parallel readers

  Returns:
    images: 4-D float Tensor of a batch of images
    labels: 1-D integer Tensor of [batch_size].

  Raises:
    ValueError: if data is not found
  """
  with tf.name_scope('batch_processing'):
    data_files = dataset.data_files()
    if data_files is None:
      raise ValueError('No data files found for this dataset')

    # Create filename_queue
    # NOTE(review): both branches are identical — the train branch
    # presumably intended shuffle=True; confirm against the original
    # Inception pipeline.
    if train:
      filename_queue = tf.train.string_input_producer(data_files)
    else:
      filename_queue = tf.train.string_input_producer(data_files)
    if num_preprocess_threads is None:
      num_preprocess_threads = FLAGS.num_preprocess_threads

    # batch_join below fans out one preprocessing chain per thread in
    # groups; the flag doc requires a multiple of 4.
    if num_preprocess_threads % 4:
      raise ValueError('Please make num_preprocess_threads a multiple '
                       'of 4 (%d % 4 != 0).', num_preprocess_threads)

    if num_readers is None:
      num_readers = FLAGS.num_readers

    if num_readers < 1:
      raise ValueError('Please make num_readers at least 1')

    # Approximate number of examples per shard.
    examples_per_shard = 1024
    # Size the random shuffle queue to balance between good global
    # mixing (more examples) and memory use (fewer examples).
    # 1 image uses 299*299*3*4 bytes = 1MB
    # The default input_queue_memory_factor is 16 implying a shuffling queue
    # size: examples_per_shard * 16 * 1MB = 17.6GB
    min_queue_examples = examples_per_shard
    if train:
      # Shuffle serialized examples for training.
      examples_queue = tf.RandomShuffleQueue(
          capacity=min_queue_examples + 8 * batch_size,
          min_after_dequeue=min_queue_examples,
          dtypes=[tf.string])
    else:
      # Deterministic order for evaluation.
      examples_queue = tf.FIFOQueue(
          capacity=examples_per_shard + 8 * batch_size,
          dtypes=[tf.string])

    # Create multiple readers to populate the queue of examples.
    if num_readers > 1:
      enqueue_ops = []
      for _ in range(num_readers):
        reader = dataset.reader()
        _, value = reader.read(filename_queue)
        enqueue_ops.append(examples_queue.enqueue([value]))

      tf.train.queue_runner.add_queue_runner(
          tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
      example_serialized = examples_queue.dequeue()
    else:
      # Single reader: skip the intermediate queue entirely.
      reader = dataset.reader()
      _, example_serialized = reader.read(filename_queue)

    # Per-channel mean image (scaled to [0,1)) for ImageNet variants.
    # NOTE(review): np.transpose is applied to the result of tf.divide
    # (a Tensor, not an ndarray) — verify this actually transposes as
    # intended rather than relying on NumPy/TF interop.
    if dataset.name=='imagenet' or dataset.name=='imagenet_scale':
      mean_array = np.transpose(tf.divide(np.load(FLAGS.mean_file), 256.), (2,1,0))
      mean_tensor = tf.convert_to_tensor(mean_array, tf.float32)
    else:
      mean_tensor = None

    # One parse+preprocess chain per thread; batch_join interleaves them.
    images_and_labels = []
    for thread_id in range(num_preprocess_threads):
      # Parse a serialized Example proto to extract the image and metadata.
      image, label_index = dataset.parse_from_string(example_serialized)
      image = image_preprocessing(
          image, train, dataset.resize_size, dataset.crop_size, mean_tensor, thread_id)
      images_and_labels.append([image, label_index])

    images, label_index_batch = tf.train.batch_join(
        images_and_labels,
        batch_size=batch_size,
        capacity= num_preprocess_threads * batch_size)

    return images, tf.reshape(label_index_batch, [batch_size])
def image_preprocessing(image, train, resize_size, crop_size, mean=None, thread_id=0):
  """Preprocess one decoded image for evaluation or training.

  Args:
    image: 3-D float Tensor, already decoded.
    train: boolean; True applies random crop/flip, False a center crop.
    resize_size: integer, side length the image is resized to first.
    crop_size: integer, side length of the final (random or center) crop.
    mean: optional 3-D float Tensor mean image; when given, it is
      subtracted before cropping and the result is rescaled by 128
      instead of per-image standardization.
    thread_id: integer indicating preprocessing thread.

  Returns:
    3-D float Tensor containing an appropriately scaled image
  """
  image = tf.image.resize_images(
      image, [resize_size, resize_size])
  if mean is not None:
    image = tf.subtract(image, mean)
  if train:
    # NOTE(review): verify the positional argument order of this call
    # against distort_image's signature — thread_id appears to land in
    # the wrong parameter slot.
    image = distort_image(image, crop_size, crop_size, thread_id)
  else:
    image = eval_image(image, crop_size, crop_size)
  # Mean-subtracted images are rescaled; otherwise normalize per image.
  if mean is not None:
    image = tf.multiply(image, 128.)
  else:
    image = tf.image.per_image_standardization(image)
  return image
def distort_image(image, height, width, thread_id=0, scope=None, bbox=None):
  """Distort one image for training a network.

  Applies a random crop to (height, width), a random horizontal flip,
  and — when FLAGS.distort_color is set — thread-dependent color
  distortion.

  BUG FIX: `bbox` used to sit between `width` and `thread_id`, but the
  only caller (image_preprocessing) passes four positional arguments,
  so thread_id was silently bound to bbox and the real thread_id stayed
  0.  `bbox` was never used beyond the name_scope values list, so it is
  moved to the end (default None) purely for signature compatibility.

  Args:
    image: 3-D float Tensor of image
    height: integer, crop height.
    width: integer, crop width.
    thread_id: integer indicating the preprocessing thread.
    scope: Optional scope for name_scope.
    bbox: unused; retained for backward compatibility.

  Returns:
    3-D float Tensor of distorted image used for training.
  """
  with tf.name_scope(values=[image, height, width], name=scope,
                     default_name='distort_image'):
    # Random crop to the target size; channel count is fixed at 3 (RGB).
    distorted_image = tf.random_crop(image, [height, width, 3])
    # Restore the static shape lost by the dynamic crop.
    distorted_image.set_shape([height, width, 3])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    # Randomly distort the colors (ordering varies by thread).
    if FLAGS.distort_color:
      distorted_image = distort_color(distorted_image, thread_id)
    return distorted_image
def eval_image(image, height, width, scope=None):
  """Prepare one image for evaluation by center-cropping (or padding)
  it to exactly (height, width).

  Args:
    image: 3-D float Tensor.
    height: integer, target height.
    width: integer, target width.
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor of the prepared image.
  """
  with tf.name_scope(values=[image, height, width], name=scope,
                     default_name='eval_image'):
    # Deterministic central crop/pad — no randomness at eval time.
    return tf.image.resize_image_with_crop_or_pad(image, height, width)
def distort_color(image, thread_id=0, scope=None):
  """Distort the color of the image.

  Each color distortion is non-commutative, so the ordering matters.
  Rather than randomly permuting the ops, each preprocessing thread
  gets one of two fixed orderings based on its parity.

  Args:
    image: Tensor containing single image.
    thread_id: preprocessing thread ID.
    scope: Optional scope for name_scope.

  Returns:
    color-distorted image
  """
  with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
    brightness = lambda img: tf.image.random_brightness(img, max_delta=32. / 255.)
    saturation = lambda img: tf.image.random_saturation(img, lower=0.5, upper=1.5)
    hue = lambda img: tf.image.random_hue(img, max_delta=0.2)
    contrast = lambda img: tf.image.random_contrast(img, lower=0.5, upper=1.5)
    # Two fixed, non-commutative orderings, chosen by thread parity.
    if thread_id % 2 == 0:
      pipeline = (brightness, saturation, hue, contrast)
    else:
      pipeline = (brightness, contrast, saturation, hue)
    for op in pipeline:
      image = op(image)
    return image
"zenglh@outlook.com"
] | zenglh@outlook.com |
1babf3615721b1fdb611c2f462dddbe3f692de44 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/proc/procmemhist1d.py | be140757889fe189e82b006962eee9f8a0791f1e | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 16,922 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class ProcMemHist1d(Mo):
"""
A class that represents historical statistics for Process memory in a 1 day sampling interval. This class updates every hour.
"""
meta = StatsClassMeta("cobra.model.proc.ProcMemHist1d", "Process memory")
counter = CounterMeta("used", CounterCategory.GAUGE, "kB", "Used memory")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "usedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "usedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "usedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "usedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "usedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "usedTr"
meta._counters.append(counter)
counter = CounterMeta("alloced", CounterCategory.GAUGE, "kB", "Allocated memory")
counter._propRefs[PropCategory.IMPLICIT_MIN] = "allocedMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "allocedMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "allocedAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "allocedSpct"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "allocedThr"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "allocedTr"
meta._counters.append(counter)
meta.moClassName = "procProcMemHist1d"
meta.rnFormat = "HDprocProcMem1d-%(index)s"
meta.category = MoCategory.STATS_HISTORY
meta.label = "historical Process memory stats in 1 day"
meta.writeAccessMask = 0x800000000000001
meta.readAccessMask = 0x800000000000001
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = True
meta.parentClasses.add("cobra.model.proc.Proc")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Hist")
meta.superClasses.add("cobra.model.proc.ProcMemHist")
meta.rnPrefixes = [
('HDprocProcMem1d-', True),
]
prop = PropMeta("str", "allocedAvg", "allocedAvg", 10623, PropCategory.IMPLICIT_AVG)
prop.label = "Allocated memory average value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedAvg", prop)
prop = PropMeta("str", "allocedMax", "allocedMax", 10622, PropCategory.IMPLICIT_MAX)
prop.label = "Allocated memory maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedMax", prop)
prop = PropMeta("str", "allocedMin", "allocedMin", 10621, PropCategory.IMPLICIT_MIN)
prop.label = "Allocated memory minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedMin", prop)
prop = PropMeta("str", "allocedSpct", "allocedSpct", 10624, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Allocated memory suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedSpct", prop)
prop = PropMeta("str", "allocedThr", "allocedThr", 10625, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Allocated memory thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("allocedThr", prop)
prop = PropMeta("str", "allocedTr", "allocedTr", 10626, PropCategory.IMPLICIT_TREND)
prop.label = "Allocated memory trend"
prop.isOper = True
prop.isStats = True
meta.props.add("allocedTr", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "index", "index", 7047, PropCategory.REGULAR)
prop.label = "History Index"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
meta.props.add("index", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "usedAvg", "usedAvg", 10644, PropCategory.IMPLICIT_AVG)
prop.label = "Used memory average value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedAvg", prop)
prop = PropMeta("str", "usedMax", "usedMax", 10643, PropCategory.IMPLICIT_MAX)
prop.label = "Used memory maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedMax", prop)
prop = PropMeta("str", "usedMin", "usedMin", 10642, PropCategory.IMPLICIT_MIN)
prop.label = "Used memory minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("usedMin", prop)
prop = PropMeta("str", "usedSpct", "usedSpct", 10645, PropCategory.IMPLICIT_SUSPECT)
prop.label = "Used memory suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("usedSpct", prop)
prop = PropMeta("str", "usedThr", "usedThr", 10646, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "Used memory thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("usedThr", prop)
prop = PropMeta("str", "usedTr", "usedTr", 10647, PropCategory.IMPLICIT_TREND)
prop.label = "Used memory trend"
prop.isOper = True
prop.isStats = True
meta.props.add("usedTr", prop)
meta.namingProps.append(getattr(meta.props, "index"))
def __init__(self, parentMoOrDn, index, markDirty=True, **creationProps):
namingVals = [index]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"rrishike@cisco.com"
] | rrishike@cisco.com |
7563e483382a3bdedfe13cf2c4924a569db4553f | 7ea5c45401947eaa56c7abb571fc5968aa74abd1 | /python入门/day_2_列表/2-4-使用方法sort()对表进行永久性排序.py | 4253177b9bd7302c0a42c212052745afe12572d8 | [] | no_license | jihongsheng/python3 | a901d47c7a46054360f5efe8087ad0f958981945 | 12e2d5bf29bc8c1d16f05e6afcbc6f70530d0d6d | refs/heads/master | 2020-05-16T22:18:50.210424 | 2019-05-14T00:53:39 | 2019-05-14T00:53:39 | 183,331,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | # -*- coding: UTF-8 -*-
# "Python方法sort() 让你能够较为轻松地对列表进行排序。假设你有一个汽车列表,
# 并要让其中的汽车按字母顺序排列。为简化这项任务,我们假设该列表中的所有值都是小写的。"
cars = ['bmw', 'audi', 'toyota', 'subaru']
# 方法sort();永久性地修改了列表元素的排列顺序。现在,汽车是按字母顺序排列的,再也无法恢复到原来的排列顺序:
cars.sort()
print(cars)
print("-" * 80)
# 你还可以按与字母顺序相反的顺序排列列表元素,为此,只需向sort() 方法传递参数reverse=True 。
# 下面的示例将汽车列表按与字母顺序相反的顺序排列:
cars = ['bmw', 'audi', 'toyota', 'subaru']
cars.sort(reverse=True)
print(cars)
print("-" * 80)
# 同样,对列表元素排列顺序的修改是永久性的:
| [
"6909283@qq.com"
] | 6909283@qq.com |
c6542cc43626f8f84ea23c20a4772e3b37428c22 | 2db0345c2f85761d63defa95c9685dfec1927f0c | /quantumflow/decompositions.py | 1c2062c630a4fb5b1caf7a2ee51e2d30ce07374e | [
"Apache-2.0"
] | permissive | go-bears/quantumflow | fe64dc50d4859032ec5c6228ee280ab8865550a2 | 4e02de5c575d113599aaa787153afd73382228db | refs/heads/master | 2021-06-09T11:34:53.227873 | 2019-03-18T22:27:08 | 2019-03-18T22:27:08 | 155,956,527 | 0 | 0 | Apache-2.0 | 2021-04-29T19:30:38 | 2018-11-03T06:21:38 | Python | UTF-8 | Python | false | false | 12,114 | py |
# Copyright 2016-2018, Rigetti Computing
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
"""
QuantumFlow Gate Decompositions
"""
from typing import Sequence, Tuple
import itertools
import numpy as np
from numpy import pi
from .qubits import asarray
from .config import TOLERANCE
from .gates import Gate
from .measures import gates_close
from .stdgates import RN, CANONICAL, TZ, TY
from .circuits import Circuit
__all__ = ['bloch_decomposition',
'zyz_decomposition',
'kronecker_decomposition',
'canonical_decomposition',
'canonical_coords']
def bloch_decomposition(gate: Gate) -> Circuit:
"""
Converts a 1-qubit gate into a RN gate, a 1-qubit rotation of angle theta
about axis (nx, ny, nz) in the Bloch sphere.
Returns:
A Circuit containing a single RN gate
"""
if gate.qubit_nb != 1:
raise ValueError('Expected 1-qubit gate')
U = asarray(gate.asoperator())
U /= np.linalg.det(U) ** (1/2)
nx = - U[0, 1].imag
ny = - U[0, 1].real
nz = - U[0, 0].imag
N = np.sqrt(nx**2 + ny**2 + nz**2)
if N == 0: # Identity
nx, ny, nz = 1, 1, 1
else:
nx /= N
ny /= N
nz /= N
sin_halftheta = N
cos_halftheta = U[0, 0].real
theta = 2 * np.arctan2(sin_halftheta, cos_halftheta)
# We return a Circuit (rather than just a gate) to keep the
# interface of decomposition routines uniform.
return Circuit([RN(theta, nx, ny, nz, *gate.qubits)])
# DOCME TESTME
def zyz_decomposition(gate: Gate) -> Circuit:
"""
Returns the Euler Z-Y-Z decomposition of a local 1-qubit gate.
"""
if gate.qubit_nb != 1:
raise ValueError('Expected 1-qubit gate')
q, = gate.qubits
U = asarray(gate.asoperator())
U /= np.linalg.det(U) ** (1/2) # SU(2)
if abs(U[0, 0]) > abs(U[1, 0]):
theta1 = 2 * np.arccos(min(abs(U[0, 0]), 1))
else:
theta1 = 2 * np.arcsin(min(abs(U[1, 0]), 1))
cos_halftheta1 = np.cos(theta1/2)
if not np.isclose(cos_halftheta1, 0.0):
phase = U[1, 1] / cos_halftheta1
theta0_plus_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_plus_theta2 = 0.0
sin_halftheta1 = np.sin(theta1/2)
if not np.isclose(sin_halftheta1, 0.0):
phase = U[1, 0] / sin_halftheta1
theta0_sub_theta2 = 2 * np.arctan2(np.imag(phase), np.real(phase))
else:
theta0_sub_theta2 = 0.0
theta0 = (theta0_plus_theta2 + theta0_sub_theta2) / 2
theta2 = (theta0_plus_theta2 - theta0_sub_theta2) / 2
t0 = theta0/np.pi
t1 = theta1/np.pi
t2 = theta2/np.pi
circ1 = Circuit()
circ1 += TZ(t2, q)
circ1 += TY(t1, q)
circ1 += TZ(t0, q)
return circ1
def kronecker_decomposition(gate: Gate) -> Circuit:
"""
Decompose a 2-qubit unitary composed of two 1-qubit local gates.
Uses the "Nearest Kronecker Product" algorithm. Will give erratic
results if the gate is not the direct product of two 1-qubit gates.
"""
# An alternative approach would be to take partial traces, but
# this approach appears to be more robust.
if gate.qubit_nb != 2:
raise ValueError('Expected 2-qubit gate')
U = asarray(gate.asoperator())
rank = 2**gate.qubit_nb
U /= np.linalg.det(U) ** (1/rank)
R = np.stack([U[0:2, 0:2].reshape(4),
U[0:2, 2:4].reshape(4),
U[2:4, 0:2].reshape(4),
U[2:4, 2:4].reshape(4)])
u, s, vh = np.linalg.svd(R)
v = vh.transpose()
A = (np.sqrt(s[0]) * u[:, 0]).reshape(2, 2)
B = (np.sqrt(s[0]) * v[:, 0]).reshape(2, 2)
q0, q1 = gate.qubits
g0 = Gate(A, qubits=[q0])
g1 = Gate(B, qubits=[q1])
if not gates_close(gate, Circuit([g0, g1]).asgate()):
raise ValueError("Gate cannot be decomposed into two 1-qubit gates")
circ = Circuit()
circ += zyz_decomposition(g0)
circ += zyz_decomposition(g1)
assert gates_close(gate, circ.asgate()) # Sanity check
return circ
def canonical_coords(gate: Gate) -> Sequence[float]:
"""Returns the canonical coordinates of a 2-qubit gate"""
circ = canonical_decomposition(gate)
gate = circ.elements[6] # type: ignore
params = [gate.params[key] for key in ('tx', 'ty', 'tz')]
return params
def canonical_decomposition(gate: Gate) -> Circuit:
"""Decompose a 2-qubit gate by removing local 1-qubit gates to leave
the non-local canonical two-qubit gate. [1]_ [2]_ [3]_ [4]_
Returns: A Circuit of 5 gates: two initial 1-qubit gates; a CANONICAL
gate, with coordinates in the Weyl chamber; two final 1-qubit gates
The canonical coordinates can be found in circ.elements[2].params
More or less follows the algorithm outlined in [2]_.
.. [1] A geometric theory of non-local two-qubit operations, J. Zhang,
J. Vala, K. B. Whaley, S. Sastry quant-ph/0291120
.. [2] An analytical decomposition protocol for optimal implementation of
two-qubit entangling gates. M. Blaauboer, R.L. de Visser,
cond-mat/0609750
.. [3] Metric structure of two-qubit gates, perfect entangles and quantum
control, P. Watts, M. O'Conner, J. Vala, Entropy (2013)
.. [4] Constructive Quantum Shannon Decomposition from Cartan Involutions
B. Drury, P. Love, arXiv:0806.4015
"""
# Implementation note: The canonical decomposition is easy. Constraining
# canonical coordinates to the Weyl chamber is easy. But doing the
# canonical decomposition with the canonical gate in the Weyl chamber
# proved to be surprisingly tricky.
# Unitary transform to Magic Basis of Bell states
Q = np.asarray([[1, 0, 0, 1j],
[0, 1j, 1, 0],
[0, 1j, -1, 0],
[1, 0, 0, -1j]]) / np.sqrt(2)
Q_H = Q.conj().T
if gate.qubit_nb != 2:
raise ValueError('Expected 2-qubit gate')
U = asarray(gate.asoperator())
rank = 2**gate.qubit_nb
U /= np.linalg.det(U) ** (1/rank) # U is in SU(4) so det U = 1
U_mb = Q_H @ U @ Q # Transform gate to Magic Basis [1, (eq. 17, 18)]
M = U_mb.transpose() @ U_mb # Construct M matrix [1, (eq. 22)]
# Diagonalize symmetric complex matrix
eigvals, eigvecs = _eig_complex_symmetric(M)
lambdas = np.sqrt(eigvals) # Eigenvalues of F
# Lambdas only fixed up to a sign. So make sure det F = 1 as it should
det_F = np.prod(lambdas)
if det_F.real < 0:
lambdas[0] *= -1
coords, signs, perm = _constrain_to_weyl(lambdas)
# Construct local and canonical gates in magic basis
lambdas = (lambdas*signs)[perm]
O2 = (np.diag(signs) @ eigvecs.transpose())[perm]
F = np.diag(lambdas)
O1 = U_mb @ O2.transpose() @ F.conj()
# Sanity check: Make sure O1 and O2 are orthogonal
assert np.allclose(np.eye(4), O2.transpose() @ O2) # Sanity check
assert np.allclose(np.eye(4), O1.transpose() @ O1) # Sanity check
# Sometimes O1 & O2 end up with det = -1, instead of +1 as they should.
# We can commute a diagonal matrix through F to fix this up.
neg = np.diag([-1, 1, 1, 1])
if np.linalg.det(O2).real < 0:
O2 = neg @ O2
O1 = O1 @ neg
# Transform gates back from magic basis
K1 = Q @ O1 @ Q_H
A = Q @ F @ Q_H
K2 = Q @ O2 @ Q_H
assert gates_close(Gate(U), Gate(K1 @ A @ K2)) # Sanity check
canon = CANONICAL(coords[0], coords[1], coords[2], 0, 1)
# Sanity check
assert gates_close(Gate(A, qubits=gate.qubits), canon, tolerance=1e-4)
# Decompose local gates into the two component 1-qubit gates
gateK1 = Gate(K1, qubits=gate.qubits)
circK1 = kronecker_decomposition(gateK1)
assert gates_close(gateK1, circK1.asgate()) # Sanity check
gateK2 = Gate(K2, qubits=gate.qubits)
circK2 = kronecker_decomposition(gateK2)
assert gates_close(gateK2, circK2.asgate()) # Sanity check
# Build and return circuit
circ = Circuit()
circ += circK2
circ += canon
circ += circK1
return circ
def _eig_complex_symmetric(M: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
"""Diagonalize a complex symmetric matrix. The eigenvalues are
complex, and the eigenvectors form an orthogonal matrix.
Returns:
eigenvalues, eigenvectors
"""
if not np.allclose(M, M.transpose()):
raise np.linalg.LinAlgError('Not a symmetric matrix')
# The matrix of eigenvectors should be orthogonal.
# But the standard 'eig' method will fail to return an orthogonal
# eigenvector matrix when the eigenvalues are degenerate. However,
# both the real and
# imaginary part of M must be symmetric with the same orthogonal
# matrix of eigenvectors. But either the real or imaginary part could
# vanish. So we use a randomized algorithm where we diagonalize a
# random linear combination of real and imaginary parts to find the
# eigenvectors, taking advantage of the 'eigh' subroutine for
# diagonalizing symmetric matrices.
# This can fail if we're very unlucky with our random coefficient, so we
# give the algorithm a few chances to succeed.
# Empirically, never seems to fail on randomly sampled complex
# symmetric 4x4 matrices.
# If failure rate is less than 1 in a million, then 16 rounds
# will have overall failure rate less than 1 in a googol.
# However, cannot (yet) guarantee that there aren't special cases
# which have much higher failure rates.
# GEC 2018
max_attempts = 16
for _ in range(max_attempts):
c = np.random.uniform(0, 1)
matrix = c * M.real + (1-c) * M.imag
_, eigvecs = np.linalg.eigh(matrix)
eigvecs = np.array(eigvecs, dtype=complex)
eigvals = np.diag(eigvecs.transpose() @ M @ eigvecs)
# Finish if we got a correct answer.
reconstructed = eigvecs @ np.diag(eigvals) @ eigvecs.transpose()
if np.allclose(M, reconstructed):
return eigvals, eigvecs
# Should never happen. Hopefully.
raise np.linalg.LinAlgError(
'Cannot diagonalize complex symmetric matrix.') # pragma: no cover
def _lambdas_to_coords(lambdas: Sequence[float]) -> np.ndarray:
# [2, eq.11], but using [1]s coordinates.
l1, l2, _, l4 = lambdas
c1 = np.real(1j * np.log(l1 * l2))
c2 = np.real(1j * np.log(l2 * l4))
c3 = np.real(1j * np.log(l1 * l4))
coords = np.asarray((c1, c2, c3))/pi
coords[np.abs(coords-1) < TOLERANCE] = -1
if all(coords < 0):
coords += 1
# If we're close to the boundary, floating point errors can conspire
# to make it seem that we're never on the inside
# Fix: If near boundary, reset to boundary
# Left
if np.abs(coords[0] - coords[1]) < TOLERANCE:
coords[1] = coords[0]
# Front
if np.abs(coords[1] - coords[2]) < TOLERANCE:
coords[2] = coords[1]
# Right
if np.abs(coords[0]-coords[1]-1/2) < TOLERANCE:
coords[1] = coords[0]-1/2
# Base
coords[np.abs(coords) < TOLERANCE] = 0
return coords
def _constrain_to_weyl(lambdas: Sequence[float]) \
-> Tuple[np.ndarray, np.ndarray, np.ndarray]:
for permutation in itertools.permutations(range(4)):
for signs in ([1, 1, 1, 1], [1, 1, -1, -1],
[-1, 1, -1, 1], [1, -1, -1, 1]):
signed_lambdas = lambdas * np.asarray(signs)
perm = list(permutation)
lambas_perm = signed_lambdas[perm]
coords = _lambdas_to_coords(lambas_perm)
if _in_weyl(*coords):
return coords, np.asarray(signs), perm
# Should never get here
assert False # pragma: no cover
return None, None, None # pragma: no cover
def _in_weyl(tx: float, ty: float, tz: float) -> bool:
# Note 'tz>0' in second term. This takes care of symmetry across base
# when tz==0
return (1/2 >= tx >= ty >= tz >= 0) or (1/2 >= (1-tx) >= ty >= tz > 0)
| [
"gavin@rigetti.com"
] | gavin@rigetti.com |
c0cf1a93d0c439dc1462cb58e24d03106c4bab96 | 3b65bba3cb558cc7671c43bb78f2733fd71019df | /Control_de_flujo/CF_range_00.py | 74c47bc00db9d46b1d4bf8acc652af61327c4738 | [] | no_license | TeoRojas/Curso_Aprende_Python_con_DBZ | 43c6b78690395337a9a3c6a817668d5a24648367 | 0ac2e5b5ab37cf8a6a10c213d348667dc37e4d5a | refs/heads/main | 2023-05-04T09:28:35.257693 | 2021-05-19T06:21:48 | 2021-05-19T06:21:48 | 344,425,303 | 0 | 0 | null | 2021-03-11T17:54:59 | 2021-03-04T09:51:46 | Python | UTF-8 | Python | false | false | 262 | py | #Cuenta las dominadas que hace Goku en tres series.
for num_serie in range(3):
print("\t\tNúmero de serie: " + str(num_serie))
print("\t\t---------------")
for num_dominada in range(10):
print("Dominada número " + str(num_dominada)) | [
"teofilo.rojas.mata@gmail.com"
] | teofilo.rojas.mata@gmail.com |
77e601a020633f2bd5926dd5641f9599dbe21200 | a15c2500f946df3f96e83f30a1007782acbceb4e | /EulerProblem2.py | fd6c1f555f9c0adc72fc05e5ea5524d2a7e482c9 | [] | no_license | NahidS/python-euler | 69e6460d07b9f7d3fbfbddf050784721b54b9675 | d5500b9e64f75198c38dc9cc7b759bf10a758ce1 | refs/heads/master | 2020-09-07T04:52:44.993839 | 2020-07-24T21:22:31 | 2020-07-24T21:22:31 | 220,661,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | fiboseq = [1, 2]
index = 0
sum = 2
while fiboseq[index] <= 4000000:
index = len(fiboseq)
fiboseq.append(fiboseq[index - 2] + fiboseq[index - 1])
if (fiboseq[index] % 2) == 0:
sum += fiboseq[index]
print(sum) | [
"nahid.seidi@gmail.com"
] | nahid.seidi@gmail.com |
1b8c125997ef3a77fb0b96159c38de62b10bac4d | 52108133711e8c7c1bdb5f39b126dc1108da4ad1 | /guia/capitulo_2/cap2_proyectenv/bin/sqlformat | 1451a0fa3b948ac1b5eda356edd2bed5477401e0 | [] | no_license | overdavid/mi_primer_repositorio | 562be7aa5d205cda06c82cf2d0300f49cba67bb6 | dedbcfefc8c839b5c7cfb1b49f8096b2b14897b4 | refs/heads/master | 2022-12-26T13:26:12.403105 | 2020-10-05T18:53:08 | 2020-10-05T18:53:08 | 296,084,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/home/oc-admin/guia/capitulo_2/cap2_proyectenv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"overdavid.odlm@gmail.com"
] | overdavid.odlm@gmail.com | |
125c3649da85a64187e7b857a6881b67c46acab6 | c140ccf655de5d95087eaf33e84e0b004dc78dbd | /test_package/conanfile.py | c05e00eb39148f55c47a631fc665d2e3dff25bf0 | [] | no_license | kenfred/conan-flatbuffers | fbc4aaed6d26e635b5a91673a80e300cd42efe4c | 73b166984ac9152d9aa16dbfcb900659b0363e77 | refs/heads/master | 2021-04-26T23:04:31.359901 | 2018-03-05T13:49:50 | 2018-03-05T13:49:50 | 123,926,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | from conans import ConanFile, CMake
import os
class FlatbuffersTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
# Current dir is "test_package/build/<build_id>" and CMakeLists.txt is in "test_package"
cmake.configure(source_dir=self.conanfile_directory, build_dir="./")
cmake.build()
def imports(self):
self.copy("*.dll", dst="bin", src="bin")
self.copy("*.dylib*", dst="bin", src="lib")
self.copy('*.so*', dst='bin', src='lib')
def test(self):
os.chdir("bin")
self.run(".%sexample" % os.sep)
| [
"kenfred@gmail.com"
] | kenfred@gmail.com |
50c63fcbad385a2c6ecc0ce98108b05f2c4c4351 | 27a241145cb2cc080aef278e5ca63e62434f61a9 | /.ipynb_checkpoints/train-checkpoint.py | a0aa064ec757b9402c79a88ca9ae327835ef244f | [] | no_license | bharati-21/AZMLND_Optimizing_a_Pipeline_in_Azure | 245da74ae5b4093987b85e6e04ceea8f0ef9ccb7 | 5ac9192bb7270b974e580b5f4d00da563cd2fd75 | refs/heads/master | 2023-02-11T04:30:52.041716 | 2021-01-09T16:16:19 | 2021-01-09T16:16:19 | 327,518,676 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | from sklearn.linear_model import LogisticRegression
import argparse
import os
import numpy as np
from sklearn.metrics import mean_squared_error
import joblib
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
import pandas as pd
from azureml.core.run import Run
from azureml.core import Dataset
from azureml.data.dataset_factory import TabularDatasetFactory
def clean_data(data):
# Dict for cleaning data
months = {"jan":1, "feb":2, "mar":3, "apr":4, "may":5, "jun":6, "jul":7, "aug":8, "sep":9, "oct":10, "nov":11, "dec":12}
weekdays = {"mon":1, "tue":2, "wed":3, "thu":4, "fri":5, "sat":6, "sun":7}
# Clean and one hot encode data
x_df = data.to_pandas_dataframe().dropna()
jobs = pd.get_dummies(x_df.job, prefix="job")
x_df.drop("job", inplace=True, axis=1)
x_df = x_df.join(jobs)
x_df["marital"] = x_df.marital.apply(lambda s: 1 if s == "married" else 0)
x_df["default"] = x_df.default.apply(lambda s: 1 if s == "yes" else 0)
x_df["housing"] = x_df.housing.apply(lambda s: 1 if s == "yes" else 0)
x_df["loan"] = x_df.loan.apply(lambda s: 1 if s == "yes" else 0)
contact = pd.get_dummies(x_df.contact, prefix="contact")
x_df.drop("contact", inplace=True, axis=1)
x_df = x_df.join(contact)
education = pd.get_dummies(x_df.education, prefix="education")
x_df.drop("education", inplace=True, axis=1)
x_df = x_df.join(education)
x_df["month"] = x_df.month.map(months)
x_df["day_of_week"] = x_df.day_of_week.map(weekdays)
x_df["poutcome"] = x_df.poutcome.apply(lambda s: 1 if s == "success" else 0)
y_df = x_df.pop("y").apply(lambda s: 1 if s == "yes" else 0)
return x_df, y_df
def main():
# Add arguments to script
parser = argparse.ArgumentParser()
parser.add_argument('--C', type=float, default=1.0, help="Inverse of regularization strength. Smaller values cause stronger regularization")
parser.add_argument('--max_iter', type=int, default=100, help="Maximum number of iterations to converge")
args = parser.parse_args()
### YOUR CODE HERE ###
# TODO: Create TabularDataset using TabularDatasetFactory
# Data is located at:
url_path = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/bankmarketing_train.csv"
ds = Dataset.Tabular.from_delimited_files(path=url_path)
print(ds.to_pandas_dataframe())
x, y = clean_data(ds)
# TODO: Split data into train and test sets.
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=0.3)
print(x_train.shape, x_test.shape, y_train.shape, y_test.shape)
run = Run.get_context()
run.log("Regularization Strength:", np.float(args.C))
run.log("Max iterations:", np.int(args.max_iter))
model = LogisticRegression(C=args.C, max_iter=args.max_iter).fit(x_train, y_train)
accuracy = model.score(x_test, y_test)
run.log("accuracy", np.float(accuracy))
os.makedirs('./outputs', exist_ok=True)
joblib.dump(value=model,filename='./outputs/model.joblib')
if __name__ == '__main__':
main()
| [
"bharatisharada@gmail.com"
] | bharatisharada@gmail.com |
4426687fcdb98f8446d4f07841bc72249015469b | 5173c3e3956387a3f2ae8fcf4aed7c7a600dac78 | /Programmers/Programmers_입국심사.py | 0b401b3a4fa57dd39d85c7899098df041a3e441f | [] | no_license | ma0723/Min_Algorithm | df75f53f6e89b7817d4b52d686effb8236a4ddac | b02d1043008cb32e22daa9d4207b9a45f111d66f | refs/heads/master | 2023-07-25T11:00:15.397093 | 2021-08-30T02:08:05 | 2021-08-30T02:08:05 | 375,613,927 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | def solution(n, times):
# 입국심사를 기다리는 사람 수 n
# 한 명을 심사하는데 걸리는 시간이 담긴 배열 times
answer = 0
left = 1
# 최소 시간
right = n * max(times)
# 최대 시간
while left <= right:
mid = (left + right) // 2
people = 0
for time in times:
people += mid // time
# 설정된 시간동안 각 심사대 처리 사람수
if people >= n:
# n명이 넘어가면
answer = mid
right = mid - 1
# mid 중간값보다 작은 값 탐색
break
# 시간초과 방지
# for문 종료
if people < n:
# for문을 모두 순회하고 처리한 사람이 n명이 충족하지 못하면
left = mid + 1
# mid 중 | [
"ma0723@naver.com"
] | ma0723@naver.com |
f3972ea96cfbd07b4c2b4484703e8f7af2d6444d | b9eeb5f95ba6cd6d255e4796c45781092999e4c8 | /codeProblemOne.py | 4b4558a26aa056b56f19a6c97e546347a19569af | [] | no_license | manzhangfan/leetcode | 1c7992f4877a5a441d0b62c31683d893037d5bb6 | 08d558839ef085aafe4b9e6653d52efd56637ff0 | refs/heads/master | 2020-05-05T13:07:49.799165 | 2019-04-09T02:39:44 | 2019-04-09T02:39:44 | 54,266,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | '''
1.Two Sum
Given an array of integers, return indices of the two numbers such that they add up to a specific target.
You may assume that each input would have exactly one solution, and you may not use the same element twice.
'''
class Solution:
#Approch One faster
def twoSum(self,nums:List[int],target:int)->List[int]:
hashmap={}
for i in range(len(nums):
x=nums[i]
if x in hashmap:
return [hashmap[x],i]
else:
hashpmap[target-x]=i
#Approch Two
def twosum(self,nums:List[int],target:int)->List[int]:
result=[]
for i in range(len(nums)-1):
for j in range(i+1,len(nums)):
if nums[i]+nums[j]==target:
result.append(i)
result.append(j)
returen result | [
"1074227613@qq.com"
] | 1074227613@qq.com |
7d8e3e17a3cd51c5b32a576eb08fbe64e91ce972 | fb92125b2236736cc89eee2d4e5ce84f6bc7fa0d | /python 从入门到项目实践(全彩)/0514/six_元组.py | 7db2b100d2d4b75efdbd4faa91ce2f1477abe7ed | [] | no_license | YunShen1994/python_program_demo | f3ba28cda82d198c1d44255d2f43f0ee3dd157b4 | 066aaff45e65854ac3a740a8db70760a323827ae | refs/heads/master | 2020-05-20T11:27:08.783873 | 2019-05-16T02:05:03 | 2019-05-16T02:05:03 | 185,550,178 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,930 | py | # *_* :UTF-8 *_*
#来源 :《python从入门到项目实践》
#开发时间 : 2019/5/14
#文件名称 :six_元组.py
#开发工具 :PyCharm
#元组
'''
形式上放在( )中,元素之间用逗号隔开,元素类型可不同
'''
'''
元组,列表的区别:
主要区别:元组是不可改变序列,列表是可变序列。
即元组中的元素不可以单独修改,而列表中的飚速可以任意修改
'''
'''
元素的创建和删除
创建:与列表类似
如果要创建的彦祖只包括扩一个元素,则性需要在定义元组时,
在元素的后面加一个逗号,若不加逗号,就会成为定义字符串
'''
vers1 = ("世界杯冠军",)
print(vers1)
print(type(vers1))
vers2 = ("世界杯冠军")
print(vers2)
print(type(vers2))
emptytuple = ()#创建空元组
'''
tuple(data)
data表示可以转化为元组的数据,
其类型可以是range队形、字符串、元组或者其他可迭代类型的数据
'''
#创建一个10~20之间(不包括20)的偶数的元组
print()
two_n = tuple(range(10,20,2))
print(two_n)
#删除元组
del two_n
#print(two_n)会报错
'''
访问元组数据:
'''
untitle = ('python',28,('人生苦短','我用python'),['爬虫','云计算'])
print(untitle)
print(untitle[0])
print(untitle[:3])#输出元组中的前三个元素
#同列表 元组也可以用for循环进行遍历
'''
修改元组元素,不可单独修改,可对元组重新赋值
'''
player = ('A','B','C','D','E','F','G')
player =('X','Y','Z',)
print("新元素",player)
player1 = ('H','I')
player2 = player + player1
print("组合后:",player2)
'''
元组推导式:同列表类似
'''
import random
randomnum = (random.randint(10,100) for x in range(10))
print(randomnum)
'''
元组推导式生成的结果并不是一个元组或者列表,而是一个
生成器对象,这一点和列表不同,需要使用该生成器对象可以
将其转换为元组或者列表 其中,转换为元组需要使用tuple()
函数,转换为列表需要list()函数
'''
randomnum = tuple(randomnum)
print(randomnum)
'''
还可以直接通过for循环变量或者直接使用__next()__方法进行遍历
在python2.x中,__next()__对应的方法为next()方法,也是用于
遍历生成器的对象
'''
number = (i for i in range(3))
print(number.__next__())
print(number.__next__())
print(number.__next__())
number = tuple(number)
print("转化后:",number)
print()
number = (i for i in range(4))
for i in number:
print(i,end = " ")
print(tuple(number))
#无论用那种方式,都要重新闯进一个生成器,
# 因为遍历后元生成器的对象已经不存在了
'''
元组与列表的区别:
都属于序列,都可以按照特定的顺序放一组元素,
类型又不受限制
区别:
列表:在纸上用铅笔写字,错了还可以擦掉
元组:在纸上用钢笔写字,错了只能换纸重新写
1.列表属于可变序列,他的元素可以随时修改或者删除;
而元组属于不可变序列,其中的元素不可以修改,除非整体替换
2.列表可以用append(),extend(),insert(),remove(),pop(),
等方法实现添加和修改列表元素,而元组没有这几个方法,
因为不能同时向元组中添加和修改元素,同样也不能删除元素
3.列表可以使用切片访问和修改列表中的元素;元组也支持切片,
但是它只支持通过切片访问元组中的元素,不支持修改
4.元组比列表的访问和处理速度快,所以只需对其中的与只能
进行访问,而不进行任何修改,建议使用元组
5.列表不能作为字典键,而元组却可以
'''
print() | [
"1553134412@qq.com"
] | 1553134412@qq.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.