hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf9d3e190ba02614cfd1ddce4ddb2aba5043ab6 | 2,848 | py | Python | examples/notebooks-py/tellurium_plotting.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 73 | 2016-06-13T12:44:28.000Z | 2021-12-31T14:44:39.000Z | examples/notebooks-py/tellurium_plotting.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 461 | 2015-03-26T00:05:16.000Z | 2022-03-16T17:24:35.000Z | examples/notebooks-py/tellurium_plotting.py | kirichoi/tellurium | 77cf6e794600587741ebe209644a78051e0db1d5 | [
"Apache-2.0"
] | 30 | 2016-01-18T16:50:54.000Z | 2021-07-06T09:29:53.000Z |
# coding: utf-8

# Back to the main [Index](../index.ipynb)

# #### Draw diagram

# In[1]:

#!!! DO NOT CHANGE !!! THIS FILE WAS CREATED AUTOMATICALLY FROM NOTEBOOKS !!! CHANGES WILL BE OVERWRITTEN !!! CHANGE CORRESPONDING NOTEBOOK FILE !!!
from __future__ import print_function
import tellurium as te

# Antimony model: a linear chain with end-product (S4) feedback inhibition on J0,
# which produces sustained oscillations for the parameter values below.
r = te.loada('''
model feedback()
// Reactions:http://localhost:8888/notebooks/core/tellurium_export.ipynb#
J0: $X0 -> S1; (VM1 * (X0 - S1/Keq1))/(1 + X0 + S1 + S4^h);
J1: S1 -> S2; (10 * S1 - 2 * S2) / (1 + S1 + S2);
J2: S2 -> S3; (10 * S2 - 2 * S3) / (1 + S2 + S3);
J3: S3 -> S4; (10 * S3 - 2 * S4) / (1 + S3 + S4);
J4: S4 -> $X1; (V4 * S4) / (KS4 + S4);
// Species initializations:
S1 = 0; S2 = 0; S3 = 0;
S4 = 0; X0 = 10; X1 = 0;
// Variable initialization:
VM1 = 10; Keq1 = 10; h = 10; V4 = 2.5; KS4 = 0.5;
end''')

# simulate using variable step size
r.integrator.setValue('variable_step_size', True)
s = r.simulate(0, 50)
# draw the diagram
r.draw(width=200)
# and the plot
r.plot(s, title="Feedback Oscillations", ylabel="concentration", alpha=0.9);


# #### Plotting multiple simulations

# All plotting is done via the `r.plot` or `te.plotArray` functions. To plot multiple curves in one figure use the `show=False` setting.

# In[2]:

import tellurium as te
import numpy as np
import matplotlib.pylab as plt

# Load a model and carry out a simulation generating 100 points
r = te.loada ('S1 -> S2; k1*S1; k1 = 0.1; S1 = 10')
r.draw(width=100)

# get colormap
# Colormap instances are used to convert data values (floats) from the interval [0, 1]
cmap = plt.get_cmap('Blues')

# The legend can be suppressed which is useful for plotting large
# numbers of curves where a legend would get in the way
k1_values = np.linspace(start=0.1, stop=1.5, num=15)
max_k1 = max(k1_values)
for k, value in enumerate(k1_values):
    r.reset()
    r.k1 = value
    s = r.simulate(0, 30, 100)
    # map each k1 onto the upper half of the colormap so curves stay visible
    color = cmap((value + max_k1) / (2 * max_k1))
    # plot curves without legend and without showing
    r.plot(s, loc=None, show=False, color=color, linewidth=2.0)

# add legend for last curve, show everything and set labels, titles, ...
r.plot(s, loc='upper right', show=True, color=color, linewidth=2.0,
       title="Parameter variation k1", xlabel="time", ylabel="concentration",
       xlim=[-1, 31], ylim=[-0.1, 11], grid=True)

print('Reference Simulation: k1 = {}'.format(r.k1))
print('Parameter variation: k1 = {}'.format(k1_values))


# #### Logarithmic axis

# The axis scale can be adapted with the `xscale` and `yscale` settings.

# In[3]:

import tellurium as te

r = te.loadTestModel('feedback.xml')
r.integrator.setSetting('variable_step_size', True)
s = r.simulate(0, 50)
r.plot(s, xscale="log", xlim=[10E-4, 10E2], grid=True,
       title="Logarithmic x-Axis with grid", ylabel="concentration");


# In[4]:
| 29.360825 | 148 | 0.65309 |
acf9d468339b98a481b54644189c37358b460936 | 45,215 | py | Python | gcloud/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudscheduler/v1beta1/cloudscheduler_v1beta1_messages.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | null | null | null | gcloud/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudscheduler/v1beta1/cloudscheduler_v1beta1_messages.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | gcloud/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudscheduler/v1beta1/cloudscheduler_v1beta1_messages.py | bopopescu/JobSniperRails | 39e7f871887176770de0f4fc6789e9ddc7f32b1f | [
"MIT"
] | 1 | 2020-07-24T18:47:35.000Z | 2020-07-24T18:47:35.000Z | """Generated message classes for cloudscheduler version v1beta1.
Creates and manages jobs run on a regular recurring schedule.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'cloudscheduler'
class AppEngineHttpTarget(_messages.Message):
  r"""App Engine target.

  The job is pushed to a job handler by means of an HTTP request via an
  http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by
  means of an HTTP response code in the range [200 - 299]. Error 503 is
  considered an App Engine system error instead of an application error:
  requests returning error 503 are retried regardless of retry
  configuration and not counted against retry counts. Any other response
  code, or a failure to receive a response before the deadline, constitutes
  a failed attempt.

  Enums:
    HttpMethodValueValuesEnum: The HTTP method to use for the request. PATCH
      and OPTIONS are not permitted.

  Messages:
    HeadersValue: HTTP request headers (header field names and values).
      Cloud Scheduler sets some headers to default values (`User-Agent`,
      `X-CloudScheduler`, `Content-Type`, `Content-Length`); `X-Google-*`
      and `X-AppEngine-*` headers are output only and cannot be set or
      overridden.

  Fields:
    appEngineRouting: App Engine Routing setting for the job.
    body: Body. HTTP request body. A request body is allowed only if the
      HTTP method is POST or PUT. Setting a body on a job with an
      incompatible HttpMethod results in an invalid argument error.
    headers: HTTP request headers; see HeadersValue for the defaults Cloud
      Scheduler applies.
    httpMethod: The HTTP method to use for the request. PATCH and OPTIONS
      are not permitted.
    relativeUri: The relative URI. Must begin with "/" and be a valid HTTP
      relative URL; may contain a path, query string arguments, and `#`
      fragments. If empty, the root path "/" is used. No spaces are
      allowed, and the maximum length allowed is 2083 characters.
  """

  class HttpMethodValueValuesEnum(_messages.Enum):
    r"""The HTTP method to use for the request. PATCH and OPTIONS are not
    permitted.

    Values:
      HTTP_METHOD_UNSPECIFIED: HTTP method unspecified. Defaults to POST.
      POST: HTTP POST
      GET: HTTP GET
      HEAD: HTTP HEAD
      PUT: HTTP PUT
      DELETE: HTTP DELETE
      PATCH: HTTP PATCH
      OPTIONS: HTTP OPTIONS
    """
    HTTP_METHOD_UNSPECIFIED = 0
    POST = 1
    GET = 2
    HEAD = 3
    PUT = 4
    DELETE = 5
    PATCH = 6
    OPTIONS = 7

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HeadersValue(_messages.Message):
    r"""HTTP request headers. This map contains the header field names and
    values. Headers can be set when the job is created. Cloud Scheduler sets
    some headers to default values (`User-Agent`, `X-CloudScheduler`,
    `Content-Type`, `Content-Length`); `X-Google-*` and `X-AppEngine-*`
    headers are for Google internal use only and cannot be set or
    overridden.

    Messages:
      AdditionalProperty: An additional property for a HeadersValue object.

    Fields:
      additionalProperties: Additional properties of type HeadersValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HeadersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  appEngineRouting = _messages.MessageField('AppEngineRouting', 1)
  body = _messages.BytesField(2)
  headers = _messages.MessageField('HeadersValue', 3)
  httpMethod = _messages.EnumField('HttpMethodValueValuesEnum', 4)
  relativeUri = _messages.StringField(5)
class AppEngineRouting(_messages.Message):
  r"""App Engine Routing.

  For more information about services, versions, and instances see the App
  Engine overview and the App Engine Standard/Flex request-routing
  documentation at https://cloud.google.com/appengine/docs.

  Fields:
    host: Output only. The host that the job is sent to, constructed from
      the application's domain name plus the (optional) service, version,
      and instance components. If service, version, or instance is empty or
      invalid, the job is sent to the corresponding default when the job is
      attempted.
    instance: App instance. By default, the job is sent to an instance which
      is available when the job is attempted. Requests can only be sent to
      a specific instance if manual scaling is used in App Engine Standard;
      App Engine Flex does not support instances.
    service: App service. By default, the job is sent to the service which
      is the default service when the job is attempted.
    version: App version. By default, the job is sent to the version which
      is the default version when the job is attempted.
  """

  host = _messages.StringField(1)
  instance = _messages.StringField(2)
  service = _messages.StringField(3)
  version = _messages.StringField(4)
class CloudschedulerProjectsLocationsGetRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsGetRequest object.

  Fields:
    name: Resource name for the location.
  """

  name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsCreateRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsCreateRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    parent: Required. The location name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID`.
  """

  job = _messages.MessageField('Job', 1)
  parent = _messages.StringField(2, required=True)
class CloudschedulerProjectsLocationsJobsDeleteRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsDeleteRequest object.

  Fields:
    name: Required. The job name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
  """

  name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsGetRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsGetRequest object.

  Fields:
    name: Required. The job name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
  """

  name = _messages.StringField(1, required=True)
class CloudschedulerProjectsLocationsJobsListRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsListRequest object.

  Fields:
    pageSize: Requested page size. The maximum page size is 500. If
      unspecified, the page size will be the maximum. Fewer jobs than
      requested might be returned, even if more jobs exist; use
      next_page_token to determine if more jobs exist.
    pageToken: A token identifying a page of results the server will return.
      To request the first page results, page_token must be empty. To
      request the next page of results, page_token must be the value of
      next_page_token returned from the previous call to ListJobs. It is an
      error to switch the value of filter or order_by while iterating
      through pages.
    parent: Required. The location name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID`.
  """

  pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(2)
  parent = _messages.StringField(3, required=True)
class CloudschedulerProjectsLocationsJobsPatchRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsPatchRequest object.

  Fields:
    job: A Job resource to be passed as the request body.
    name: Optionally caller-specified in CreateJob, after which it becomes
      output only. The job name, in the form
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. `PROJECT_ID`
      can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons
      (:), or periods (.); `LOCATION_ID` is the canonical ID for the job's
      location (see ListLocations); `JOB_ID` can contain only letters
      ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The
      maximum length is 500 characters.
    updateMask: A mask used to specify which fields of the job are being
      updated.
  """

  job = _messages.MessageField('Job', 1)
  name = _messages.StringField(2, required=True)
  updateMask = _messages.StringField(3)
class CloudschedulerProjectsLocationsJobsPauseRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsPauseRequest object.

  Fields:
    name: Required. The job name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
    pauseJobRequest: A PauseJobRequest resource to be passed as the request
      body.
  """

  name = _messages.StringField(1, required=True)
  pauseJobRequest = _messages.MessageField('PauseJobRequest', 2)
class CloudschedulerProjectsLocationsJobsResumeRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsResumeRequest object.

  Fields:
    name: Required. The job name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
    resumeJobRequest: A ResumeJobRequest resource to be passed as the
      request body.
  """

  name = _messages.StringField(1, required=True)
  resumeJobRequest = _messages.MessageField('ResumeJobRequest', 2)
class CloudschedulerProjectsLocationsJobsRunRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsJobsRunRequest object.

  Fields:
    name: Required. The job name. For example:
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`.
    runJobRequest: A RunJobRequest resource to be passed as the request
      body.
  """

  name = _messages.StringField(1, required=True)
  runJobRequest = _messages.MessageField('RunJobRequest', 2)
class CloudschedulerProjectsLocationsListRequest(_messages.Message):
  r"""A CloudschedulerProjectsLocationsListRequest object.

  Fields:
    filter: The standard list filter.
    name: The resource that owns the locations collection, if applicable.
    pageSize: The standard list page size.
    pageToken: The standard list page token.
  """

  filter = _messages.StringField(1)
  name = _messages.StringField(2, required=True)
  pageSize = _messages.IntegerField(3, variant=_messages.Variant.INT32)
  pageToken = _messages.StringField(4)
class Empty(_messages.Message):
  r"""A generic empty message that you can re-use to avoid defining
  duplicated empty messages in your APIs. A typical example is to use it as
  the request or the response type of an API method. For instance:

      service Foo {
        rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
      }

  The JSON representation for `Empty` is empty JSON object `{}`.
  """
class HttpTarget(_messages.Message):
  r"""Http target.

  The job is pushed to the job handler by means of an HTTP request via an
  http_method such as HTTP POST, HTTP GET, etc. The job is acknowledged by
  means of an HTTP response code in the range [200 - 299]. A failure to
  receive a response constitutes a failed execution. For a redirected
  request, the response returned by the redirected request is considered.

  Enums:
    HttpMethodValueValuesEnum: Which HTTP method to use for the request.

  Messages:
    HeadersValue: HTTP request headers to send with the job's HTTP request.
      Repeated headers are not supported, but a header value can contain
      commas. Some headers (Host, `Content-Length`, `User-Agent`,
      `X-Google-*`, `X-AppEngine-*`) are ignored or replaced by Cloud
      Scheduler. The total size of headers must be less than 80KB.

  Fields:
    body: HTTP request body. A request body is allowed only if the HTTP
      method is POST, PUT, or PATCH. It is an error to set body on a job
      with an incompatible HttpMethod.
    headers: HTTP request headers to send with the job's HTTP request; see
      HeadersValue for the headers Cloud Scheduler ignores or replaces.
    httpMethod: Which HTTP method to use for the request.
    oauthToken: If specified, an OAuth token will be generated and attached
      as an `Authorization` header in the HTTP request. This type of
      authorization should generally only be used when calling Google APIs
      hosted on *.googleapis.com.
    oidcToken: If specified, an OIDC token will be generated and attached as
      an `Authorization` header in the HTTP request. This type of
      authorization can be used for many scenarios, including calling Cloud
      Run, or endpoints where you intend to validate the token yourself.
    uri: Required. The full URI path that the request will be sent to. This
      string must begin with either "http://" or "https://". Cloud
      Scheduler will encode some characters for safety and compatibility;
      the maximum allowed URL length is 2083 characters after encoding.
  """

  class HttpMethodValueValuesEnum(_messages.Enum):
    r"""Which HTTP method to use for the request.

    Values:
      HTTP_METHOD_UNSPECIFIED: HTTP method unspecified. Defaults to POST.
      POST: HTTP POST
      GET: HTTP GET
      HEAD: HTTP HEAD
      PUT: HTTP PUT
      DELETE: HTTP DELETE
      PATCH: HTTP PATCH
      OPTIONS: HTTP OPTIONS
    """
    HTTP_METHOD_UNSPECIFIED = 0
    POST = 1
    GET = 2
    HEAD = 3
    PUT = 4
    DELETE = 5
    PATCH = 6
    OPTIONS = 7

  @encoding.MapUnrecognizedFields('additionalProperties')
  class HeadersValue(_messages.Message):
    r"""The user can specify HTTP request headers to send with the job's
    HTTP request. This map contains the header field names and values.
    Repeated headers are not supported, but a header value can contain
    commas. Some headers will be ignored or replaced: Host is computed by
    Cloud Scheduler and derived from uri; `Content-Length` is computed by
    Cloud Scheduler; `User-Agent` is set to `"Google-Cloud-Scheduler"`;
    `X-Google-*` and `X-AppEngine-*` are for Google internal use only. The
    total size of headers must be less than 80KB.

    Messages:
      AdditionalProperty: An additional property for a HeadersValue object.

    Fields:
      additionalProperties: Additional properties of type HeadersValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a HeadersValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  body = _messages.BytesField(1)
  headers = _messages.MessageField('HeadersValue', 2)
  httpMethod = _messages.EnumField('HttpMethodValueValuesEnum', 3)
  oauthToken = _messages.MessageField('OAuthToken', 4)
  oidcToken = _messages.MessageField('OidcToken', 5)
  uri = _messages.StringField(6)
class Job(_messages.Message):
  r"""Configuration for a job. The maximum allowed size for a job is 100KB.

  Enums:
    StateValueValuesEnum: Output only. State of the job.

  Fields:
    appEngineHttpTarget: App Engine HTTP target.
    attemptDeadline: The deadline for job attempts. If the request handler
      does not respond by this deadline then the request is cancelled and
      the attempt is marked as a `DEADLINE_EXCEEDED` failure; the failed
      attempt can be viewed in execution logs and Cloud Scheduler retries
      the job according to the RetryConfig. Allowed duration: 15 seconds to
      30 minutes for HTTP targets, 15 seconds to 24 hours for App Engine
      HTTP targets; this field is ignored for PubSub targets.
    description: Optionally caller-specified in CreateJob or UpdateJob. A
      human-readable description for the job. This string must not contain
      more than 500 characters.
    httpTarget: HTTP target.
    lastAttemptTime: Output only. The time the last job attempt started.
    name: Optionally caller-specified in CreateJob, after which it becomes
      output only. The job name, in the form
      `projects/PROJECT_ID/locations/LOCATION_ID/jobs/JOB_ID`. `PROJECT_ID`
      can contain letters ([A-Za-z]), numbers ([0-9]), hyphens (-), colons
      (:), or periods (.); `LOCATION_ID` is the canonical ID for the job's
      location (see ListLocations); `JOB_ID` can contain only letters
      ([A-Za-z]), numbers ([0-9]), hyphens (-), or underscores (_). The
      maximum length is 500 characters.
    pubsubTarget: Pub/Sub target.
    retryConfig: Settings that determine the retry behavior.
    schedule: Required, except when used with UpdateJob. Describes the
      schedule on which the job will be executed, either as a
      [Crontab](http://en.wikipedia.org/wiki/Cron#Overview) expression or
      an English-like schedule. As a general rule, execution `n + 1` of a
      job will not begin until execution `n` has finished: Cloud Scheduler
      never allows two simultaneously outstanding executions, and a
      scheduled start time is delayed if the previous execution has not
      ended when its scheduled time occurs. If retry_count > 0 and a job
      attempt fails, the job is tried a total of retry_count times with
      exponential backoff until the next scheduled start time.
    scheduleTime: Output only. The next time the job is scheduled. Note that
      this may be a retry of a previously failed attempt or the next
      execution time according to the schedule.
    state: Output only. State of the job.
    status: Output only. The response from the target for the last attempted
      execution.
    timeZone: Specifies the time zone to be used in interpreting schedule.
      The value of this field must be a time zone name from the [tz
      database](http://en.wikipedia.org/wiki/Tz_database); daylight-saving
      rules are determined by the chosen tz. For UTC use the string "utc".
      If a time zone is not specified, the default will be in UTC (also
      known as GMT).
    userUpdateTime: Output only. The creation time of the job.
  """

  class StateValueValuesEnum(_messages.Enum):
    r"""Output only. State of the job.

    Values:
      STATE_UNSPECIFIED: Unspecified state.
      ENABLED: The job is executing normally.
      PAUSED: The job is paused by the user. It will not execute. A user
        can intentionally pause the job using PauseJobRequest.
      DISABLED: The job is disabled by the system due to error. The user
        cannot directly set a job to be disabled.
      UPDATE_FAILED: The job state resulting from a failed
        CloudScheduler.UpdateJob operation. To recover a job from this
        state, retry CloudScheduler.UpdateJob until a successful response
        is received.
    """
    STATE_UNSPECIFIED = 0
    ENABLED = 1
    PAUSED = 2
    DISABLED = 3
    UPDATE_FAILED = 4

  appEngineHttpTarget = _messages.MessageField('AppEngineHttpTarget', 1)
  attemptDeadline = _messages.StringField(2)
  description = _messages.StringField(3)
  httpTarget = _messages.MessageField('HttpTarget', 4)
  lastAttemptTime = _messages.StringField(5)
  name = _messages.StringField(6)
  pubsubTarget = _messages.MessageField('PubsubTarget', 7)
  retryConfig = _messages.MessageField('RetryConfig', 8)
  schedule = _messages.StringField(9)
  scheduleTime = _messages.StringField(10)
  state = _messages.EnumField('StateValueValuesEnum', 11)
  status = _messages.MessageField('Status', 12)
  timeZone = _messages.StringField(13)
  userUpdateTime = _messages.StringField(14)
class ListJobsResponse(_messages.Message):
  r"""Response message for listing jobs using ListJobs.

  Fields:
    jobs: The list of jobs.
    nextPageToken: A token to retrieve next page of results. Pass this value
      in the page_token field in the subsequent call to ListJobs to
      retrieve the next page of results. If this is empty it indicates that
      there are no more results through which to paginate. The page token
      is valid for only 2 hours.
  """

  jobs = _messages.MessageField('Job', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class ListLocationsResponse(_messages.Message):
  r"""The response message for Locations.ListLocations.

  Fields:
    locations: A list of locations that matches the specified filter in the
      request.
    nextPageToken: The standard List next-page token.
  """

  locations = _messages.MessageField('Location', 1, repeated=True)
  nextPageToken = _messages.StringField(2)
class Location(_messages.Message):
  r"""A resource that represents Google Cloud Platform location.

  Messages:
    LabelsValue: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    MetadataValue: Service-specific metadata. For example the available
      capacity at the given location.

  Fields:
    displayName: The friendly name for this location, typically a nearby city
      name. For example, "Tokyo".
    labels: Cross-service attributes for the location. For example
      {"cloud.googleapis.com/region": "us-east1"}
    locationId: The canonical id for this location. For example: `"us-east1"`.
    metadata: Service-specific metadata. For example the available capacity at
      the given location.
    name: Resource name for the location, which may vary between
      implementations. For example: `"projects/example-project/locations/us-
      east1"`
  """

  # Map-typed proto fields are modelled as nested message classes holding a
  # repeated list of key/value AdditionalProperty entries.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class LabelsValue(_messages.Message):
    r"""Cross-service attributes for the location. For example
    {"cloud.googleapis.com/region": "us-east1"}

    Messages:
      AdditionalProperty: An additional property for a LabelsValue object.

    Fields:
      additionalProperties: Additional properties of type LabelsValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a LabelsValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  @encoding.MapUnrecognizedFields('additionalProperties')
  class MetadataValue(_messages.Message):
    r"""Service-specific metadata. For example the available capacity at the
    given location.

    Messages:
      AdditionalProperty: An additional property for a MetadataValue object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a MetadataValue object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  displayName = _messages.StringField(1)
  labels = _messages.MessageField('LabelsValue', 2)
  locationId = _messages.StringField(3)
  metadata = _messages.MessageField('MetadataValue', 4)
  name = _messages.StringField(5)
class OAuthToken(_messages.Message):
  r"""Contains information needed for generating an [OAuth
  token](https://developers.google.com/identity/protocols/OAuth2). This type
  of authorization should generally only be used when calling Google APIs
  hosted on *.googleapis.com.

  Fields:
    scope: OAuth scope to be used for generating OAuth access token. If not
      specified, "https://www.googleapis.com/auth/cloud-platform" will be
      used.
    serviceAccountEmail: [Service account
      email](https://cloud.google.com/iam/docs/service-accounts) to be used
      for generating OAuth token. The service account must be within the same
      project as the job. The caller must have iam.serviceAccounts.actAs
      permission for the service account.
  """

  # Both fields are plain strings on the wire; tags 1-2.
  scope = _messages.StringField(1)
  serviceAccountEmail = _messages.StringField(2)
class OidcToken(_messages.Message):
  r"""Contains information needed for generating an [OpenID Connect
  token](https://developers.google.com/identity/protocols/OpenIDConnect). This
  type of authorization can be used for many scenarios, including calling
  Cloud Run, or endpoints where you intend to validate the token yourself.

  Fields:
    audience: Audience to be used when generating OIDC token. If not
      specified, the URI specified in target will be used.
    serviceAccountEmail: [Service account
      email](https://cloud.google.com/iam/docs/service-accounts) to be used
      for generating OIDC token. The service account must be within the same
      project as the job. The caller must have iam.serviceAccounts.actAs
      permission for the service account.
  """

  # Both fields are plain strings on the wire; tags 1-2.
  audience = _messages.StringField(1)
  serviceAccountEmail = _messages.StringField(2)
class PauseJobRequest(_messages.Message):
  r"""Request message for PauseJob."""
  # Intentionally empty: the PauseJob RPC takes no request parameters beyond
  # the resource name carried in the URL.
class PubsubMessage(_messages.Message):
  r"""A message that is published by publishers and consumed by subscribers.
  The message must contain either a non-empty data field or at least one
  attribute. Note that client libraries represent this object differently
  depending on the language. See the corresponding <a
  href="https://cloud.google.com/pubsub/docs/reference/libraries">client
  library documentation</a> for more information. See <a
  href="https://cloud.google.com/pubsub/quotas">Quotas and limits</a> for more
  information about message limits.

  Messages:
    AttributesValue: Optional attributes for this message.

  Fields:
    attributes: Optional attributes for this message.
    data: The message data field. If this field is empty, the message must
      contain at least one attribute.
    messageId: ID of this message, assigned by the server when the message is
      published. Guaranteed to be unique within the topic. This value may be
      read by a subscriber that receives a `PubsubMessage` via a `Pull` call
      or a push delivery. It must not be populated by the publisher in a
      `Publish` call.
    publishTime: The time at which the message was published, populated by the
      server when it receives the `Publish` call. It must not be populated by
      the publisher in a `Publish` call.
  """

  # Map-typed proto field: attributes travel as a repeated list of key/value
  # AdditionalProperty entries.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class AttributesValue(_messages.Message):
    r"""Optional attributes for this message.

    Messages:
      AdditionalProperty: An additional property for a AttributesValue object.

    Fields:
      additionalProperties: Additional properties of type AttributesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AttributesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  attributes = _messages.MessageField('AttributesValue', 1)
  data = _messages.BytesField(2)
  messageId = _messages.StringField(3)
  publishTime = _messages.StringField(4)
class PubsubTarget(_messages.Message):
  r"""Pub/Sub target. The job will be delivered by publishing a message to the
  given Pub/Sub topic.

  Messages:
    AttributesValue: Attributes for PubsubMessage. Pubsub message must
      contain either non-empty data, or at least one attribute.

  Fields:
    attributes: Attributes for PubsubMessage. Pubsub message must contain
      either non-empty data, or at least one attribute.
    data: The message payload for PubsubMessage. Pubsub message must contain
      either non-empty data, or at least one attribute.
    topicName: Required. The name of the Cloud Pub/Sub topic to which messages
      will be published when a job is delivered. The topic name must be in the
      same format as required by PubSub's [PublishRequest.name](https://cloud.
      google.com/pubsub/docs/reference/rpc/google.pubsub.v1#publishrequest),
      for example `projects/PROJECT_ID/topics/TOPIC_ID`. The topic must be in
      the same project as the Cloud Scheduler job.
  """

  # Map-typed proto field: attributes travel as a repeated list of key/value
  # AdditionalProperty entries.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class AttributesValue(_messages.Message):
    r"""Attributes for PubsubMessage. Pubsub message must contain either non-
    empty data, or at least one attribute.

    Messages:
      AdditionalProperty: An additional property for a AttributesValue object.

    Fields:
      additionalProperties: Additional properties of type AttributesValue
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a AttributesValue object.

      Fields:
        key: Name of the additional property.
        value: A string attribute.
      """

      key = _messages.StringField(1)
      value = _messages.StringField(2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  attributes = _messages.MessageField('AttributesValue', 1)
  data = _messages.BytesField(2)
  topicName = _messages.StringField(3)
class ResumeJobRequest(_messages.Message):
  r"""Request message for ResumeJob."""
  # Intentionally empty: the ResumeJob RPC takes no request parameters beyond
  # the resource name carried in the URL.
class RetryConfig(_messages.Message):
  r"""Settings that determine the retry behavior. By default, if a job does
  not complete successfully (meaning that an acknowledgement is not received
  from the handler), then it will be retried with exponential backoff
  according to the settings in RetryConfig.

  Fields:
    maxBackoffDuration: The maximum amount of time to wait before retrying a
      job after it fails. The default value of this field is 1 hour.
    maxDoublings: The time between retries will double `max_doublings` times.
      A job's retry interval starts at min_backoff_duration, then doubles
      `max_doublings` times, then increases linearly, and finally retries
      retries at intervals of max_backoff_duration up to retry_count times.
      For example, if min_backoff_duration is 10s, max_backoff_duration is
      300s, and `max_doublings` is 3, then the a job will first be retried in
      10s. The retry interval will double three times, and then increase
      linearly by 2^3 * 10s. Finally, the job will retry at intervals of
      max_backoff_duration until the job has been attempted retry_count times.
      Thus, the requests will retry at 10s, 20s, 40s, 80s, 160s, 240s, 300s,
      300s, .... The default value of this field is 5.
    maxRetryDuration: The time limit for retrying a failed job, measured from
      time when an execution was first attempted. If specified with
      retry_count, the job will be retried until both limits are reached. The
      default value for max_retry_duration is zero, which means retry duration
      is unlimited.
    minBackoffDuration: The minimum amount of time to wait before retrying a
      job after it fails. The default value of this field is 5 seconds.
    retryCount: The number of attempts that the system will make to run a job
      using the exponential backoff procedure described by max_doublings. The
      default value of retry_count is zero. If retry_count is zero, a job
      attempt will *not* be retried if it fails. Instead the Cloud Scheduler
      system will wait for the next scheduled execution time. If retry_count
      is set to a non-zero number then Cloud Scheduler will retry failed
      attempts, using exponential backoff, retry_count times, or until the
      next scheduled execution time, whichever comes first. Values greater
      than 5 and negative values are not allowed.
  """

  # Durations are proto duration strings (e.g. "3.5s"), hence StringField.
  maxBackoffDuration = _messages.StringField(1)
  maxDoublings = _messages.IntegerField(2, variant=_messages.Variant.INT32)
  maxRetryDuration = _messages.StringField(3)
  minBackoffDuration = _messages.StringField(4)
  retryCount = _messages.IntegerField(5, variant=_messages.Variant.INT32)
class RunJobRequest(_messages.Message):
  r"""Request message for forcing a job to run now using
  RunJob."""
  # Intentionally empty: the RunJob RPC takes no request parameters beyond
  # the resource name carried in the URL.
class StandardQueryParameters(_messages.Message):
  r"""Query parameters accepted by all methods.

  Enums:
    FXgafvValueValuesEnum: V1 error format.
    AltValueValuesEnum: Data format for response.

  Fields:
    f__xgafv: V1 error format.
    access_token: OAuth access token.
    alt: Data format for response.
    callback: JSONP
    fields: Selector specifying which fields to include in a partial response.
    key: API key. Your API key identifies your project and provides you with
      API access, quota, and reports. Required unless you provide an OAuth 2.0
      token.
    oauth_token: OAuth 2.0 token for the current user.
    prettyPrint: Returns response with indentations and line breaks.
    quotaUser: Available to use for quota purposes for server-side
      applications. Can be any arbitrary string assigned to a user, but should
      not exceed 40 characters.
    trace: A tracing token of the form "token:<tokenid>" to include in api
      requests.
    uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
    upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
  """

  class AltValueValuesEnum(_messages.Enum):
    r"""Data format for response.

    Values:
      json: Responses with Content-Type of application/json
      media: Media download with context-dependent Content-Type
      proto: Responses with Content-Type of application/x-protobuf
    """
    json = 0
    media = 1
    proto = 2

  class FXgafvValueValuesEnum(_messages.Enum):
    r"""V1 error format.

    Values:
      _1: v1 error format
      _2: v2 error format
    """
    # Python identifiers cannot begin with a digit, hence the leading
    # underscores; AddCustomJsonEnumMapping at module bottom maps these back
    # to the wire values "1"/"2".
    _1 = 0
    _2 = 1

  f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
  access_token = _messages.StringField(2)
  alt = _messages.EnumField('AltValueValuesEnum', 3, default=u'json')
  callback = _messages.StringField(4)
  fields = _messages.StringField(5)
  key = _messages.StringField(6)
  oauth_token = _messages.StringField(7)
  prettyPrint = _messages.BooleanField(8, default=True)
  quotaUser = _messages.StringField(9)
  trace = _messages.StringField(10)
  uploadType = _messages.StringField(11)
  upload_protocol = _messages.StringField(12)
class Status(_messages.Message):
  r"""The `Status` type defines a logical error model that is suitable for
  different programming environments, including REST APIs and RPC APIs. It is
  used by [gRPC](https://github.com/grpc). Each `Status` message contains
  three pieces of data: error code, error message, and error details. You can
  find out more about this error model and how to work with it in the [API
  Design Guide](https://cloud.google.com/apis/design/errors).

  Messages:
    DetailsValueListEntry: A DetailsValueListEntry object.

  Fields:
    code: The status code, which should be an enum value of google.rpc.Code.
    details: A list of messages that carry the error details. There is a
      common set of message types for APIs to use.
    message: A developer-facing error message, which should be in English. Any
      user-facing error message should be localized and sent in the
      google.rpc.Status.details field, or localized by the client.
  """

  # Each detail entry is an arbitrary JSON object carried as key/value
  # AdditionalProperty pairs.
  @encoding.MapUnrecognizedFields('additionalProperties')
  class DetailsValueListEntry(_messages.Message):
    r"""A DetailsValueListEntry object.

    Messages:
      AdditionalProperty: An additional property for a DetailsValueListEntry
        object.

    Fields:
      additionalProperties: Properties of the object. Contains field @type
        with type URL.
    """

    class AdditionalProperty(_messages.Message):
      r"""An additional property for a DetailsValueListEntry object.

      Fields:
        key: Name of the additional property.
        value: A extra_types.JsonValue attribute.
      """

      key = _messages.StringField(1)
      value = _messages.MessageField('extra_types.JsonValue', 2)

    additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)

  code = _messages.IntegerField(1, variant=_messages.Variant.INT32)
  details = _messages.MessageField('DetailsValueListEntry', 2, repeated=True)
  message = _messages.StringField(3)
# Map the Python-safe names declared above back to their JSON wire names:
# the field "f__xgafv" serialises as "$.xgafv", and the enum values "_1"/"_2"
# serialise as "1"/"2".
encoding.AddCustomJsonFieldMapping(
    StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
    StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 43.475962 | 89 | 0.724184 |
acf9d81f5f169c82f8a2bea6fc444e0d7c6276f3 | 448 | py | Python | pr4.py | maxtortime/euler_project | b41a9a2fbcbde71ca13acd5eecde7ab3234bcc8d | [
"MIT"
] | 1 | 2017-10-29T14:58:13.000Z | 2017-10-29T14:58:13.000Z | euler_project/pr4.py | maxtortime/algorithm | 6185b1b61e12bfbc5a3e87917f878e7a7902ea2f | [
"MIT"
] | null | null | null | euler_project/pr4.py | maxtortime/algorithm | 6185b1b61e12bfbc5a3e87917f878e7a7902ea2f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
def reverse_int(n):
    """Return the integer formed by reading the decimal digits of n backwards."""
    return int("".join(reversed(str(n))))
def split_half_int(n, i):
    """Return one half of the decimal digits of n as an int.

    ``i == 1`` selects the back half; any other value selects the front
    half.  For an odd digit count the back half is the longer one.
    """
    text = str(n)
    mid = len(text) // 2
    return int(text[mid:]) if i == 1 else int(text[:mid])
# Project Euler 4: largest palindrome that is a product of two 3-digit
# numbers.  Each product is tested directly against its reversed digit
# string.  The original half-splitting comparison could never match numbers
# with an odd digit count (e.g. 101 * 101 = 10201: back half 201 reversed
# is 102, front half is 10), so such palindromes were silently skipped.
# The printed maximum (906609, a 6-digit number) is unchanged, but the
# collected list is now complete.
parlinums = []
for i in range(100, 1000):
    for j in range(100, 1000):
        product = i * j
        if str(product) == str(product)[::-1]:
            parlinums.append(product)
print(max(parlinums))
exit()
| 18.666667 | 71 | 0.584821 |
acf9d9ab061f249d9bc94be78c11422565dde883 | 7,836 | py | Python | doc/conf.py | sciapp/pyMolDyn | fba6ea91cb185f916b930cd25b4b1d28a22fb4c5 | [
"MIT"
] | 11 | 2016-10-25T09:48:36.000Z | 2021-01-30T18:59:50.000Z | doc/conf.py | sciapp/pyMolDyn | fba6ea91cb185f916b930cd25b4b1d28a22fb4c5 | [
"MIT"
] | 1 | 2017-09-19T06:03:36.000Z | 2017-09-28T11:29:23.000Z | doc/conf.py | sciapp/pyMolDyn | fba6ea91cb185f916b930cd25b4b1d28a22fb4c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# pyMolDyn2 documentation build configuration file, created by
# sphinx-quickstart on Tue Aug 27 09:12:51 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src'))
# NOTE(review): hard-coded absolute path to a GR installation; only valid on
# machines where GR is installed at this location.
sys.path.insert(0, '/usr/local/gr/lib/python')

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'pyMolDyn2'
copyright = u'2013, Florian Rhiem'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0'
# The full version, including alpha/beta/rc tags.
release = '0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'pyMolDyn2doc'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'pyMolDyn2.tex', u'pyMolDyn2 Documentation',
   u'Florian Rhiem', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'pymoldyn2', u'pyMolDyn2 Documentation',
     [u'Florian Rhiem'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'pyMolDyn2', u'pyMolDyn2 Documentation',
   u'Florian Rhiem', 'pyMolDyn2', 'One line description of project.',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| 32.114754 | 80 | 0.714778 |
acf9db7c59a065a480847569ce254d31b32ee20f | 4,435 | py | Python | tests/integration/obmd.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 23 | 2016-09-27T18:41:46.000Z | 2021-12-20T15:06:32.000Z | tests/integration/obmd.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 522 | 2016-06-22T21:19:26.000Z | 2020-11-12T20:54:40.000Z | tests/integration/obmd.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 30 | 2016-06-24T08:27:59.000Z | 2018-08-20T17:34:45.000Z | """Integration tests for interactions with obmd."""
import json
import requests
import pytest
from hil.test_common import config_testsuite, config_merge, \
fresh_database, fail_on_log_warnings, server_init, with_request_context, \
obmd_cfg
from hil import api, config, errors
@pytest.fixture()
def configure():
    """Set up the HIL configuration."""
    overrides = {
        'devel': {
            'dry_run': None,
        },
    }
    config_testsuite()
    config_merge(overrides)
    config.load_extensions()
# Wrap the shared helpers imported from hil.test_common as pytest fixtures
# local to this module.
fresh_database = pytest.fixture(fresh_database)
fail_on_log_warnings = pytest.fixture(fail_on_log_warnings)
server_init = pytest.fixture(server_init)
with_request_context = pytest.yield_fixture(with_request_context)
obmd_cfg = pytest.fixture(obmd_cfg)

# Apply these fixtures to every test in this module.
pytestmark = pytest.mark.usefixtures(
    'configure',
    'fresh_database',
    'fail_on_log_warnings',
    'server_init',
    'with_request_context',
)
@pytest.fixture
def mock_node(obmd_cfg):
    """Register a node with obmd & hil and return the node's label.

    The node is attached to a project called 'anvil-nextgen'.
    """
    admin_auth = ('admin', obmd_cfg['AdminToken'])
    node_uri = 'http://localhost' + obmd_cfg['ListenAddr'] + '/node/node-99'

    # Register the node with obmd first...
    requests.put(
        node_uri,
        auth=admin_auth,
        data=json.dumps({
            "type": "mock",
            "info": {
                "addr": "10.0.0.4",
                "NumWrites": 0,
            },
        }))

    # ...then with hil, pointing hil at the obmd resource created above.
    api.node_register(
        node='node-99',
        obmd={
            'uri': node_uri,
            'admin_token': obmd_cfg['AdminToken'],
        },
    )

    # Create a project and attach the node to it.
    api.project_create('anvil-nextgen')
    api.project_connect_node('anvil-nextgen', 'node-99')

    return 'node-99'
def test_enable_disable_obm(mock_node):
    """Test enabling and disabling the obm of a node via the api.

    The node is supplied by the ``mock_node`` fixture, which attaches it to
    the project 'anvil-nextgen'.
    """
    # First, enable the obm
    api.node_enable_disable_obm(mock_node, enabled=True)

    # Obm is enabled; we shouldn't be able to detach the node:
    with pytest.raises(errors.BlockedError):
        api.project_detach_node('anvil-nextgen', mock_node)

    # ...so disable it first:
    api.node_enable_disable_obm(mock_node, enabled=False)

    # ...and then it should work:
    api.project_detach_node('anvil-nextgen', mock_node)
def _follow_redirect(method, resp, data=None, stream=False):
    """Assert that *resp* is a 307 redirect, then replay the request.

    Re-issues *method* against the redirect's Location with *data*,
    asserts the follow-up succeeded, and returns that response.
    """
    assert resp.status_code == 307
    followed = requests.request(method, resp.location, data=data, stream=stream)
    assert followed.ok
    return followed
def test_power_operations(mock_node):
    """Test the power-related obm api calls.

    i.e. power_off, power_cycle, set_bootdev, power_status

    Each api call returns a redirect to obmd, which ``_follow_redirect``
    replays and checks.
    """
    # Obm is disabled; these should all fail:
    with pytest.raises(errors.BlockedError):
        api.node_power_off(mock_node)
    with pytest.raises(errors.BlockedError):
        api.node_power_on(mock_node)
    with pytest.raises(errors.BlockedError):
        api.node_set_bootdev(mock_node, 'A')
    with pytest.raises(errors.BlockedError):
        api.node_power_cycle(mock_node, force=True)
    with pytest.raises(errors.BlockedError):
        api.node_power_cycle(mock_node, force=False)
    with pytest.raises(errors.BlockedError):
        api.node_power_status(mock_node)
    with pytest.raises(errors.BlockedError):
        api.show_console(mock_node)

    # Now let's enable it and try again.
    api.node_enable_disable_obm(mock_node, enabled=True)

    # Helper: power-cycle with the given force flag, forwarding the flag in
    # the replayed request body as well.
    def _power_cycle(force):
        _follow_redirect(
            'POST',
            api.node_power_cycle(mock_node, force=force),
            data=json.dumps({
                'force': force,
            }))

    _follow_redirect('POST', api.node_power_off(mock_node))
    resp = _follow_redirect('GET', api.node_power_status(mock_node))
    # 'Mock Status' is the fixed status reported by the mock obm driver
    # registered in the mock_node fixture.
    assert json.loads(resp.content) == {'power_status': 'Mock Status'}
    _follow_redirect('POST', api.node_power_on(mock_node))
    _follow_redirect(
        'PUT',
        api.node_set_bootdev(mock_node, 'A'),
        data=json.dumps({'bootdev': 'A'}),
    )
    _power_cycle(True)
    _power_cycle(False)

    resp = _follow_redirect('GET', api.show_console(mock_node), stream=True)

    # Read the first chunk of the output from the console to make sure it
    # looks right:
    # NOTE(review): requests' iter_lines yields bytes on Python 3, so this
    # comparison assumes a str-yielding response (Python 2) — confirm.
    i = 0
    for line in resp.iter_lines():
        assert line == str(i)
        if i >= 10:
            break
        i += 1
| 28.248408 | 78 | 0.655919 |
acf9dbd273dfda8f3b21740f72f31cbff3fbe00c | 946 | py | Python | scripts/example/model_submission_example.py | riccardopoiani/recsys_2019 | 47a44d2f7d85e76e31dacf4ba2e69721d010b6b8 | [
"MIT"
] | 2 | 2020-10-01T11:08:32.000Z | 2020-11-25T11:05:37.000Z | scripts/example/model_submission_example.py | riccardopoiani/recsys_2019 | 47a44d2f7d85e76e31dacf4ba2e69721d010b6b8 | [
"MIT"
] | null | null | null | scripts/example/model_submission_example.py | riccardopoiani/recsys_2019 | 47a44d2f7d85e76e31dacf4ba2e69721d010b6b8 | [
"MIT"
] | 1 | 2020-10-01T11:08:33.000Z | 2020-10-01T11:08:33.000Z | from src.data_management.RecSys2019Reader import RecSys2019Reader
from course_lib.KNN.ItemKNNCFRecommender import ItemKNNCFRecommender
from src.model_management.submission_helper import *
from src.data_management.data_reader import read_target_playlist
if __name__ == '__main__':
    # Load the interaction data and build the full user-rating matrix.
    data_reader = RecSys2019Reader("../data/train.csv", "../data/tracks.csv")
    data_reader.load_data()
    URM_all = data_reader.get_URM_all()

    # Fit an item-based collaborative-filtering model with the tuned
    # hyper-parameters below.
    item_cf_kwargs = {'topK': 548, 'shrink': 447, 'similarity': 'cosine', 'normalize': True,
                      'feature_weighting': 'TF-IDF'}
    item_cf = ItemKNNCFRecommender(URM_train=URM_all)
    item_cf.fit(**item_cf_kwargs)

    # Read the playlists to recommend for and write out the submission file.
    target_playlist = read_target_playlist()
    print(type(target_playlist))
    print(target_playlist)
    write_submission_file(path="../report/submitted_models/item_cf_10_map_fixed.csv",
                          userlist=target_playlist, recommender=item_cf)
| 36.384615 | 132 | 0.764271 |
acf9dcfcd87620180de8d580ff63f4f41630f026 | 1,942 | py | Python | tests/python/test_off_resonance.py | aTrotier/sycomore | 32e438d3a90ca0a9d051bb6acff461e06079116d | [
"MIT"
] | 14 | 2019-11-06T09:23:09.000Z | 2022-01-11T19:08:36.000Z | tests/python/test_off_resonance.py | aTrotier/sycomore | 32e438d3a90ca0a9d051bb6acff461e06079116d | [
"MIT"
] | 2 | 2020-12-01T15:48:27.000Z | 2020-12-04T15:19:37.000Z | tests/python/test_off_resonance.py | aTrotier/sycomore | 32e438d3a90ca0a9d051bb6acff461e06079116d | [
"MIT"
] | 2 | 2020-08-12T04:36:36.000Z | 2021-05-27T13:17:34.000Z | import math
import os
import struct
import unittest
import sycomore
from sycomore.units import *
class TestOffResonance(unittest.TestCase):
    """Regression test: compare an off-resonance simulation against a stored
    binary baseline.
    """

    def test_off_resonance(self):
        # Species with zero relaxation rates and zero diffusion.
        species = sycomore.Species(0*Hz, 0*Hz, 0*um*um/ms)
        m0 = sycomore.Magnetization(0, 0, 1)

        # 90 degree pulse with pi phase, realised as a hard-pulse
        # approximation of a sinc envelope with two zero-crossings.
        pulse = sycomore.Pulse(90*deg, math.pi*rad)
        pulse_duration = 1*ms
        pulse_support_size = 101
        zero_crossings = 2

        # NOTE: in the absence of relaxation and diffusion, the TR is meaningless
        TR = 500*ms;
        slice_thickness = 1*mm;

        t0 = pulse_duration/(2*zero_crossings)
        sinc_pulse = sycomore.HardPulseApproximation(
            pulse,
            sycomore.linspace(pulse_duration, pulse_support_size),
            sycomore.sinc_envelope(t0), 1/t0, slice_thickness, "rf")
        # Refocalization: rewind half of the pulse's gradient moment over the
        # remainder of the TR.
        refocalization = sycomore.TimeInterval(
            (TR-pulse_duration)/2., -sinc_pulse.get_gradient_moment()/2)

        model = sycomore.como.Model(
            species, m0, [
                ["rf", sinc_pulse.get_time_interval()],
                ["refocalization", refocalization]])

        model.apply_pulse(sinc_pulse)
        model.apply_time_interval("refocalization")

        # Sample the isochromat over a range of off-resonance frequencies.
        frequencies = sycomore.linspace(60.*rad/ms, 201)
        magnetization = [
            model.isochromat(set(), sycomore.Point(), f) for f in frequencies]

        # The baseline file stores interleaved pairs of doubles:
        # (transversal magnitude, longitudinal component) per frequency.
        root = os.environ["SYCOMORE_TEST_DATA"]
        with open(os.path.join(root, "baseline", "off_resonance.dat"), "rb") as fd:
            contents = fd.read()
        baseline = struct.unpack((int(len(contents)/8))*"d", contents)

        self.assertEqual(len(baseline), 2*len(magnetization))
        for i in range(len(magnetization)):
            self.assertAlmostEqual(
                sycomore.transversal(magnetization[i]), baseline[2*i])
            self.assertAlmostEqual(magnetization[i][2], baseline[2*i+1])
# Allow running this file directly, outside of a test runner.
if __name__ == "__main__":
    unittest.main()
| 34.070175 | 83 | 0.629763 |
acf9dd0e66c2a4d985eb1cabf0c0547ee3c5066f | 183 | py | Python | 1470. Shuffle the array.py | bogdan824/LeetCode-Problems | 3e306b073be4130c5682b74c9f501ae80b7ef5cf | [
"MIT"
] | null | null | null | 1470. Shuffle the array.py | bogdan824/LeetCode-Problems | 3e306b073be4130c5682b74c9f501ae80b7ef5cf | [
"MIT"
] | null | null | null | 1470. Shuffle the array.py | bogdan824/LeetCode-Problems | 3e306b073be4130c5682b74c9f501ae80b7ef5cf | [
"MIT"
def shuffArray(nums, n):
    """Interleave the two halves of *nums* (LeetCode 1470).

    Given ``[x1..xn, y1..yn]`` (length ``2*n``), return
    ``[x1, y1, x2, y2, ..., xn, yn]``.  The input list is not modified.
    """
    shuffled = []
    # zip pairs each element of the front half with its counterpart in the
    # back half, replacing the original manual index arithmetic.
    for first, second in zip(nums[:n], nums[n:]):
        shuffled.append(first)
        shuffled.append(second)
    return shuffled
# Demo: interleaves the halves of [1, 1, 2, 2] and prints the result.
nums = [1,1,2,2]
n = 2
print(shuffArray(nums,n))
acf9dd6f669afd28c5998bf3f2e772183a49b6ef | 160 | py | Python | nbdev_playground/core.py | dharmeshkakadia/nbdev_playground | 6ce33d7d6ca26c09e7df2dd06ab67b668bff3169 | [
"Apache-2.0"
] | null | null | null | nbdev_playground/core.py | dharmeshkakadia/nbdev_playground | 6ce33d7d6ca26c09e7df2dd06ab67b668bff3169 | [
"Apache-2.0"
] | 1 | 2020-09-13T21:11:26.000Z | 2020-09-13T21:11:26.000Z | nbdev_playground/core.py | dharmeshkakadia/nbdev_playground | 6ce33d7d6ca26c09e7df2dd06ab67b668bff3169 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['print_hello']
# Cell
def print_hello():
    """Return the literal greeting string ``"hello"``."""
    greeting = "hello"
    return greeting
acf9dec20c115a47aa9a571044bbda6c686ab0a1 | 2,655 | py | Python | src/mbed_tools/build/_internal/config/cumulative_data.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | src/mbed_tools/build/_internal/config/cumulative_data.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | src/mbed_tools/build/_internal/config/cumulative_data.py | rwalton-arm/mbed-tools | 131605540f4829116f977695a47dc10b3ac96450 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2020 Arm Mbed. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Ability to parse cumulative attributes from Sources."""
import itertools
import re
from dataclasses import dataclass, field, fields
from typing import Any, Iterable, Set, Tuple
from mbed_tools.build._internal.config.source import Source
@dataclass
class CumulativeData:
    """Accumulated state of the cumulative attributes collected while parsing Sources."""

    features: Set[str] = field(default_factory=set)
    components: Set[str] = field(default_factory=set)
    labels: Set[str] = field(default_factory=set)
    extra_labels: Set[str] = field(default_factory=set)
    device_has: Set[str] = field(default_factory=set)
    macros: Set[str] = field(default_factory=set)
    c_lib: str = field(default_factory=str)
    printf_lib: str = field(default_factory=str)

    @classmethod
    def from_sources(cls, sources: Iterable[Source]) -> "CumulativeData":
        """Fold the cumulative overrides of every Source, in order, into one instance."""
        data = CumulativeData()
        # Flatten all override items across sources, keeping only the
        # cumulative keys; later sources are applied after earlier ones.
        relevant_overrides = (
            (key, value)
            for source in sources
            for key, value in source.overrides.items()
            if key in CUMULATIVE_OVERRIDE_KEYS_IN_SOURCE
        )
        for key, value in relevant_overrides:
            _modify_field(data, key, value)
        return data
def _modify_field(data: CumulativeData, key: str, value: Any) -> None:
    """Mutate ``data`` in place by adding to, removing from or resetting one field.

    The ``key`` encodes both the field name and an optional modifier
    (``_add``/``_remove``); with no modifier the field is overwritten.

    Args:
        data: The CumulativeData instance to modify.
        key: Override key from a Source, e.g. ``target.features_add``.
        value: Override value; an iterable for set-valued fields, or a
            string for string-valued fields (``c_lib``, ``printf_lib``).
    """
    key, modifier = _extract_target_modifier_data(key)
    if modifier == "add":
        new_value = getattr(data, key) | set(value)
    elif modifier == "remove":
        new_value = getattr(data, key) - set(value)
    elif isinstance(value, str):
        # String-valued fields are replaced verbatim. isinstance (rather than
        # an exact `type(value) is str` check) also accepts str subclasses,
        # which would otherwise be exploded into a set of characters below.
        new_value = value
    else:
        new_value = set(value)
    setattr(data, key, new_value)
# Names of all cumulative fields, derived from the dataclass definition above.
_CUMULATIVE_FIELDS = [f.name for f in fields(CumulativeData)]
# The same names as they appear in config overrides, i.e. "target.<field>".
_PREFIXED_CUMULATIVE_FIELDS = [f"target.{f}" for f in _CUMULATIVE_FIELDS]
# Every override key recognised as cumulative: the plain "target.<field>"
# keys plus their "_add" and "_remove" modifier variants.
CUMULATIVE_OVERRIDE_KEYS_IN_SOURCE = _PREFIXED_CUMULATIVE_FIELDS + [
    f"{attr}_{suffix}" for attr, suffix in itertools.product(_PREFIXED_CUMULATIVE_FIELDS, ["add", "remove"])
]
def _extract_target_modifier_data(key: str) -> Tuple[str, str]:
    """Split an override key into its field name and modifier.

    Returns:
        A tuple ``(field, modifier)`` where ``modifier`` is ``"add"``,
        ``"remove"`` or ``""`` for a plain overwrite.

    Raises:
        ValueError: If ``key`` contains no cumulative field name.
    """
    # NOTE(review): the pattern is unanchored (re.search), so any key merely
    # *containing* a cumulative field name matches — callers are expected to
    # pre-filter with CUMULATIVE_OVERRIDE_KEYS_IN_SOURCE.
    regex = fr"""
    (?P<key>{'|'.join(_CUMULATIVE_FIELDS)}) # attribute name (one of ACCUMULATING_OVERRIDES)
    _? # separator
    (?P<modifier>(add|remove)?) # modifier (add, remove or empty)
    """
    match = re.search(regex, key, re.VERBOSE)
    if not match:
        raise ValueError(f"Not a target modifier key {key}")
    return match["key"], match["modifier"]
| 37.928571 | 108 | 0.669303 |
acf9dedf7a36a5b243d169bcb050a0d6cd491855 | 2,952 | py | Python | test/stateful_test/tf_behaivor_scripts/temp_rnn_test.py | CrikeeIP/frugally-deep | c7badaa545a624956f86ba65f66aefe27b5e7218 | [
"MIT"
] | 881 | 2016-12-20T21:34:09.000Z | 2022-03-30T12:49:03.000Z | test/stateful_test/tf_behaivor_scripts/temp_rnn_test.py | CrikeeIP/frugally-deep | c7badaa545a624956f86ba65f66aefe27b5e7218 | [
"MIT"
] | 300 | 2017-11-15T05:31:09.000Z | 2022-03-31T18:22:10.000Z | test/stateful_test/tf_behaivor_scripts/temp_rnn_test.py | CrikeeIP/frugally-deep | c7badaa545a624956f86ba65f66aefe27b5e7218 | [
"MIT"
] | 224 | 2017-11-15T02:24:28.000Z | 2022-03-30T12:49:05.000Z | import os
os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES']=''
import numpy as np
from tensorflow.keras.layers import Input, Dense, SimpleRNN, GRU, LSTM, Bidirectional
from tensorflow.keras.models import Model
USE_TOY_WEIGHTS = True
REC_LAYER = GRU
sequence_length = 3
feature_dim = 1
features_in = Input(batch_shape=(1, sequence_length, feature_dim))
state_h_in = Input(batch_shape=(1, 1))
rnn_out = REC_LAYER(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=False)(features_in, initial_state=state_h_in)
stateless_model = Model(inputs=[features_in, state_h_in], outputs=rnn_out)
stateful_rnn_out = REC_LAYER(1, activation=None, use_bias=False, return_sequences=True, return_state=False, stateful=True)(features_in, initial_state=state_h_in)
stateful_model = Model(inputs=[features_in, state_h_in], outputs=stateful_rnn_out)
if USE_TOY_WEIGHTS:
if REC_LAYER == SimpleRNN:
toy_weights = [ np.asarray([[1.0]], dtype=np.float32), np.asarray([[-0.5]], dtype=np.float32)]
elif REC_LAYER == GRU:
# for a GRU, the first are the non-recurrent kernels W, and the second are the recurrent kernels U (V)
toy_weights = [np.asarray([[ 1.0, -2.0, 3.0 ]], dtype=np.float32), np.asarray([[ -0.5 , 2.0, -1.1 ]], dtype=np.float32)]
stateless_model.set_weights(toy_weights)
stateful_model.set_weights(toy_weights)
# w = stateless_model.get_weights()
# print(w)
stateless_model.save('temp_stateless.h5', include_optimizer=False)
stateful_model.save('temp_stateful.h5', include_optimizer=False)
x_in = np.zeros(sequence_length)
x_in[0] = 1
x_in = x_in.reshape( (1, sequence_length, feature_dim) )
initial_state = np.asarray( [10])
initial_state = initial_state.reshape((1,1))
def print_rnn_out(non_stateful_out, stateful_out):
    """Print both model outputs and their element-wise difference.

    Parameters
    ----------
    non_stateful_out, stateful_out:
        Outputs of the stateless and stateful models; must support
        subtraction (e.g. numpy arrays or plain numbers).
    """
    # (removed unused local `fb = ['FWD::', 'BWD::']` — never referenced)
    print(f'non_stateful: {non_stateful_out}')
    print(f'stateful: {stateful_out}')
    # The delta is all zeros whenever the two models agree.
    print(f'delta: {stateful_out-non_stateful_out}')
# First prediction: both models start from the same supplied initial state.
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)

# Second prediction: the stateful model carries its internal state over from
# the previous predict() call, so its output may now diverge — TODO confirm
# against the printed deltas.
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)

print('\n** RESETING STATES in STATEFUL MODEL **\n')
stateful_model.reset_states()

# After reset_states() the first call should again match the stateless model.
non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)

non_stateful_out = stateless_model.predict([x_in, initial_state]).reshape((sequence_length))
stateful_out = stateful_model.predict([x_in, initial_state]).reshape((sequence_length))
print_rnn_out(non_stateful_out, stateful_out)
| 42.782609 | 162 | 0.783537 |
acf9def1bfd1b7da1573cbfa03125810cafc7a87 | 643 | py | Python | staging/versions/110a27b89211_.py | farbodab/flatteningthecurve | 692fd9c8d78355e1208ff85a2cd1038da11c392f | [
"MIT"
] | 1 | 2020-03-24T23:46:29.000Z | 2020-03-24T23:46:29.000Z | staging/versions/110a27b89211_.py | farbodab/flatteningthecurve | 692fd9c8d78355e1208ff85a2cd1038da11c392f | [
"MIT"
] | 13 | 2021-02-08T20:51:14.000Z | 2022-03-12T00:43:30.000Z | staging/versions/110a27b89211_.py | farbodab/flatteningthecurve | 692fd9c8d78355e1208ff85a2cd1038da11c392f | [
"MIT"
] | 3 | 2020-06-09T20:24:29.000Z | 2020-06-09T20:26:16.000Z | """empty message
Revision ID: 110a27b89211
Revises: 2ae3c47266e4
Create Date: 2020-05-25 00:04:13.714116
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '110a27b89211'
down_revision = '2ae3c47266e4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('viz', sa.Column('date', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('viz', 'date')
# ### end Alembic commands ###
| 22.172414 | 73 | 0.685848 |
acf9df5364999b77dc49b8f1da3614c99853d3e7 | 318 | py | Python | rhcephcompose/tests/test_build.py | red-hat-storage/rhcephcompose | bca6846be442488f5f29d0c9ebc6a071bf525de7 | [
"MIT"
] | 1 | 2016-05-27T13:15:56.000Z | 2016-05-27T13:15:56.000Z | rhcephcompose/tests/test_build.py | red-hat-storage/rhcephcompose | bca6846be442488f5f29d0c9ebc6a071bf525de7 | [
"MIT"
] | 28 | 2016-04-18T16:06:53.000Z | 2021-01-04T22:18:31.000Z | rhcephcompose/tests/test_build.py | red-hat-storage/rhcephcompose | bca6846be442488f5f29d0c9ebc6a071bf525de7 | [
"MIT"
] | 2 | 2016-04-18T15:52:11.000Z | 2016-04-18T16:37:33.000Z | from rhcephcompose.build import Build
class TestBuild(object):
    """Unit tests for the Build model."""

    def test_constructor(self):
        # A Debian-style build ID is split into name and version parts.
        build_id = 'mypackage_1.0-1'
        build = Build(build_id)
        assert build.build_id == build_id
        assert build.name == 'mypackage'
        assert build.version == '1.0-1'
        # A freshly constructed build carries no artifacts yet.
        assert build.binaries == []
        assert build.sources == []
| 24.461538 | 46 | 0.59434 |
acf9df8bdb60e1626834cf65e840461330c65b0d | 184 | py | Python | old/mdss/dump_es.py | rohe/fedservice | 1460d21217b804cac0f38fa26ffa24bee7cf6dad | [
"Apache-2.0"
] | 3 | 2018-11-28T12:01:31.000Z | 2020-12-16T21:43:29.000Z | example/mdss/dump_es.py | peppelinux/fedservice | 0dc5fd0bd33e181b6a1a9bbef6835b2ce5d2f568 | [
"Apache-2.0"
] | 13 | 2020-02-10T15:33:37.000Z | 2022-02-01T16:43:36.000Z | example/mdss/dump_es.py | peppelinux/fedservice | 0dc5fd0bd33e181b6a1a9bbef6835b2ce5d2f568 | [
"Apache-2.0"
] | 4 | 2019-05-29T10:04:48.000Z | 2020-10-14T09:52:53.000Z | #!/usr/bin/env python3
import json
import sys

from cryptojwt.jws.jws import factory

# Parse the JWS (compact serialization) read from stdin.
_jws = factory(sys.stdin.read())
# Pretty-print the decoded JWT payload as sorted, indented JSON.
print(json.dumps(_jws.jwt.payload(), indent=4, sort_keys=True))
| 18.4 | 63 | 0.75 |
acf9dfd4a8f6602c6c8bb8fad6a38375ad9bf034 | 1,841 | py | Python | code/basics/src/fancy_action_server.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 442 | 2015-12-11T02:59:16.000Z | 2022-03-31T22:10:25.000Z | code/basics/src/fancy_action_server.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 41 | 2016-01-07T19:15:29.000Z | 2021-12-03T01:52:58.000Z | code/basics/src/fancy_action_server.py | amjadmajid/rosbook | 20d4ab94d910adc62c4aecb471ceac13b5cef5ad | [
"Apache-2.0"
] | 249 | 2015-11-27T10:22:33.000Z | 2022-03-28T09:52:05.000Z | #! /usr/bin/env python
# BEGIN ALL
#! /usr/bin/env python
import rospy
import time
import actionlib
# BEGIN PART_1
from basics.msg import TimerAction, TimerGoal, TimerResult, TimerFeedback
# END PART_1
def do_timer(goal):
    """Execute callback for the 'timer' action server.

    Waits for ``goal.time_to_wait`` seconds in one-second steps, publishing
    a TimerFeedback message each step.  Aborts goals longer than 60 seconds,
    honours preemption requests, and otherwise succeeds with a TimerResult.
    Uses the module-level ``server`` defined below.
    """
    start_time = time.time()
    # BEGIN PART_2
    # Counts how many feedback messages were published; reported in the result.
    update_count = 0
    # END PART_2
    # BEGIN PART_3
    # Refuse over-long requests up front instead of blocking the server.
    if goal.time_to_wait.to_sec() > 60.0:
        result = TimerResult()
        result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
        result.updates_sent = update_count
        server.set_aborted(result, "Timer aborted due to too-long wait")
        return
    # END PART_3
    # BEGIN PART_4
    while (time.time() - start_time) < goal.time_to_wait.to_sec():
        # END PART_4
        # BEGIN PART_5
        # A client may cancel the goal mid-wait; report partial progress.
        if server.is_preempt_requested():
            result = TimerResult()
            result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
            result.updates_sent = update_count
            server.set_preempted(result, "Timer preempted")
            return
        # END PART_5
        # BEGIN PART_6
        feedback = TimerFeedback()
        feedback.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
        feedback.time_remaining = goal.time_to_wait - feedback.time_elapsed
        server.publish_feedback(feedback)
        update_count += 1
        # END PART_6
        # BEGIN PART_7
        # One-second polling granularity for preemption and feedback.
        time.sleep(1.0)
        # END PART_7
    # BEGIN PART_8
    result = TimerResult()
    result.time_elapsed = rospy.Duration.from_sec(time.time() - start_time)
    result.updates_sent = update_count
    server.set_succeeded(result, "Timer completed successfully")
    # END PART_8
# Module-level setup: the `server` global is referenced inside do_timer above.
rospy.init_node('timer_action_server')
# auto_start=False: start() is called explicitly once the callback is wired up.
server = actionlib.SimpleActionServer('timer', TimerAction, do_timer, False)
server.start()
rospy.spin()
# END ALL
| 28.765625 | 83 | 0.663227 |
acf9e00da8143312cfd4247a987b72c14fb30b09 | 2,567 | py | Python | setup.py | vicchuayh/speach | 0de88dc4ee6958e31b084cf1f6e018c40b4af0e4 | [
"MIT"
] | null | null | null | setup.py | vicchuayh/speach | 0de88dc4ee6958e31b084cf1f6e018c40b4af0e4 | [
"MIT"
] | null | null | null | setup.py | vicchuayh/speach | 0de88dc4ee6958e31b084cf1f6e018c40b4af0e4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Setup script for speach
Latest version can be found at https://github.com/neocl/speach
@author: Le Tuan Anh <tuananh.ke@gmail.com>
@license: MIT
'''
# This code is a part of speach library: https://github.com/neocl/speach/
# :copyright: (c) 2018 Le Tuan Anh <tuananh.ke@gmail.com>
# :license: MIT, see LICENSE for more details.
import io
from setuptools import setup
def read(*filenames, **kwargs):
    """Concatenate the contents of *filenames* into a single string.

    Keyword Args:
        encoding: Text encoding used to open each file (default ``utf-8``).
        sep: Separator placed between file contents (default newline).
    """
    encoding = kwargs.get('encoding', 'utf-8')
    separator = kwargs.get('sep', '\n')

    def _slurp(path):
        with io.open(path, encoding=encoding) as handle:
            return handle.read()

    return separator.join(_slurp(name) for name in filenames)
readme_file = 'README.md'
long_description = read(readme_file)
# Package metadata (__version__, __license__, __author__, ...) is defined in
# speach/__version__.py and loaded into this dict by executing the file.
pkg_info = {}
exec(read('speach/__version__.py'), pkg_info)

with open('requirements.txt', 'r') as infile:
    requirements = infile.read().splitlines()

setup(
    name='speach',
    version=pkg_info['__version__'],
    tests_require=requirements + ['coverage'],
    install_requires=requirements,
    python_requires=">=3.6",
    license=pkg_info['__license__'],
    author=pkg_info['__author__'],
    author_email=pkg_info['__email__'],
    description=pkg_info['__description__'],
    long_description=long_description,
    long_description_content_type='text/markdown',
    packages=['speach',
              'speach.data'],
    package_data={'speach': ['data/*.sql', 'data/*.gz']},
    include_package_data=True,
    url=pkg_info['__url__'],
    project_urls={
        "Bug Tracker": "https://github.com/neocl/speach/issues",
        "Source Code": "https://github.com/neocl/speach/"
    },
    keywords=["nlp", "annotation", "text", "corpus", "linguistics", "ELAN", "transcription"],
    platforms='any',
    test_suite='test',
    # Reference: https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=['Programming Language :: Python',
                 'Development Status :: 2 - Pre-Alpha',
                 'License :: OSI Approved :: {}'.format(pkg_info['__license__']),
                 'Environment :: Plugins',
                 'Intended Audience :: Education',
                 'Intended Audience :: Science/Research',
                 'Intended Audience :: Information Technology',
                 'Intended Audience :: Developers',
                 'Operating System :: OS Independent',
                 'Topic :: Text Processing',
                 'Topic :: Text Processing :: Linguistic',
                 'Topic :: Software Development :: Libraries :: Python Modules']
)
| 33.337662 | 93 | 0.627191 |
acf9e0268217dd1564f7b4cf51d5c1e98e963207 | 7,979 | py | Python | varats/varats/plots/case_study_overview.py | se-passau/VaRA-Tool-Suite | b81d795327431af8c854244c67b67ce97d512b2b | [
"BSD-2-Clause"
] | 8 | 2019-10-30T08:07:44.000Z | 2020-11-13T08:02:36.000Z | varats/varats/plots/case_study_overview.py | se-passau/VaRA-Tool-Suite | b81d795327431af8c854244c67b67ce97d512b2b | [
"BSD-2-Clause"
] | 342 | 2019-02-14T15:53:31.000Z | 2020-11-03T18:11:27.000Z | varats/varats/plots/case_study_overview.py | se-passau/VaRA-Tool-Suite | b81d795327431af8c854244c67b67ce97d512b2b | [
"BSD-2-Clause"
] | 3 | 2020-04-15T13:24:44.000Z | 2020-10-27T21:13:10.000Z | """Generate plots that show a detailed overview of the state of one case-
study."""
import typing as tp
import matplotlib.pyplot as plt
from matplotlib import style
from pandas import DataFrame
from varats.data.databases.file_status_database import FileStatusDatabase
from varats.data.reports.empty_report import EmptyReport
from varats.mapping.commit_map import CommitMap, get_commit_map
from varats.paper.case_study import CaseStudy
from varats.plot.plot import Plot
from varats.plot.plot_utils import find_missing_revisions
from varats.plot.plots import PlotGenerator
from varats.project.project_util import (
get_project_cls_by_name,
get_local_project_git_path,
)
from varats.report.report import FileStatusExtension, BaseReport
from varats.ts_utils.cli_util import CLIOptionTy, make_cli_option
from varats.ts_utils.click_param_types import (
REQUIRE_REPORT_TYPE,
REQUIRE_CASE_STUDY,
)
from varats.utils.git_util import ShortCommitHash, FullCommitHash
# RGB tuples (components in [0, 1]) used to color revision markers in the
# overview eventplot below.
SUCCESS_COLOR = (0.5568627450980392, 0.7294117647058823, 0.25882352941176473)
BLOCKED_COLOR = (0.20392156862745098, 0.5411764705882353, 0.7411764705882353)
FAILED_COLOR = (0.8862745098039215, 0.2901960784313726, 0.2)
# NOTE(review): COMPILE_ERROR uses the same red as FAILED — presumably
# intentional (both are "did not produce a result"); confirm.
COMPILE_ERROR_COLOR = (0.8862745098039215, 0.2901960784313726, 0.2)
MISSING_COLOR = (0.984313725490196, 0.7568627450980392, 0.3686274509803922)
BACKGROUND_COLOR = (0.4666666666666667, 0.4666666666666667, 0.4666666666666667)

# CLI toggle: include revisions tagged as blocked in the data shown.
OPTIONAL_SHOW_BLOCKED: CLIOptionTy = make_cli_option(
    "--show-blocked/--hide-blocked",
    type=bool,
    default=True,
    required=False,
    metavar="show_blocked",
    help="Shows/hides blocked revisions."
)
# CLI toggle: additionally show revisions the project itself marks as blocked,
# even if they are not part of the case study sample.
OPTIONAL_SHOW_ALL_BLOCKED: CLIOptionTy = make_cli_option(
    "--show-all-blocked/--hide-all-blocked",
    type=bool,
    default=False,
    required=False,
    metavar="show_all_blocked",
    help="Shows/hides all blocked revisions."
)
def _gen_overview_data(tag_blocked: bool,
                       **kwargs: tp.Any) -> tp.Dict[str, tp.List[int]]:
    """Collect, per file status, the time-ids of a case study's revisions.

    Args:
        tag_blocked: forwarded to FileStatusDatabase; controls whether
            blocked revisions are tagged as such in the returned statuses.
        **kwargs: must contain ``case_study``; may contain ``report_type``
            (defaults to EmptyReport).

    Returns:
        A dict mapping status category ("background", "blocked",
        "blocked_all", "compile_error", "failed", "missing", "success")
        to the list of commit-map time-ids in that category.
    """
    case_study: CaseStudy = kwargs["case_study"]
    project_name = case_study.project_name
    commit_map: CommitMap = get_commit_map(project_name)
    project = get_project_cls_by_name(project_name)
    if 'report_type' in kwargs:
        result_file_type: tp.Type[BaseReport] = kwargs['report_type']
    else:
        result_file_type = EmptyReport
    positions: tp.Dict[str, tp.List[int]] = {
        "background": [],
        "blocked": [],
        "blocked_all": [],
        "compile_error": [],
        "failed": [],
        "missing": [],
        "success": []
    }
    for c_hash, index in commit_map.mapping_items():
        # "background": every commit of the project not sampled by the case study.
        if not case_study.has_revision(ShortCommitHash(c_hash)):
            positions["background"].append(index)
        # "blocked_all" starts with revisions the project class itself blocks...
        if hasattr(project, "is_blocked_revision"
                  ) and project.is_blocked_revision(c_hash)[0]:
            positions["blocked_all"].append(index)
    revisions = FileStatusDatabase.get_data_for_project(
        project_name, ["revision", "time_id", "file_status"],
        commit_map,
        case_study,
        result_file_type=result_file_type,
        tag_blocked=tag_blocked
    )
    positions["success"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.SUCCESS.get_status_extension()]
    )["time_id"].tolist()
    positions["failed"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.FAILED.get_status_extension()]
    )["time_id"].tolist()
    positions["blocked"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.BLOCKED.get_status_extension()]
    )["time_id"].tolist()
    # ...and is extended with case-study revisions whose status is BLOCKED.
    # NOTE(review): entries that are both project-blocked and status-blocked
    # appear twice in "blocked_all" — presumably harmless for eventplot; confirm.
    positions["blocked_all"].extend((
        revisions[revisions["file_status"] ==
                  FileStatusExtension.BLOCKED.get_status_extension()]
    )["time_id"].tolist())
    positions["missing"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.MISSING.get_status_extension()]
    )["time_id"].tolist()
    positions["compile_error"] = (
        revisions[revisions["file_status"] ==
                  FileStatusExtension.COMPILE_ERROR.get_status_extension()]
    )["time_id"].tolist()
    return positions
class CaseStudyOverviewPlot(Plot, plot_name="case_study_overview_plot"):
    """Plot showing an overview of all revisions within a case study."""

    NAME = 'case_study_overview_plot'

    def plot(self, view_mode: bool) -> None:
        """Render one eventplot row per file-status category onto a single axis."""
        style.use(self.plot_config.style())
        data = _gen_overview_data(
            self.plot_kwargs["show_blocked"], **self.plot_kwargs
        )
        fig_width = 4
        # NOTE(review): 0.01389 ~= 1/72, i.e. inches per point — presumably
        # used to convert the per-commit width into a point-based linewidth;
        # confirm.
        dot_to_inch = 0.01389
        line_width = 0.75
        _, axis = plt.subplots(1, 1, figsize=(fig_width, 1))
        commit_map: CommitMap = get_commit_map(
            self.plot_kwargs["case_study"].project_name
        )
        # Divide the figure width evenly among all commits of the project.
        linewidth = (
            fig_width / len(commit_map.mapping_items())
        ) / dot_to_inch * line_width
        axis.eventplot(
            data["background"], linewidths=linewidth, colors=BACKGROUND_COLOR
        )
        axis.eventplot(
            data["success"], linewidths=linewidth, colors=SUCCESS_COLOR
        )
        axis.eventplot(
            data["failed"], linewidths=linewidth, colors=FAILED_COLOR
        )
        axis.eventplot(
            data["missing"], linewidths=linewidth, colors=MISSING_COLOR
        )
        axis.eventplot(
            data["compile_error"],
            linewidths=linewidth,
            colors=COMPILE_ERROR_COLOR
        )
        # Either all project-blocked revisions or only the case study's
        # blocked revisions, depending on the CLI flag.
        if self.plot_kwargs["show_all_blocked"]:
            axis.eventplot(
                data["blocked_all"], linewidths=linewidth, colors=BLOCKED_COLOR
            )
        else:
            axis.eventplot(
                data["blocked"], linewidths=linewidth, colors=BLOCKED_COLOR
            )
        axis.set_axis_off()

    def calc_missing_revisions(
        self, boundary_gradient: float
    ) -> tp.Set[FullCommitHash]:
        """Suggest revisions to sample where adjacent revisions change file status."""
        case_study: CaseStudy = self.plot_kwargs["case_study"]
        project_name: str = case_study.project_name
        commit_map: CommitMap = get_commit_map(project_name)

        def gen_revision_df(**plot_kwargs: tp.Any) -> DataFrame:
            # One row per case-study revision: (revision, time_id, file_status).
            result_file_type: tp.Type[BaseReport] = plot_kwargs.get(
                "report_type", EmptyReport
            )
            # load data
            frame = FileStatusDatabase.get_data_for_project(
                project_name, ["revision", "time_id", "file_status"],
                commit_map,
                case_study,
                result_file_type=result_file_type,
                tag_blocked=True
            )
            return frame

        revision_df = gen_revision_df(**self.plot_kwargs)
        revision_df.sort_values(by=['time_id'], inplace=True)

        def head_cm_neighbours(
            lhs_cm: ShortCommitHash, rhs_cm: ShortCommitHash
        ) -> bool:
            # True when the two commits are directly adjacent in the commit map.
            return commit_map.short_time_id(
                lhs_cm
            ) + 1 == commit_map.short_time_id(rhs_cm)

        def should_insert_revision(last_row: tp.Any,
                                   row: tp.Any) -> tp.Tuple[bool, float]:
            # Insert between revisions whose file status differs.
            return last_row["file_status"] != row["file_status"], 1.0

        def get_commit_hash(row: tp.Any) -> ShortCommitHash:
            return ShortCommitHash(str(row["revision"]))

        return find_missing_revisions(
            revision_df.iterrows(), get_local_project_git_path(project_name),
            commit_map, should_insert_revision, get_commit_hash,
            head_cm_neighbours
        )
class CaseStudyOverviewGenerator(
    PlotGenerator,
    generator_name="cs-overview-plot",
    options=[
        REQUIRE_REPORT_TYPE, REQUIRE_CASE_STUDY, OPTIONAL_SHOW_BLOCKED,
        OPTIONAL_SHOW_ALL_BLOCKED
    ]
):
    """Generates a case study overview plot."""

    def generate(self) -> tp.List[Plot]:
        """Return a single CaseStudyOverviewPlot built from the CLI options."""
        return [CaseStudyOverviewPlot(self.plot_config, **self.plot_kwargs)]
| 34.541126 | 79 | 0.654343 |
acf9e030e33763f7c97b819adee5cf5bb9e56b89 | 1,759 | py | Python | app.py | bengusty/bengusty.github.io | 0f387e07013056bb1a97bbc594a01669d3707bcb | [
"MIT"
] | null | null | null | app.py | bengusty/bengusty.github.io | 0f387e07013056bb1a97bbc594a01669d3707bcb | [
"MIT"
] | null | null | null | app.py | bengusty/bengusty.github.io | 0f387e07013056bb1a97bbc594a01669d3707bcb | [
"MIT"
] | null | null | null | import json
import requests
from flask import Flask, request, Response, make_response
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Trivial root endpoint; doubles as a health check."""
    return 'Hello, World!'
@app.route('/exchange_token')
def exchange_token():
    """Strava OAuth callback: exchange the auth code and list activities.

    Expects ``code`` and ``scope`` query parameters from Strava's redirect.
    Exchanges the code for an access token, fetches up to 100 of the
    athlete's activities, and returns them as a stringified list of dicts.
    """
    code = request.args.get("code")
    scope = request.args.get("scope")
    expected_scope = "read,activity:read"
    # Without activity:read the activities request below would be denied.
    if not scope == expected_scope:
        resp = make_response("The 'View data about your activities' checkbox must be checked in order to use the app", 400)
        return resp
    # NOTE(security): the OAuth client_secret is hardcoded in source. It
    # should be loaded from an environment variable / secret store, and this
    # leaked value should be rotated.
    data = {"client_id": 6003, "client_secret": "0a3e2238fba9cae0d5a28dc1e58c0b2ef207c902", "code": code,
            "grant_type": "authorization_code"}
    response = requests.post("https://www.strava.com/oauth/token", data=data)
    response_data = json.loads(response.text)
    # refresh_token = response_data.get("refresh_token")
    access_token = response_data.get("access_token")
    activities_resp = requests.get("https://www.strava.com/api/v3/athlete/activities?per_page=100&scope={}".format(scope),
                                   headers={"Authorization": "Bearer {}".format(access_token)})
    if activities_resp.status_code != 200:
        resp = make_response("Bad request", 400)
        return resp
    activities = json.loads(activities_resp.text)
    # Convert Strava's metric units: meters / 1609.344 = miles,
    # meters * 3.28084 = feet.
    activities_info = [{"id": activity.get("id"),
                        "distance": round(activity.get("distance") / 1609.344, 2),
                        "elevation": int(activity.get("total_elevation_gain") * 3.28084),
                        "name": activity.get("name"),
                        "date": activity.get("start_date_local")} for activity in activities]
    return make_response(str(activities_info))
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| 35.897959 | 123 | 0.647527 |
acf9e05662430dd843edf40154bad17bf43abc8b | 39,417 | py | Python | discord/message.py | monospacedmagic/discord.py | 5107583c5db1eec6dc3a7300c0a1601913a1a9c2 | [
"MIT"
] | 2 | 2020-06-13T19:59:45.000Z | 2020-06-18T08:32:08.000Z | discord/message.py | monospacedmagic/discord.py | 5107583c5db1eec6dc3a7300c0a1601913a1a9c2 | [
"MIT"
] | null | null | null | discord/message.py | monospacedmagic/discord.py | 5107583c5db1eec6dc3a7300c0a1601913a1a9c2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2020 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import asyncio
import datetime
import re
import io
from . import utils
from .reaction import Reaction
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .calls import CallMessage
from .enums import MessageType, try_enum
from .errors import InvalidArgument, ClientException, HTTPException
from .embeds import Embed
from .member import Member
from .flags import MessageFlags
from .file import File
from .utils import escape_mentions
from .guild import Guild
class Attachment:
    """Represents an attachment from Discord.

    Attributes
    ------------
    id: :class:`int`
        The attachment ID.
    size: :class:`int`
        The attachment size in bytes.
    height: Optional[:class:`int`]
        The attachment's height, in pixels. Only applicable to images and videos.
    width: Optional[:class:`int`]
        The attachment's width, in pixels. Only applicable to images and videos.
    filename: :class:`str`
        The attachment's filename.
    url: :class:`str`
        The attachment URL. If the message this attachment was attached
        to is deleted, then this will 404.
    proxy_url: :class:`str`
        The proxy URL. This is a cached version of the :attr:`~Attachment.url` in the
        case of images. When the message is deleted, this URL might be valid for a few
        minutes or not valid at all.
    """

    __slots__ = ('id', 'size', 'height', 'width', 'filename', 'url', 'proxy_url', '_http')

    def __init__(self, *, data, state):
        self.id = int(data['id'])
        self.size = data['size']
        # height/width are absent for non-media attachments, hence .get().
        self.height = data.get('height')
        self.width = data.get('width')
        self.filename = data['filename']
        self.url = data.get('url')
        self.proxy_url = data.get('proxy_url')
        # HTTP client handle used by read()/save() to fetch the CDN content.
        self._http = state.http

    def is_spoiler(self):
        """:class:`bool`: Whether this attachment contains a spoiler."""
        # Spoilered uploads are marked by a filename prefix.
        return self.filename.startswith('SPOILER_')

    def __repr__(self):
        return '<Attachment id={0.id} filename={0.filename!r} url={0.url!r}>'.format(self)

    async def save(self, fp, *, seek_begin=True, use_cached=False):
        """|coro|

        Saves this attachment into a file-like object.

        Parameters
        -----------
        fp: Union[:class:`io.BufferedIOBase`, :class:`os.PathLike`]
            The file-like object to save this attachment to or the filename
            to use. If a filename is passed then a file is created with that
            filename and used instead.
        seek_begin: :class:`bool`
            Whether to seek to the beginning of the file after saving is
            successfully done.
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

        Raises
        --------
        HTTPException
            Saving the attachment failed.
        NotFound
            The attachment was deleted.

        Returns
        --------
        :class:`int`
            The number of bytes written.
        """
        data = await self.read(use_cached=use_cached)
        # Writable file-like object: write into it directly.
        if isinstance(fp, io.IOBase) and fp.writable():
            written = fp.write(data)
            if seek_begin:
                fp.seek(0)
            return written
        else:
            # Otherwise treat fp as a path and create/overwrite the file.
            with open(fp, 'wb') as f:
                return f.write(data)

    async def read(self, *, use_cached=False):
        """|coro|

        Retrieves the content of this attachment as a :class:`bytes` object.

        .. versionadded:: 1.1

        Parameters
        -----------
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`bytes`
            The contents of the attachment.
        """
        url = self.proxy_url if use_cached else self.url
        data = await self._http.get_from_cdn(url)
        return data

    async def to_file(self, *, use_cached=False):
        """|coro|

        Converts the attachment into a :class:`File` suitable for sending via
        :meth:`abc.Messageable.send`.

        .. versionadded:: 1.3

        Parameters
        -----------
        use_cached: :class:`bool`
            Whether to use :attr:`proxy_url` rather than :attr:`url` when downloading
            the attachment. This will allow attachments to be saved after deletion
            more often, compared to the regular URL which is generally deleted right
            after the message is deleted. Note that this can still fail to download
            deleted attachments if too much time has passed and it does not work
            on some types of attachments.

            .. versionadded:: 1.4

        Raises
        ------
        HTTPException
            Downloading the attachment failed.
        Forbidden
            You do not have permissions to access this attachment
        NotFound
            The attachment was deleted.

        Returns
        -------
        :class:`File`
            The attachment as a file suitable for sending.
        """
        data = await self.read(use_cached=use_cached)
        return File(io.BytesIO(data), filename=self.filename)
def flatten_handlers(cls):
    """Class decorator that precomputes dispatch metadata on *cls*.

    Collects every ``_handle_*`` method into ``cls._HANDLERS`` (keyed by the
    suffix after ``_handle_``) and every ``_cs_*`` slot name into
    ``cls._CACHED_SLOTS``, then returns the class unchanged otherwise.
    """
    handler_prefix = '_handle_'
    cut = len(handler_prefix)
    handlers = {}
    for attr_name, attr_value in cls.__dict__.items():
        if attr_name.startswith(handler_prefix):
            handlers[attr_name[cut:]] = attr_value
    cls._HANDLERS = handlers
    cls._CACHED_SLOTS = [slot for slot in cls.__slots__ if slot.startswith('_cs_')]
    return cls
@flatten_handlers
class Message:
r"""Represents a message from Discord.
There should be no need to create one of these manually.
Attributes
-----------
tts: :class:`bool`
Specifies if the message was done with text-to-speech.
This can only be accurately received in :func:`on_message` due to
a discord limitation.
type: :class:`MessageType`
The type of message. In most cases this should not be checked, but it is helpful
in cases where it might be a system message for :attr:`system_content`.
author: :class:`abc.User`
A :class:`Member` that sent the message. If :attr:`channel` is a
private channel or the user has the left the guild, then it is a :class:`User` instead.
content: :class:`str`
The actual contents of the message.
nonce
The value used by the discord guild and the client to verify that the message is successfully sent.
This is typically non-important.
embeds: List[:class:`Embed`]
A list of embeds the message has.
channel: Union[:class:`abc.Messageable`]
The :class:`TextChannel` that the message was sent from.
Could be a :class:`DMChannel` or :class:`GroupChannel` if it's a private message.
call: Optional[:class:`CallMessage`]
The call that the message refers to. This is only applicable to messages of type
:attr:`MessageType.call`.
mention_everyone: :class:`bool`
Specifies if the message mentions everyone.
.. note::
This does not check if the ``@everyone`` or the ``@here`` text is in the message itself.
Rather this boolean indicates if either the ``@everyone`` or the ``@here`` text is in the message
**and** it did end up mentioning.
mentions: List[:class:`abc.User`]
A list of :class:`Member` that were mentioned. If the message is in a private message
then the list will be of :class:`User` instead. For messages that are not of type
:attr:`MessageType.default`\, this array can be used to aid in system messages.
For more information, see :attr:`system_content`.
.. warning::
The order of the mentions list is not in any particular order so you should
not rely on it. This is a discord limitation, not one with the library.
channel_mentions: List[:class:`abc.GuildChannel`]
A list of :class:`abc.GuildChannel` that were mentioned. If the message is in a private message
then the list is always empty.
role_mentions: List[:class:`Role`]
A list of :class:`Role` that were mentioned. If the message is in a private message
then the list is always empty.
id: :class:`int`
The message ID.
webhook_id: Optional[:class:`int`]
If this message was sent by a webhook, then this is the webhook ID's that sent this
message.
attachments: List[:class:`Attachment`]
A list of attachments given to a message.
pinned: :class:`bool`
Specifies if the message is currently pinned.
flags: :class:`MessageFlags`
Extra features of the message.
.. versionadded:: 1.3
reactions : List[:class:`Reaction`]
Reactions to a message. Reactions can be either custom emoji or standard unicode emoji.
activity: Optional[:class:`dict`]
The activity associated with this message. Sent with Rich-Presence related messages that for
example, request joining, spectating, or listening to or with another member.
It is a dictionary with the following optional keys:
- ``type``: An integer denoting the type of message activity being requested.
- ``party_id``: The party ID associated with the party.
application: Optional[:class:`dict`]
The rich presence enabled application associated with this message.
It is a dictionary with the following keys:
- ``id``: A string representing the application's ID.
- ``name``: A string representing the application's name.
- ``description``: A string representing the application's description.
- ``icon``: A string representing the icon ID of the application.
- ``cover_image``: A string representing the embed's image asset ID.
"""
__slots__ = ('_edited_timestamp', 'tts', 'content', 'channel', 'webhook_id',
'mention_everyone', 'embeds', 'id', 'mentions', 'author',
'_cs_channel_mentions', '_cs_raw_mentions', 'attachments',
'_cs_clean_content', '_cs_raw_channel_mentions', 'nonce', 'pinned',
'role_mentions', '_cs_raw_role_mentions', 'type', 'call', 'flags',
'_cs_system_content', '_cs_guild', '_state', 'reactions',
'application', 'activity')
    def __init__(self, *, state, channel, data):
        """Build a Message from the raw gateway/HTTP payload ``data``."""
        self._state = state
        self.id = int(data['id'])
        # Present only for webhook-authored messages.
        self.webhook_id = utils._get_as_snowflake(data, 'webhook_id')
        self.reactions = [Reaction(message=self, data=d) for d in data.get('reactions', [])]
        self.attachments = [Attachment(data=a, state=self._state) for a in data['attachments']]
        self.embeds = [Embed.from_dict(a) for a in data['embeds']]
        self.application = data.get('application')
        self.activity = data.get('activity')
        self.channel = channel
        self._edited_timestamp = utils.parse_time(data['edited_timestamp'])
        self.type = try_enum(MessageType, data['type'])
        self.pinned = data['pinned']
        self.flags = MessageFlags._from_value(data.get('flags', 0))
        self.mention_everyone = data['mention_everyone']
        self.tts = data['tts']
        self.content = data['content']
        self.nonce = data.get('nonce')

        # These keys are optional in the payload; dispatch each one that is
        # present to its _handle_* method (the same dispatch _update uses).
        for handler in ('author', 'member', 'mentions', 'mention_roles', 'call', 'flags'):
            try:
                getattr(self, '_handle_%s' % handler)(data[handler])
            except KeyError:
                continue
def __repr__(self):
return '<Message id={0.id} channel={0.channel!r} type={0.type!r} author={0.author!r} flags={0.flags!r}>'.format(self)
def _try_patch(self, data, key, transform=None):
try:
value = data[key]
except KeyError:
pass
else:
if transform is None:
setattr(self, key, value)
else:
setattr(self, key, transform(value))
    def _add_reaction(self, data, emoji, user_id):
        """Record one incoming reaction event in the local reaction cache."""
        reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
        # Mutates data['me'] so a freshly-created Reaction picks it up below.
        is_me = data['me'] = user_id == self._state.self_id
        if reaction is None:
            # First reaction with this emoji; the payload carries count/me.
            reaction = Reaction(message=self, data=data, emoji=emoji)
            self.reactions.append(reaction)
        else:
            reaction.count += 1
            if is_me:
                reaction.me = is_me
        return reaction
    def _remove_reaction(self, data, emoji, user_id):
        """Undo one reaction event; raises ValueError on inconsistent state."""
        reaction = utils.find(lambda r: r.emoji == emoji, self.reactions)
        if reaction is None:
            # already removed?
            raise ValueError('Emoji already removed?')

        # if reaction isn't in the list, we crash. This means discord
        # sent bad data, or we stored improperly
        reaction.count -= 1

        if user_id == self._state.self_id:
            reaction.me = False
        if reaction.count == 0:
            # this raises ValueError if something went wrong as well.
            self.reactions.remove(reaction)

        return reaction
def _clear_emoji(self, emoji):
to_check = str(emoji)
for index, reaction in enumerate(self.reactions):
if str(reaction.emoji) == to_check:
break
else:
# didn't find anything so just return
return
del self.reactions[index]
return reaction
    def _update(self, data):
        """Apply a partial MESSAGE_UPDATE payload via the _handle_* dispatch."""
        handlers = self._HANDLERS
        for key, value in data.items():
            try:
                handler = handlers[key]
            except KeyError:
                # Unknown/unhandled payload keys are ignored on purpose.
                continue
            else:
                handler(self, value)

        # clear the cached properties so they are recomputed on next access
        for attr in self._CACHED_SLOTS:
            try:
                delattr(self, attr)
            except AttributeError:
                # Slot was never computed; nothing to invalidate.
                pass
    def _handle_edited_timestamp(self, value):
        # Raw ISO8601 string (or None) -> datetime.
        self._edited_timestamp = utils.parse_time(value)
    def _handle_pinned(self, value):
        # Payload value is already a bool.
        self.pinned = value
    def _handle_flags(self, value):
        # Raw bitfield int -> MessageFlags wrapper.
        self.flags = MessageFlags._from_value(value)
    def _handle_application(self, value):
        # Stored as the raw dict from the payload.
        self.application = value
    def _handle_activity(self, value):
        # Stored as the raw dict from the payload.
        self.activity = value
    def _handle_mention_everyone(self, value):
        # Payload value is already a bool.
        self.mention_everyone = value
    def _handle_tts(self, value):
        # Payload value is already a bool.
        self.tts = value
    def _handle_type(self, value):
        # Raw int -> MessageType enum (falls back to the raw value if unknown).
        self.type = try_enum(MessageType, value)
    def _handle_content(self, value):
        # Raw message text as sent by Discord.
        self.content = value
    def _handle_attachments(self, value):
        # Replace (not merge) the attachment list from the raw payload.
        self.attachments = [Attachment(data=a, state=self._state) for a in value]
    def _handle_embeds(self, value):
        # Replace (not merge) the embed list from the raw payload.
        self.embeds = [Embed.from_dict(data) for data in value]
    def _handle_nonce(self, value):
        # Opaque client-supplied value used for send verification.
        self.nonce = value
    def _handle_author(self, author):
        """Store the author, upgrading the User to a Member when cached."""
        self.author = self._state.store_user(author)
        if isinstance(self.guild, Guild):
            found = self.guild.get_member(self.author.id)
            if found is not None:
                # Prefer the richer guild Member object over the plain User.
                self.author = found
    def _handle_member(self, member):
        """Merge the partial ``member`` payload into :attr:`author`."""
        # The gateway now gives us full Member objects sometimes with the following keys
        # deaf, mute, joined_at, roles
        # For the sake of performance I'm going to assume that the only
        # field that needs *updating* would be the joined_at field.
        # If there is no Member object (for some strange reason), then we can upgrade
        # ourselves to a more "partial" member object.
        author = self.author
        try:
            # Update member reference
            if author.joined_at is None:
                author.joined_at = utils.parse_time(member.get('joined_at'))
        except AttributeError:
            # It's a user here
            # TODO: consider adding to cache here
            self.author = Member._from_message(message=self, data=member)
    def _handle_mentions(self, mentions):
        """Resolve raw mention payloads into Member/User objects."""
        self.mentions = r = []
        guild = self.guild
        state = self._state
        if not isinstance(guild, Guild):
            # No guild context (DM/group): everyone stays a plain User.
            self.mentions = [state.store_user(m) for m in mentions]
            return

        for mention in filter(None, mentions):
            id_search = int(mention['id'])
            member = guild.get_member(id_search)
            if member is not None:
                r.append(member)
            else:
                # Not in the member cache; build as full a Member as possible.
                r.append(Member._try_upgrade(data=mention, guild=guild, state=state))
def _handle_mention_roles(self, role_mentions):
self.role_mentions = []
if isinstance(self.guild, Guild):
for role_id in map(int, role_mentions):
role = self.guild.get_role(role_id)
if role is not None:
self.role_mentions.append(role)
    def _handle_call(self, call):
        """Build the CallMessage for call-type messages, else clear it."""
        if call is None or self.type is not MessageType.call:
            self.call = None
            return

        # we get the participant source from the mentions array or
        # the author
        participants = []
        for uid in map(int, call.get('participants', [])):
            if uid == self.author.id:
                participants.append(self.author)
            else:
                user = utils.find(lambda u: u.id == uid, self.mentions)
                if user is not None:
                    participants.append(user)

        # Replace raw IDs with resolved user objects before constructing.
        call['participants'] = participants
        self.call = CallMessage(message=self, **call)
@utils.cached_slot_property('_cs_guild')
def guild(self):
"""Optional[:class:`Guild`]: The guild that the message belongs to, if applicable."""
return getattr(self.channel, 'guild', None)
@utils.cached_slot_property('_cs_raw_mentions')
def raw_mentions(self):
"""List[:class:`int`]: A property that returns an array of user IDs matched with
the syntax of ``<@user_id>`` in the message content.
This allows you to receive the user IDs of mentioned users
even in a private message context.
"""
return [int(x) for x in re.findall(r'<@!?([0-9]+)>', self.content)]
@utils.cached_slot_property('_cs_raw_channel_mentions')
def raw_channel_mentions(self):
"""List[:class:`int`]: A property that returns an array of channel IDs matched with
the syntax of ``<#channel_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<#([0-9]+)>', self.content)]
@utils.cached_slot_property('_cs_raw_role_mentions')
def raw_role_mentions(self):
"""List[:class:`int`]: A property that returns an array of role IDs matched with
the syntax of ``<@&role_id>`` in the message content.
"""
return [int(x) for x in re.findall(r'<@&([0-9]+)>', self.content)]
    @utils.cached_slot_property('_cs_channel_mentions')
    def channel_mentions(self):
        """List[:class:`abc.GuildChannel`]: A list of channels mentioned in the
        message content. Empty outside of a guild context.
        """
        if self.guild is None:
            return []
        # get_channel returns None for uncached IDs; filter those out and
        # de-duplicate while preserving order.
        it = filter(None, map(self.guild.get_channel, self.raw_channel_mentions))
        return utils._unique(it)
    @utils.cached_slot_property('_cs_clean_content')
    def clean_content(self):
        """A property that returns the content in a "cleaned up"
        manner. This basically means that mentions are transformed
        into the way the client shows it. e.g. ``<#id>`` will transform
        into ``#name``.

        This will also transform @everyone and @here mentions into
        non-mentions.

        .. note::

            This *does not* escape markdown. If you want to escape
            markdown then use :func:`utils.escape_markdown` along
            with this function.
        """
        # Map each escaped raw-mention token to its display form.
        transformations = {
            re.escape('<#%s>' % channel.id): '#' + channel.name
            for channel in self.channel_mentions
        }

        mention_transforms = {
            re.escape('<@%s>' % member.id): '@' + member.display_name
            for member in self.mentions
        }

        # add the <@!user_id> cases as well..
        second_mention_transforms = {
            re.escape('<@!%s>' % member.id): '@' + member.display_name
            for member in self.mentions
        }

        transformations.update(mention_transforms)
        transformations.update(second_mention_transforms)

        if self.guild is not None:
            role_transforms = {
                re.escape('<@&%s>' % role.id): '@' + role.name
                for role in self.role_mentions
            }
            transformations.update(role_transforms)

        def repl(obj):
            # Keys were built with re.escape, so re-escape the match to look up.
            return transformations.get(re.escape(obj.group(0)), '')

        # One pass over the content replacing every known mention token.
        pattern = re.compile('|'.join(transformations.keys()))
        result = pattern.sub(repl, self.content)
        return escape_mentions(result)
    @property
    def created_at(self):
        """:class:`datetime.datetime`: The message's creation time in UTC."""
        # The creation timestamp is encoded inside the snowflake ID itself.
        return utils.snowflake_time(self.id)
    @property
    def edited_at(self):
        """Optional[:class:`datetime.datetime`]: A naive UTC datetime object containing
        the edited time of the message. ``None`` if the message was never edited.
        """
        return self._edited_timestamp
@property
def jump_url(self):
""":class:`str`: Returns a URL that allows the client to jump to this message."""
guild_id = getattr(self.guild, 'id', '@me')
return 'https://discordapp.com/channels/{0}/{1.channel.id}/{1.id}'.format(guild_id, self)
    def is_system(self):
        """:class:`bool`: Whether the message is a system message.

        A system message is any message whose type is not the plain
        :attr:`MessageType.default`.

        .. versionadded:: 1.3
        """
        return self.type is not MessageType.default
    @utils.cached_slot_property('_cs_system_content')
    def system_content(self):
        r"""A property that returns the content that is rendered
        regardless of the :attr:`Message.type`.

        In the case of :attr:`MessageType.default`\, this just returns the
        regular :attr:`Message.content`. Otherwise this returns an English
        message denoting the contents of the system message.
        """
        if self.type is MessageType.default:
            return self.content

        if self.type is MessageType.pins_add:
            return '{0.name} pinned a message to this channel.'.format(self.author)

        if self.type is MessageType.recipient_add:
            return '{0.name} added {1.name} to the group.'.format(self.author, self.mentions[0])

        if self.type is MessageType.recipient_remove:
            return '{0.name} removed {1.name} from the group.'.format(self.author, self.mentions[0])

        if self.type is MessageType.channel_name_change:
            return '{0.author.name} changed the channel name: {0.content}'.format(self)

        if self.type is MessageType.channel_icon_change:
            return '{0.author.name} changed the channel icon.'.format(self)

        if self.type is MessageType.new_member:
            # The client picks a welcome string deterministically from the
            # message's creation timestamp, so we must do the same.
            formats = [
                "{0} just joined the server - glhf!",
                "{0} just joined. Everyone, look busy!",
                "{0} just joined. Can I get a heal?",
                "{0} joined your party.",
                "{0} joined. You must construct additional pylons.",
                "Ermagherd. {0} is here.",
                "Welcome, {0}. Stay awhile and listen.",
                "Welcome, {0}. We were expecting you ( ͡° ͜ʖ ͡°)",
                "Welcome, {0}. We hope you brought pizza.",
                "Welcome {0}. Leave your weapons by the door.",
                "A wild {0} appeared.",
                "Swoooosh. {0} just landed.",
                "Brace yourselves. {0} just joined the server.",
                "{0} just joined... or did they?",
                "{0} just arrived. Seems OP - please nerf.",
                "{0} just slid into the server.",
                "A {0} has spawned in the server.",
                "Big {0} showed up!",
                "Where’s {0}? In the server!",
                "{0} hopped into the server. Kangaroo!!",
                "{0} just showed up. Hold my beer.",
                "Challenger approaching - {0} has appeared!",
                "It's a bird! It's a plane! Nevermind, it's just {0}.",
                "It's {0}! Praise the sun! \\[T]/",
                "Never gonna give {0} up. Never gonna let {0} down.",
                "{0} has joined the battle bus.",
                "Cheers, love! {0}'s here!",
                "Hey! Listen! {0} has joined!",
                "We've been expecting you {0}",
                "It's dangerous to go alone, take {0}!",
                "{0} has joined the server! It's super effective!",
                "Cheers, love! {0} is here!",
                "{0} is here, as the prophecy foretold.",
                "{0} has arrived. Party's over.",
                "Ready player {0}",
                "{0} is here to kick butt and chew bubblegum. And {0} is all out of gum.",
                "Hello. Is it {0} you're looking for?",
                "{0} has joined. Stay a while and listen!",
                "Roses are red, violets are blue, {0} joined this server with you",
            ]

            # manually reconstruct the epoch with millisecond precision, because
            # datetime.datetime.timestamp() doesn't return the exact posix
            # timestamp with the precision that we need
            created_at_ms = int((self.created_at - datetime.datetime(1970, 1, 1)).total_seconds() * 1000)
            return formats[created_at_ms % len(formats)].format(self.author.name)

        if self.type is MessageType.call:
            # we're at the call message type now, which is a bit more complicated.
            # we can make the assumption that Message.channel is a PrivateChannel
            # with the type ChannelType.group or ChannelType.private
            call_ended = self.call.ended_timestamp is not None

            if self.channel.me in self.call.participants:
                return '{0.author.name} started a call.'.format(self)
            elif call_ended:
                return 'You missed a call from {0.author.name}'.format(self)
            else:
                return '{0.author.name} started a call \N{EM DASH} Join the call.'.format(self)

        if self.type is MessageType.premium_guild_subscription:
            return '{0.author.name} just boosted the server!'.format(self)

        if self.type is MessageType.premium_guild_tier_1:
            return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 1!**'.format(self)

        if self.type is MessageType.premium_guild_tier_2:
            return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 2!**'.format(self)

        if self.type is MessageType.premium_guild_tier_3:
            return '{0.author.name} just boosted the server! {0.guild} has achieved **Level 3!**'.format(self)

        if self.type is MessageType.channel_follow_add:
            return '{0.author.name} has added {0.content} to this channel'.format(self)
    async def delete(self, *, delay=None):
        """|coro|

        Deletes the message.

        Your own messages could be deleted without any proper permissions. However to
        delete other people's messages, you need the :attr:`~Permissions.manage_messages`
        permission.

        .. versionchanged:: 1.1
            Added the new ``delay`` keyword-only parameter.

        Parameters
        -----------
        delay: Optional[:class:`float`]
            If provided, the number of seconds to wait in the background
            before deleting the message. If the deletion fails then it is silently ignored.

        Raises
        ------
        Forbidden
            You do not have proper permissions to delete the message.
        NotFound
            The message was deleted already
        HTTPException
            Deleting the message failed.
        """
        if delay is not None:
            # Schedule the deletion as a fire-and-forget background task;
            # failures are intentionally swallowed per the documented contract.
            async def delete():
                await asyncio.sleep(delay)
                try:
                    await self._state.http.delete_message(self.channel.id, self.id)
                except HTTPException:
                    pass

            asyncio.ensure_future(delete(), loop=self._state.loop)
        else:
            await self._state.http.delete_message(self.channel.id, self.id)
    async def edit(self, **fields):
        """|coro|

        Edits the message.

        The content must be able to be transformed into a string via ``str(content)``.

        .. versionchanged:: 1.3
            The ``suppress`` keyword-only parameter was added.

        Parameters
        -----------
        content: Optional[:class:`str`]
            The new content to replace the message with.
            Could be ``None`` to remove the content.
        embed: Optional[:class:`Embed`]
            The new embed to replace the original with.
            Could be ``None`` to remove the embed.
        suppress: :class:`bool`
            Whether to suppress embeds for the message. This removes
            all the embeds if set to ``True``. If set to ``False``
            this brings the embeds back if they were suppressed.
            Using this parameter requires :attr:`~.Permissions.manage_messages`.
        delete_after: Optional[:class:`float`]
            If provided, the number of seconds to wait in the background
            before deleting the message we just edited. If the deletion fails,
            then it is silently ignored.

        Raises
        -------
        HTTPException
            Editing the message failed.
        Forbidden
            Tried to suppress a message without permissions or
            edited a message's content or embed that isn't yours.
        """
        # Normalise each supported field in-place; absent keys are left alone
        # so the API receives only what the caller explicitly passed.
        try:
            content = fields['content']
        except KeyError:
            pass
        else:
            if content is not None:
                fields['content'] = str(content)

        try:
            embed = fields['embed']
        except KeyError:
            pass
        else:
            if embed is not None:
                fields['embed'] = embed.to_dict()

        try:
            suppress = fields.pop('suppress')
        except KeyError:
            pass
        else:
            # Start from the current flags so other bits are preserved.
            flags = MessageFlags._from_value(self.flags.value)
            flags.suppress_embeds = suppress
            fields['flags'] = flags.value

        delete_after = fields.pop('delete_after', None)

        if fields:
            data = await self._state.http.edit_message(self.channel.id, self.id, **fields)
            self._update(data)

        if delete_after is not None:
            await self.delete(delay=delete_after)
    async def publish(self):
        """|coro|

        Publishes this message to your announcement channel.

        If the message is not your own then the :attr:`~Permissions.manage_messages`
        permission is needed.

        Raises
        -------
        Forbidden
            You do not have the proper permissions to publish this message.
        HTTPException
            Publishing the message failed.
        """
        await self._state.http.publish_message(self.channel.id, self.id)
    async def pin(self):
        """|coro|

        Pins the message.

        You must have the :attr:`~Permissions.manage_messages` permission to do
        this in a non-private channel context.

        Raises
        -------
        Forbidden
            You do not have permissions to pin the message.
        NotFound
            The message or channel was not found or deleted.
        HTTPException
            Pinning the message failed, probably due to the channel
            having more than 50 pinned messages.
        """
        await self._state.http.pin_message(self.channel.id, self.id)
        # Only mark the local cache once the HTTP call has succeeded.
        self.pinned = True
    async def unpin(self):
        """|coro|

        Unpins the message.

        You must have the :attr:`~Permissions.manage_messages` permission to do
        this in a non-private channel context.

        Raises
        -------
        Forbidden
            You do not have permissions to unpin the message.
        NotFound
            The message or channel was not found or deleted.
        HTTPException
            Unpinning the message failed.
        """
        await self._state.http.unpin_message(self.channel.id, self.id)
        # Only clear the local cache once the HTTP call has succeeded.
        self.pinned = False
    async def add_reaction(self, emoji):
        """|coro|

        Add a reaction to the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        You must have the :attr:`~Permissions.read_message_history` permission
        to use this. If nobody else has reacted to the message using this
        emoji, the :attr:`~Permissions.add_reactions` permission is required.

        Parameters
        ------------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to react with.

        Raises
        --------
        HTTPException
            Adding the reaction failed.
        Forbidden
            You do not have the proper permissions to react to the message.
        NotFound
            The emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        # Normalise to the API's name:id / unicode wire format first.
        emoji = self._emoji_reaction(emoji)
        await self._state.http.add_reaction(self.channel.id, self.id, emoji)
    async def remove_reaction(self, emoji, member):
        """|coro|

        Remove a reaction by the member from the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        If the reaction is not your own (i.e. ``member`` parameter is not you) then
        the :attr:`~Permissions.manage_messages` permission is needed.

        The ``member`` parameter must represent a member and meet
        the :class:`abc.Snowflake` abc.

        Parameters
        ------------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to remove.
        member: :class:`abc.Snowflake`
            The member for which to remove the reaction.

        Raises
        --------
        HTTPException
            Removing the reaction failed.
        Forbidden
            You do not have the proper permissions to remove the reaction.
        NotFound
            The member or emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        emoji = self._emoji_reaction(emoji)

        # Removing your own reaction uses a distinct endpoint that needs no
        # manage_messages permission.
        if member.id == self._state.self_id:
            await self._state.http.remove_own_reaction(self.channel.id, self.id, emoji)
        else:
            await self._state.http.remove_reaction(self.channel.id, self.id, emoji, member.id)
    async def clear_reaction(self, emoji):
        """|coro|

        Clears a specific reaction from the message.

        The emoji may be a unicode emoji or a custom guild :class:`Emoji`.

        You need the :attr:`~Permissions.manage_messages` permission to use this.

        .. versionadded:: 1.3

        Parameters
        -----------
        emoji: Union[:class:`Emoji`, :class:`Reaction`, :class:`PartialEmoji`, :class:`str`]
            The emoji to clear.

        Raises
        --------
        HTTPException
            Clearing the reaction failed.
        Forbidden
            You do not have the proper permissions to clear the reaction.
        NotFound
            The emoji you specified was not found.
        InvalidArgument
            The emoji parameter is invalid.
        """
        emoji = self._emoji_reaction(emoji)
        await self._state.http.clear_single_reaction(self.channel.id, self.id, emoji)
@staticmethod
def _emoji_reaction(emoji):
if isinstance(emoji, Reaction):
emoji = emoji.emoji
if isinstance(emoji, Emoji):
return '%s:%s' % (emoji.name, emoji.id)
if isinstance(emoji, PartialEmoji):
return emoji._as_reaction()
if isinstance(emoji, str):
# Reactions can be in :name:id format, but not <:name:id>.
# No existing emojis have <> in them, so this should be okay.
return emoji.strip('<>')
raise InvalidArgument('emoji argument must be str, Emoji, or Reaction not {.__class__.__name__}.'.format(emoji))
    async def clear_reactions(self):
        """|coro|

        Removes all the reactions from the message.

        You need the :attr:`~Permissions.manage_messages` permission to use this.

        Raises
        --------
        HTTPException
            Removing the reactions failed.
        Forbidden
            You do not have the proper permissions to remove all the reactions.
        """
        await self._state.http.clear_reactions(self.channel.id, self.id)
    async def ack(self):
        """|coro|

        Marks this message as read.

        The user must not be a bot user.

        Raises
        -------
        HTTPException
            Acking failed.
        ClientException
            You must not be a bot user.
        """
        state = self._state
        # The ack endpoint is user-account only; bots get a client-side error.
        if state.is_bot:
            raise ClientException('Must not be a bot account to ack messages.')
        return await state.http.ack_message(self.channel.id, self.id)
| 37.185849 | 125 | 0.604511 |
acf9e140c114f31caee4942a105b330888606b14 | 5,884 | py | Python | classeAutor.py | carollimalima/trabson | 8b2e29c1f0c79e6be71c904e1ffff13ac92b4277 | [
"MIT"
] | null | null | null | classeAutor.py | carollimalima/trabson | 8b2e29c1f0c79e6be71c904e1ffff13ac92b4277 | [
"MIT"
] | null | null | null | classeAutor.py | carollimalima/trabson | 8b2e29c1f0c79e6be71c904e1ffff13ac92b4277 | [
"MIT"
] | null | null | null | from datetime import datetime
import psycopg2
class Autor:
def __init__(self,nome,email):
self._nome = nome
self._email = email
self._trabalhos = []
def _get_nome(self):
return self._nome
def _get_email(self):
return self._email
def _get_trabalhos(self):
return self._trabalhos
def _get_cod(self):
return int(self._cod)
def _set_nome(self, nome):
self._nome = nome
def _set_email(self, email):
self._email = email
def _set_cod(self, cod):
self._cod = int(cod)
def _set_trabalhos(self, t):
try:
self._trabalhos.append(t)
except:
self._trabalhos = []
self._trabalhos.append(t)
nome = property(_get_nome,_set_nome)
email = property(_get_email,_set_email)
cod = property(_get_cod,_set_cod)
trabalhos = property(_get_trabalhos,_set_trabalhos)
class Trabalho:
    """A piece of work: content, grade, delivery date and title.

    ``cod`` and ``dtHoraAtualizacao`` are only present once explicitly set.
    """

    def __init__(self, conteudo, nota, dataEntrega, titulo):
        self._conteudo = conteudo
        self._nota = nota
        self._dataEntrega = dataEntrega
        self._titulo = titulo

    # --- getters -----------------------------------------------------
    def _get_cod(self):
        return self._cod

    def _get_conteudo(self):
        return self._conteudo

    def _get_nota(self):
        return self._nota

    def _get_dataEntrega(self):
        return self._dataEntrega

    def _get_dtHoraAtualizacao(self):
        return self._dtHoraAtualizacao

    # --- setters -----------------------------------------------------
    def _set_cod(self, cod):
        self._cod = cod

    def _set_conteudo(self, conteudo):
        self._conteudo = conteudo

    def _set_nota(self, nota):
        self._nota = nota

    def _set_dataEntrega(self, data):
        self._dataEntrega = data

    def _set_dtHoraAtualizacao(self, data):
        self._dtHoraAtualizacao = data

    # --- public attribute-style access --------------------------------
    cod = property(_get_cod, _set_cod)
    conteudo = property(_get_conteudo, _set_conteudo)
    nota = property(_get_nota, _set_nota)
    dataEntrega = property(_get_dataEntrega, _set_dataEntrega)
    dtHoraAtualizacao = property(_get_dtHoraAtualizacao, _set_dtHoraAtualizacao)
# Ad-hoc fixtures exercising the model classes above (runs at import time).
t1 = Trabalho("fazer pao",10,"25/04/2019","cacetinho")
t2 = Trabalho("refutar professor de historia",0,"25/04/2019","vc ta rindo?")
t3 = Trabalho("assar pao",10,"25/04/2019","cacetinho")
a1 = Autor("nando moura", "nando_cnoura@gmail.com")
#a1.trabalhos=t3
#t1.cod=1
# Appends t1 to a1's work list (the trabalhos setter appends).
a1._set_trabalhos(t1)
#a1._set_trabalhos(t2)
#a1._set_trabalhos(t3)
#print((a1.trabalhos[0]).conteudo)
#print((a1.trabalhos[1]).conteudo)
#print((a1.trabalhos[2]).conteudo)
class Dao:
    """Base data-access object holding the PostgreSQL DSN string."""
    def __init__(self):
        # NOTE(review): credentials are hard-coded; consider moving them to
        # environment variables or a config file.
        self._conexao = "dbname=trabalhoAutor user=postgres password=postgres host=localhost port=5432"
class AutorDao(Dao):
    """CRUD data-access object for ``Autor`` rows in the "Autor" table.

    Fixes over the original: connections and cursors are now always closed
    (they previously leaked on every call), the DSN is inherited from
    :class:`Dao` instead of being duplicated, and the stray debug prints
    were removed.
    """

    def __init__(self):
        # Reuse the DSN defined on the base class.
        super().__init__()

    def salvar(self, autor):
        """Insert *autor* (assigning ``autor.cod``) or update its row.

        An author that already has a ``cod`` attribute is treated as an
        existing row and UPDATEd; otherwise it is INSERTed and the generated
        primary key is written back onto ``autor.cod``.
        """
        con = psycopg2.connect(self._conexao)
        try:
            cursor = con.cursor()
            try:
                if hasattr(autor, 'cod'):
                    cursor.execute(
                        'UPDATE "Autor" SET nome = %s, email = %s WHERE cod = %s',
                        (autor.nome, autor.email, int(autor.cod)))
                else:
                    cursor.execute(
                        'insert into "Autor" (nome,email) values (%s,%s) RETURNING cod',
                        (autor.nome, autor.email))
                    autor.cod = int(cursor.fetchone()[0])
                con.commit()
            finally:
                cursor.close()
        finally:
            con.close()

    def buscar(self, cod):
        """Return the row for *cod* as a tuple, or ``None`` if not found."""
        con = psycopg2.connect(self._conexao)
        try:
            cursor = con.cursor()
            try:
                cursor.execute('SELECT * FROM "Autor" WHERE cod = %s', [cod])
                return cursor.fetchone()
            finally:
                cursor.close()
        finally:
            con.close()

    def excluir(self, cod):
        """Delete the row identified by *cod*."""
        con = psycopg2.connect(self._conexao)
        try:
            cursor = con.cursor()
            try:
                cursor.execute('DELETE FROM "Autor" WHERE cod = %s', [cod])
                con.commit()
            finally:
                cursor.close()
        finally:
            con.close()

    def listar(self):
        """Return every row of the "Autor" table as a list of tuples."""
        con = psycopg2.connect(self._conexao)
        try:
            # psycopg2's connection context manager wraps a transaction
            # (commit/rollback) but does NOT close the connection.
            with con:
                cursor = con.cursor()
                try:
                    cursor.execute('select * from "Autor"')
                    return list(cursor.fetchall())
                finally:
                    cursor.close()
        finally:
            con.close()
# Manual smoke test; requires a live local PostgreSQL with the trabalhoAutor
# database (connects and queries at import time).
#a2 = Autor("to com raiva","GRRRR@gmail")
#adao = AutorDao()
#adao.salvar(a2)
#print(a2.cod)
#print("autor antes: ",a2.nome)
#a3 = Autor("borboleta","borboleta@gmail")
#a3.cod=756
#print("autor antes: ",a3.nome)
#a2.nome="eh os guri"
#a2.email="kkk@gmail"
#print("autor DEPOIS : ",a3.nome)
adao = AutorDao()
#print(adao.buscar(756))
#adao.excluir(737)
print(adao.listar())
"""
f= F ("press F to jailson mendes", )
f= F ("press F to jailson mendes",69)
f1 = F("press F to hater of raffa moreira",777)
fdao = salvar(f)
if f nao tem codigo, entao é pq ele nunca foi inserido
if f tem codigo, entao é pq ele ja foi inserido, talvez o usuario queira
modificar ele. aaaah para ne
"""
#select dao antes de inserir
#salvar, conferir se o codigo de um deles foi passado
"""
def salvar(self,autor):
i=0
l = len(autor.trabalhos)-1
while(i<=l):
verifica=hasattr(autor.trabalhos[i], 'cod')
if (verifica):
print("caraio deu tudo errado vai arrumar isso dai")
else:
print("coékkk")
con = psycopg2.connect(self._conexao)
cursor = con.cursor()
cursor.execute('insert into "Autor" (nome,email) values (%s,%s)',[autor.nome,autor.email])
cursor.close()
i=i+1
"""
| 21.632353 | 125 | 0.596193 |
acf9e25fdf31c03c3d1315b7fb1373506536276b | 2,367 | py | Python | examples/src/main/python/mllib/decision_tree_regression_example.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 2,327 | 2020-03-01T09:47:34.000Z | 2021-11-25T12:38:42.000Z | examples/src/main/python/mllib/decision_tree_regression_example.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 607 | 2016-12-12T21:56:43.000Z | 2019-11-14T22:21:06.000Z | examples/src/main/python/mllib/decision_tree_regression_example.py | yqtaowhu/Spark | a381bce7285ec30f58f28f523dfcfe0c13221bbf | [
"Apache-2.0"
] | 686 | 2020-03-03T17:24:51.000Z | 2021-11-25T23:39:12.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Decision Tree Regression Example.
"""
from __future__ import print_function
from pyspark import SparkContext
# $example on$
from pyspark.mllib.tree import DecisionTree, DecisionTreeModel
from pyspark.mllib.util import MLUtils
# $example off$
if __name__ == "__main__":
    sc = SparkContext(appName="PythonDecisionTreeRegressionExample")
    # $example on$
    # Load and parse the data file into an RDD of LabeledPoint.
    data = MLUtils.loadLibSVMFile(sc, 'data/mllib/sample_libsvm_data.txt')
    # Split the data into training and test sets (30% held out for testing)
    (trainingData, testData) = data.randomSplit([0.7, 0.3])
    # Train a DecisionTree model.
    # Empty categoricalFeaturesInfo indicates all features are continuous.
    model = DecisionTree.trainRegressor(trainingData, categoricalFeaturesInfo={},
                                        impurity='variance', maxDepth=5, maxBins=32)
    # Evaluate model on test instances and compute test error
    predictions = model.predict(testData.map(lambda x: x.features))
    # Pair each true label with the corresponding prediction.
    labelsAndPredictions = testData.map(lambda lp: lp.label).zip(predictions)
    # Mean squared error = average of (label - prediction)^2 over the test set.
    testMSE = labelsAndPredictions.map(lambda lp: (lp[0] - lp[1]) * (lp[0] - lp[1])).sum() /\
        float(testData.count())
    print('Test Mean Squared Error = ' + str(testMSE))
    print('Learned regression tree model:')
    print(model.toDebugString())
    # Save and load model
    model.save(sc, "target/tmp/myDecisionTreeRegressionModel")
    sameModel = DecisionTreeModel.load(sc, "target/tmp/myDecisionTreeRegressionModel")
    # $example off$
acf9e2bd8447efe2be5badf5a5ac16363ea0d3ac | 60 | py | Python | myvenv/lib/python3.5/site-packages/floppyforms/gis/__init__.py | tuvapp/tuvappcom | 5ca2be19f4b0c86a1d4a9553711a4da9d3f32841 | [
"MIT"
] | 259 | 2015-01-05T21:54:42.000Z | 2019-07-25T14:13:29.000Z | myvenv/lib/python3.5/site-packages/floppyforms/gis/__init__.py | tuvapp/tuvappcom | 5ca2be19f4b0c86a1d4a9553711a4da9d3f32841 | [
"MIT"
] | 80 | 2015-01-19T19:17:02.000Z | 2019-07-31T09:01:40.000Z | myvenv/lib/python3.5/site-packages/floppyforms/gis/__init__.py | tuvapp/tuvappcom | 5ca2be19f4b0c86a1d4a9553711a4da9d3f32841 | [
"MIT"
] | 77 | 2015-01-19T19:38:49.000Z | 2019-07-24T01:38:52.000Z | # flake8: noqa
from .fields import *
from .widgets import *
| 15 | 22 | 0.716667 |
acf9e2e0b0e91fbabe3919704acf61ad59c363a7 | 3,797 | py | Python | python/atomix/primitive/leader/__init__.py | tomikazi/atomix-api | 02c9ecccae4e377d09c1afa5ef2db9bf377991f3 | [
"Apache-2.0"
] | null | null | null | python/atomix/primitive/leader/__init__.py | tomikazi/atomix-api | 02c9ecccae4e377d09c1afa5ef2db9bf377991f3 | [
"Apache-2.0"
] | null | null | null | python/atomix/primitive/leader/__init__.py | tomikazi/atomix-api | 02c9ecccae4e377d09c1afa5ef2db9bf377991f3 | [
"Apache-2.0"
] | null | null | null | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: atomix/primitive/leader/latch.proto
# plugin: python-betterproto
from dataclasses import dataclass
from typing import AsyncIterator, List, Optional
import betterproto
import grpclib
class EventType(betterproto.Enum):
    """Leader-latch event type; CHANGE signals a change in latch state."""
    NONE = 0
    CHANGE = 1
@dataclass(eq=False, repr=False)
class LatchRequest(betterproto.Message):
    """Request to join/acquire the leader latch (generated from latch.proto)."""
    headers: "__primitive__.RequestHeaders" = betterproto.message_field(1)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class LatchResponse(betterproto.Message):
    """Response to a latch request, carrying the resulting latch state."""
    headers: "__primitive__.ResponseHeaders" = betterproto.message_field(1)
    latch: "Latch" = betterproto.message_field(2)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class GetRequest(betterproto.Message):
    """Request for the current leader-latch state."""
    headers: "__primitive__.RequestHeaders" = betterproto.message_field(1)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class GetResponse(betterproto.Message):
    """Response carrying the current leader-latch state."""
    headers: "__primitive__.ResponseHeaders" = betterproto.message_field(1)
    latch: "Latch" = betterproto.message_field(2)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class EventsRequest(betterproto.Message):
    """Request to subscribe to leader-change events."""
    headers: "__primitive__.RequestHeaders" = betterproto.message_field(1)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class EventsResponse(betterproto.Message):
    """One streamed leader-change event."""
    headers: "__primitive__.ResponseHeaders" = betterproto.message_field(1)
    event: "Event" = betterproto.message_field(2)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class Event(betterproto.Message):
    """A leader-latch event: its type plus the latch state after the event."""
    type: "EventType" = betterproto.enum_field(1)
    latch: "Latch" = betterproto.message_field(2)
    def __post_init__(self) -> None:
        super().__post_init__()
@dataclass(eq=False, repr=False)
class Latch(betterproto.Message):
    """Leader-latch state: current leader plus all participants."""
    meta: "_meta__.ObjectMeta" = betterproto.message_field(1)
    leader: str = betterproto.string_field(2)
    participants: List[str] = betterproto.string_field(3)
    def __post_init__(self) -> None:
        super().__post_init__()
class LeaderLatchServiceStub(betterproto.ServiceStub):
    """Leader latch service"""
    async def latch(
        self, *, headers: "__primitive__.RequestHeaders" = None
    ) -> "LatchResponse":
        """Latch attempts to acquire the leader latch"""
        request = LatchRequest()
        if headers is not None:
            request.headers = headers
        # Unary request/response RPC against the Latch method.
        return await self._unary_unary(
            "/atomix.primitive.leader.LeaderLatchService/Latch", request, LatchResponse
        )
    async def get(
        self, *, headers: "__primitive__.RequestHeaders" = None
    ) -> "GetResponse":
        """Get gets the current leader"""
        request = GetRequest()
        if headers is not None:
            request.headers = headers
        return await self._unary_unary(
            "/atomix.primitive.leader.LeaderLatchService/Get", request, GetResponse
        )
    async def events(
        self, *, headers: "__primitive__.RequestHeaders" = None
    ) -> AsyncIterator["EventsResponse"]:
        """Events listens for leader change events"""
        request = EventsRequest()
        if headers is not None:
            request.headers = headers
        # Server-streaming RPC: yields one EventsResponse per leader change.
        async for response in self._unary_stream(
            "/atomix.primitive.leader.LeaderLatchService/Events",
            request,
            EventsResponse,
        ):
            yield response
from .. import meta as _meta__
from ... import primitive as __primitive__
| 28.335821 | 87 | 0.685278 |
acf9e385fdf5e94c5d0690dff3d6cc11db7d14dc | 490 | py | Python | pyleecan/Methods/Slot/SlotW60/get_surface_wind.py | harshasunder-1/pyleecan | 32ae60f98b314848eb9b385e3652d7fc50a77420 | [
"Apache-2.0"
] | 2 | 2020-08-28T14:54:55.000Z | 2021-03-13T19:34:45.000Z | pyleecan/Methods/Slot/SlotW60/get_surface_wind.py | harshasunder-1/pyleecan | 32ae60f98b314848eb9b385e3652d7fc50a77420 | [
"Apache-2.0"
] | null | null | null | pyleecan/Methods/Slot/SlotW60/get_surface_wind.py | harshasunder-1/pyleecan | 32ae60f98b314848eb9b385e3652d7fc50a77420 | [
"Apache-2.0"
] | null | null | null | def get_surface_wind(self, alpha=0, delta=0):
"""Return the full winding surface
Parameters
----------
self : SlotW60
A SlotW60 object
alpha : float
float number for rotation (Default value = 0) [rad]
delta : complex
complex number for translation (Default value = 0)
Returns
-------
surf_wind: Surface
Surface corresponding to the Winding Area
"""
raise Exception("get_surface_wind not available for SlotW60")
| 24.5 | 65 | 0.628571 |
acf9e4a84cf3d84762a8837f57fac262017afa0a | 10,861 | py | Python | compose/parallel.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 2 | 2020-08-30T12:57:11.000Z | 2021-01-21T13:17:43.000Z | compose/parallel.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 20 | 2020-09-07T16:12:31.000Z | 2022-03-29T22:05:14.000Z | compose/parallel.py | pareshmg/compose | cba758361499d74ef26bf281b73206e6dc12b5c9 | [
"Apache-2.0"
] | 3 | 2020-09-22T02:56:37.000Z | 2021-03-15T10:31:24.000Z | import _thread as thread
import logging
import operator
import sys
from queue import Empty
from queue import Queue
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit:
    """Holds the project-wide semaphore that caps parallel operations.

    Treated as a singleton: the limiter lives on the class itself and is
    (re)created when the project is instantiated or reconfigured.
    """
    global_limiter = Semaphore(PARALLEL_LIMIT)

    @classmethod
    def set_global_limit(cls, value):
        """Replace the global semaphore; ``None`` restores the default limit."""
        limit = PARALLEL_LIMIT if value is None else value
        cls.global_limiter = Semaphore(limit)
def parallel_execute_watch(events, writer, errors, results, msg, get_name, fail_check):
    """ Watch events from a parallel execution, update status and fill errors and results.
        Returns exception to re-raise.
    """
    error_to_reraise = None
    # Each event is an (obj, result, exception) triple from parallel_execute_iter.
    for obj, result, exception in events:
        if exception is None:
            if fail_check is not None and fail_check(obj):
                writer.write(msg, get_name(obj), 'failed', red)
            else:
                writer.write(msg, get_name(obj), 'done', green)
            # Successful results are collected regardless of fail_check display.
            results.append(result)
        elif isinstance(exception, ImageNotFound):
            # This is to bubble up ImageNotFound exceptions to the client so we
            # can prompt the user if they want to rebuild.
            # NOTE(review): this must stay before the APIError branch —
            # presumably ImageNotFound is a docker APIError subclass.
            errors[get_name(obj)] = exception.explanation
            writer.write(msg, get_name(obj), 'error', red)
            error_to_reraise = exception
        elif isinstance(exception, APIError):
            # Docker API errors are reported but not re-raised.
            errors[get_name(obj)] = exception.explanation
            writer.write(msg, get_name(obj), 'error', red)
        elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
            errors[get_name(obj)] = exception.msg
            writer.write(msg, get_name(obj), 'error', red)
        elif isinstance(exception, UpstreamError):
            # A dependency already failed; the root cause was reported there.
            writer.write(msg, get_name(obj), 'error', red)
        else:
            # Unexpected exception: record it and re-raise after the loop.
            errors[get_name(obj)] = exception
            error_to_reraise = exception
    return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None, fail_check=None):
    """Runs func on objects in parallel while ensuring that func is
    ran on object only after it is ran on all its dependencies.
    get_deps called on object must return a collection with its dependencies.
    get_name called on object must return its name.
    fail_check is an additional failure check for cases that should display as a failure
    in the CLI logs, but don't raise an exception (such as attempting to start 0 containers)

    Returns a ``(results, errors)`` tuple; re-raises the first fatal exception
    reported by parallel_execute_watch.
    """
    objects = list(objects)
    stream = sys.stderr
    # Reuse the singleton writer if one was already created.
    if ParallelStreamWriter.instance:
        writer = ParallelStreamWriter.instance
    else:
        writer = ParallelStreamWriter(stream)
    # Register all lines first so column width is known before writing.
    for obj in objects:
        writer.add_object(msg, get_name(obj))
    for obj in objects:
        writer.write_initial(msg, get_name(obj))
    events = parallel_execute_iter(objects, func, get_deps, limit)
    errors = {}
    results = []
    error_to_reraise = parallel_execute_watch(
        events, writer, errors, results, msg, get_name, fail_check
    )
    for obj_name, error in errors.items():
        stream.write("\nERROR: for {} {}\n".format(obj_name, error))
    if error_to_reraise:
        raise error_to_reraise
    return results, errors
def _no_deps(x):
return []
class State:
    """Tracks the progress of a partially-complete parallel operation.

    Attributes:
        objects: every object the operation must process
        started: objects currently being processed
        finished: objects which have been processed successfully
        failed: objects which either failed or whose dependencies failed
    """
    def __init__(self, objects):
        self.objects = objects
        self.started = set()
        self.finished = set()
        self.failed = set()

    def is_done(self):
        """True once every object has either finished or failed."""
        completed = len(self.finished) + len(self.failed)
        return completed >= len(self.objects)

    def pending(self):
        """Objects not yet started, finished, or failed."""
        seen = self.started | self.finished | self.failed
        return set(self.objects) - seen
class NoLimit:
    """Context manager that imposes no concurrency limit (no-op stand-in
    for a Semaphore when ``limit`` is None)."""

    def __enter__(self):
        return None

    def __exit__(self, *exc_info):
        return None
def parallel_execute_iter(objects, func, get_deps, limit):
    """
    Runs func on objects in parallel while ensuring that func is
    ran on object only after it is ran on all its dependencies.
    Returns an iterator of tuples which look like:
    # if func returned normally when run on object
    (object, result, None)
    # if func raised an exception when run on object
    (object, None, exception)
    # if func raised an exception when run on one of object's dependencies
    (object, None, UpstreamError())
    """
    if get_deps is None:
        get_deps = _no_deps
    if limit is None:
        limiter = NoLimit()
    else:
        limiter = Semaphore(limit)
    results = Queue()
    state = State(objects)
    while True:
        # Re-scan pending objects and start any that became ready.
        feed_queue(objects, func, get_deps, results, state, limiter)
        try:
            # Short timeout so newly-ready objects are picked up promptly.
            event = results.get(timeout=0.1)
        except Empty:
            continue
        # See https://github.com/docker/compose/issues/189
        except thread.error:
            raise ShutdownException()
        if event is STOP:
            break
        obj, _, exception = event
        if exception is None:
            log.debug('Finished processing: {}'.format(obj))
            state.finished.add(obj)
        else:
            log.debug('Failed: {}'.format(obj))
            state.failed.add(obj)
        yield event
def producer(obj, func, results, limiter):
    """
    Entry point for a producer thread: run ``func`` on a single object,
    honouring both the local and the global concurrency limiters, then
    place an (obj, result, exception) tuple on the results queue whether
    func returned or raised.
    """
    with limiter, GlobalLimit.global_limiter:
        try:
            outcome = func(obj)
            results.put((obj, outcome, None))
        except Exception as exc:
            results.put((obj, None, exc))
def feed_queue(objects, func, get_deps, results, state, limiter):
    """
    Starts producer threads for any objects which are ready to be processed
    (i.e. they have no dependencies which haven't been successfully processed).
    Shortcuts any objects whose dependencies have failed and places an
    (object, None, UpstreamError()) tuple on the results queue.
    """
    pending = state.pending()
    log.debug('Pending: {}'.format(pending))
    for obj in pending:
        # deps is iterated as (dep, ready_check) pairs below, and as dep[0]
        # in the failure check — both access the dependency object itself.
        deps = get_deps(obj)
        try:
            if any(dep[0] in state.failed for dep in deps):
                # A dependency failed: propagate the failure without running func.
                log.debug('{} has upstream errors - not processing'.format(obj))
                results.put((obj, None, UpstreamError()))
                state.failed.add(obj)
            elif all(
                dep not in objects or (
                    dep in state.finished and (not ready_check or ready_check(dep))
                ) for dep, ready_check in deps
            ):
                # Every in-scope dependency finished (and passed its optional
                # ready_check): spawn a daemon producer thread for this object.
                log.debug('Starting producer thread for {}'.format(obj))
                t = Thread(target=producer, args=(obj, func, results, limiter))
                t.daemon = True
                t.start()
                state.started.add(obj)
        except (HealthCheckFailed, NoHealthCheckConfigured) as e:
            # ready_check may raise these; report them as this object's failure.
            log.debug(
                'Healthcheck for service(s) upstream of {} failed - '
                'not processing'.format(obj)
            )
            results.put((obj, None, e))
    if state.is_done():
        # Everything finished or failed: tell the consumer loop to stop.
        results.put(STOP)
class UpstreamError(Exception):
    """Stand-in exception reported for an object whose dependency failed."""
class ParallelStreamWriter:
    """Write out messages for operations happening in parallel.
    Each operation has its own line, and ANSI code characters are used
    to jump to the correct line, and write over the line.
    """

    # Class-level singleton state shared by all callers.
    noansi = False
    lock = Lock()
    instance = None

    @classmethod
    def set_noansi(cls, value=True):
        """Disable ANSI cursor movement (plain sequential output)."""
        cls.noansi = value

    def __init__(self, stream):
        self.stream = stream
        self.lines = []   # registered "msg + obj_index" keys, one per output line
        self.width = 0    # widest "msg obj_index" seen; used for column alignment
        ParallelStreamWriter.instance = self

    def add_object(self, msg, obj_index):
        # Register a line for this operation and grow the alignment width.
        if msg is None:
            return
        self.lines.append(msg + obj_index)
        self.width = max(self.width, len(msg + ' ' + obj_index))

    def write_initial(self, msg, obj_index):
        # First write is always plain: it lays down the line in order.
        if msg is None:
            return
        return self._write_noansi(msg, obj_index, '')

    def _write_ansi(self, msg, obj_index, status):
        # Serialize terminal updates; cursor moves are not thread-safe.
        self.lock.acquire()
        position = self.lines.index(msg + obj_index)
        diff = len(self.lines) - position
        # move up (ESC = chr(27))
        self.stream.write("%c[%dA" % (27, diff))
        # erase
        self.stream.write("%c[2K\r" % 27)
        self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
                                                        status, width=self.width))
        # move back down
        self.stream.write("%c[%dB" % (27, diff))
        self.stream.flush()
        self.lock.release()

    def _write_noansi(self, msg, obj_index, status):
        # Append-only variant for dumb terminals / log capture.
        self.stream.write(
            "{:<{width}} ... {}\r\n".format(
                msg + ' ' + obj_index, status, width=self.width
            )
        )
        self.stream.flush()

    def write(self, msg, obj_index, status, color_func):
        # color_func is only applied on the ANSI path (plain logs stay uncolored).
        if msg is None:
            return
        if self.noansi:
            self._write_noansi(msg, obj_index, status)
        else:
            self._write_ansi(msg, obj_index, color_func(status))
def get_stream_writer():
    """Return the singleton ParallelStreamWriter, or raise if none exists yet."""
    writer = ParallelStreamWriter.instance
    if writer is None:
        raise RuntimeError('ParallelStreamWriter has not yet been instantiated')
    return writer
def parallel_operation(containers, operation, options, message):
    """Invoke the named container method (with options) on every container in parallel."""
    invoke = operator.methodcaller(operation, **options)
    name_of = operator.attrgetter('name')
    parallel_execute(containers, invoke, name_of, message)
def parallel_remove(containers, options):
    """Remove, in parallel, every container that is not currently running."""
    not_running = [container for container in containers if not container.is_running]
    parallel_operation(not_running, 'remove', options, 'Removing')
def parallel_pause(containers, options):
    """Pause the given containers in parallel."""
    parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
    """Unpause the given containers in parallel."""
    parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
    """Kill the given containers in parallel."""
    parallel_operation(containers, 'kill', options, 'Killing')
| 31.031429 | 103 | 0.638523 |
acf9e633770abad168db4413be9f057b20088ce5 | 5,081 | py | Python | python_modules/dagster-test/dagster_test/graph_job_op_toys/asset_lineage.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-test/dagster_test/graph_job_op_toys/asset_lineage.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | python_modules/dagster-test/dagster_test/graph_job_op_toys/asset_lineage.py | kstennettlull/dagster | dd6f57e170ff03bf145f1dd1417e0b2c3156b1d6 | [
"Apache-2.0"
] | null | null | null | import datetime
import os
import random
import string
import warnings
import pandas as pd
from dagster import (
Array,
AssetKey,
ExperimentalWarning,
Field,
MetadataEntry,
MetadataValue,
Out,
Output,
Partition,
PartitionSetDefinition,
graph,
op,
)
from dagster.core.storage.fs_io_manager import PickledObjectFilesystemIOManager
from dagster.core.storage.io_manager import io_manager
warnings.filterwarnings("ignore", category=ExperimentalWarning)
def get_date_partitions():
    """Every day in 2020"""
    start = datetime.date(2020, 1, 1)
    end = datetime.date(2021, 1, 1)
    n_days = (end - start).days + 1
    return [
        Partition((start + datetime.timedelta(days=offset)).strftime("%Y-%m-%d"))
        for offset in range(n_days)
    ]
def run_config_for_date_partition(partition):
    """Build the run config that pins each op's output to the partition's date."""
    date = partition.value

    def _result_output():
        # Fresh dict per op so no config sub-structure is shared between ops.
        return {"outputs": {"result": {"partitions": [date]}}}

    return {
        "ops": {
            "download_data": _result_output(),
            "split_action_types": {
                "outputs": {
                    "comments": {"partitions": [date]},
                    "reviews": {"partitions": [date]},
                }
            },
            "top_10_comments": _result_output(),
            "top_10_reviews": _result_output(),
            "daily_top_action": _result_output(),
        }
    }
# Partition set wiring the 2020 daily partitions to the asset lineage pipeline.
asset_lineage_partition_set = PartitionSetDefinition(
    name="date_partition_set",
    pipeline_name="asset_lineage_pipeline",
    partition_fn=get_date_partitions,
    run_config_fn_for_partition=run_config_for_date_partition,
)
def metadata_for_actions(df):
    """Summary metadata for an actions frame: score range plus a 5-row sample."""
    scores = df["score"]
    sample_markdown = MetadataValue.md(df[:5].to_markdown())
    return {
        "min_score": int(scores.min()),
        "max_score": int(scores.max()),
        "sample rows": sample_markdown,
    }
class MyDatabaseIOManager(PickledObjectFilesystemIOManager):
    """Pickling IO manager that stores outputs under /tmp and reports asset metadata."""

    def _get_path(self, context):
        # Flatten the output identifier into a /tmp-rooted file path.
        keys = context.get_output_identifier()
        return os.path.join("/tmp", *keys)

    def handle_output(self, context, obj):
        # Persist via the parent pickling implementation, then yield metadata.
        super().handle_output(context, obj)
        # can pretend this actually came from a library call
        yield MetadataEntry(
            label="num rows written to db", description=None, entry_data=MetadataValue.int(len(obj))
        )

    def get_output_asset_key(self, context):
        # Asset key is derived from the table_name declared on the Out's metadata.
        return AssetKey(
            [
                "my_database",
                context.metadata["table_name"],
            ]
        )

    def get_output_asset_partitions(self, context):
        # Partitions come from the per-output config (see my_db_io_manager schema).
        return set(context.config.get("partitions", []))
@io_manager(output_config_schema={"partitions": Field(Array(str), is_required=False)})
def my_db_io_manager(_):
    """IO manager resource backed by MyDatabaseIOManager; accepts optional
    per-output ``partitions`` config (a list of partition strings)."""
    return MyDatabaseIOManager()
@op(out=Out(io_manager_key="my_db_io_manager", metadata={"table_name": "raw_actions"}))
def download_data():
    """Generate a random actions DataFrame (user_id, action_type, score)
    and emit it to the raw_actions table."""
    # Between 100 and 1000 rows per run.
    n_entries = random.randint(100, 1000)
    def user_id():
        # 10 random uppercase letters.
        return "".join(random.choices(string.ascii_uppercase, k=10))
    # generate some random data
    data = {
        "user_id": [user_id() for i in range(n_entries)],
        "action_type": [
            random.choices(["story", "comment"], [0.15, 0.85])[0] for i in range(n_entries)
        ],
        "score": [random.randint(0, 10000) for i in range(n_entries)],
    }
    df = pd.DataFrame.from_dict(data)
    yield Output(df, metadata=metadata_for_actions(df))
@op(
    out={
        "reviews": Out(io_manager_key="my_db_io_manager", metadata={"table_name": "reviews"}),
        "comments": Out(io_manager_key="my_db_io_manager", metadata={"table_name": "comments"}),
    }
)
def split_action_types(df):
    """Split the raw actions frame into two outputs: 'story' rows go to
    reviews, 'comment' rows go to comments."""
    reviews_df = df[df["action_type"] == "story"]
    comments_df = df[df["action_type"] == "comment"]
    yield Output(
        reviews_df,
        "reviews",
        metadata=metadata_for_actions(reviews_df),
    )
    yield Output(comments_df, "comments", metadata=metadata_for_actions(comments_df))
def best_n_actions(n, action_type):
    """Op factory: build an op named top_<n>_<action_type> that keeps the
    n highest-scoring rows and writes them to the best_<action_type> table."""
    @op(
        name=f"top_{n}_{action_type}",
        out=Out(
            io_manager_key="my_db_io_manager",
            metadata={"table_name": f"best_{action_type}"},
        ),
    )
    def _best_n_actions(df):
        # Keep the n rows with the largest score.
        df = df.nlargest(n, "score")
        return Output(
            df,
            metadata={"data": MetadataValue.md(df.to_markdown())},
        )
    return _best_n_actions
# Concrete ops produced by the best_n_actions factory (n=10).
top_10_reviews = best_n_actions(10, "reviews")
top_10_comments = best_n_actions(10, "comments")
@op(
    out=Out(
        io_manager_key="my_db_io_manager",
        metadata={"table_name": "daily_best_action"},
    )
)
def daily_top_action(df1, df2):
    """Pick the single highest-scoring row across both input frames."""
    df = pd.concat([df1, df2]).nlargest(1, "score")
    return Output(df, metadata={"data": MetadataValue.md(df.to_markdown())})
@graph
def asset_lineage():
    """Download raw actions, split by type, rank each, then pick the day's top action."""
    reviews, comments = split_action_types(download_data())
    daily_top_action(top_10_reviews(reviews), top_10_comments(comments))
# Executable job with the custom database IO manager bound as a resource.
asset_lineage_job = asset_lineage.to_job(resource_defs={"my_db_io_manager": my_db_io_manager})
# Undo the module-level ExperimentalWarning filter applied at import time.
warnings.resetwarnings()
| 27.917582 | 100 | 0.636883 |
acf9e719ec1d6c5b270a98dcae92d59caadb7797 | 928 | py | Python | sahp/sahp_training/utils/save_model.py | yangalan123/anhp-andtt | b907f3808ed2ce1616edb1bc2229993a6742cee9 | [
"MIT"
] | 16 | 2022-01-05T15:34:49.000Z | 2022-02-28T02:17:03.000Z | sahp/sahp_training/utils/save_model.py | yangalan123/anhp-andtt | b907f3808ed2ce1616edb1bc2229993a6742cee9 | [
"MIT"
] | 1 | 2022-01-15T07:58:36.000Z | 2022-01-16T03:30:42.000Z | sahp/sahp_training/utils/save_model.py | yangalan123/anhp-andtt | b907f3808ed2ce1616edb1bc2229993a6742cee9 | [
"MIT"
] | 1 | 2022-01-04T02:23:48.000Z | 2022-01-04T02:23:48.000Z | import json
import os
import torch
# SAVED_MODELS_PATH = "saved_models"
def save_model(model: torch.nn.Module, chosen_data_file, extra_tag, hidden_size, now_timestamp, SAVED_MODELS_PATH, model_name=None):
    """Save the model's state dict and append a model/data correspondence record.

    The checkpoint is written to
    ``<SAVED_MODELS_PATH>/<model_name>-<extra_tag>_hidden<hidden_size>-<now_timestamp>.pth``
    and a JSON line mapping the checkpoint path to the training data file is
    appended to ``train_data_correspondance.jsonl`` in the same directory.
    """
    if model_name is None:
        model_name = type(model).__name__
    checkpoint_name = "{}-{}_hidden{}-{}.pth".format(
        model_name, extra_tag, hidden_size, now_timestamp
    )
    model_filepath = os.path.join(SAVED_MODELS_PATH, checkpoint_name)
    print("Saving models to: {}".format(model_filepath))
    torch.save(model.state_dict(), model_filepath)
    file_correspondance = {
        "model_path": model_filepath,
        "data_path": chosen_data_file,
    }
    print(file_correspondance)
    jsonl_path = os.path.join(SAVED_MODELS_PATH, "train_data_correspondance.jsonl")
    with open(jsonl_path, "a") as record_file:
        json.dump(file_correspondance, record_file)
        record_file.write('\n')
acf9e7918ba06e8fe8a0eb882e3056afda2fd87c | 351 | py | Python | tests/vault-controller/test_VaultControllerEvents.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | 177 | 2020-06-13T01:41:04.000Z | 2022-03-28T06:26:53.000Z | tests/vault-controller/test_VaultControllerEvents.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | 31 | 2020-08-14T14:30:37.000Z | 2022-03-15T15:36:25.000Z | tests/vault-controller/test_VaultControllerEvents.py | DryptoBZX/contractsV2 | 3ee0b7669902ff6b9422440289ddc52f679e636b | [
"Apache-2.0"
] | 38 | 2020-06-24T22:24:40.000Z | 2022-03-26T00:27:14.000Z | #!/usr/bin/python3
import pytest
# I can't test this right now. lets leave it for the future
# def test_vaultEtherDeposit(Constants, bzx):
# assert False
# def test_vaultEtherWithdraw(Constants, bzx):
# assert False
# def test_vaultDeposit(Constants, bzx):
# assert False
# def test_vaultWithdraw(Constants, bzx):
# assert False
| 19.5 | 59 | 0.720798 |
acf9e979750447850c6656a1a360177af8762414 | 3,833 | py | Python | crdt/src/crdt/ordered_list/ll_ordered_list.py | Dgleish/PartIIProj | 617080e3b2fffd73b07de4fa59aca7ffb2389bac | [
"MIT"
] | null | null | null | crdt/src/crdt/ordered_list/ll_ordered_list.py | Dgleish/PartIIProj | 617080e3b2fffd73b07de4fa59aca7ffb2389bac | [
"MIT"
] | null | null | null | crdt/src/crdt/ordered_list/ll_ordered_list.py | Dgleish/PartIIProj | 617080e3b2fffd73b07de4fa59aca7ffb2389bac | [
"MIT"
] | null | null | null | from crdt.crdt_exceptions import VertexNotFound
from crdt.ordered_list.base_ordered_list import BaseOrderedList, Node
class LLOrderedList(BaseOrderedList):
    """Doubly-linked-list implementation of an ordered list for a CRDT.

    Vertices are kept between START and END sentinel nodes; deletion is a
    tombstone (node marked ``deleted``), never a physical unlink, and a dict
    (``nodes``) gives O(1) lookup from vertex id to node.
    """
    def __init__(self, puid):
        super().__init__(puid)
        # Sentinel head node followed immediately by the END sentinel.
        self.head = Node(None, flag='START')
        self.head.next_node = Node(None, flag='END')
        # vertex id -> Node lookup table (sentinels excluded).
        self.nodes = {}
    def __len__(self):
        # Size in bits of all stored vertex-id values (8 bits per character).
        # NOTE(review): counts tombstoned nodes too, since they stay in `nodes`.
        total_len = sum(len(n.id.value) * 8 for n in self.nodes.values())
        return total_len
    def get_head(self):
        """Return the START sentinel node."""
        return self.head
    def lookup(self, vertex_id):
        """Return the Node for vertex_id; None denotes the START sentinel.

        Raises VertexNotFound for unknown ids.
        """
        # special condition for representation of start node
        if vertex_id is None:
            return self.head
        # will throw KeyError if not found
        try:
            node = self.nodes[vertex_id]
            return node
        except KeyError:
            raise VertexNotFound(vertex_id)
    def successor(self, vertex_id, only_active=False):
        """Return the id of the node after vertex_id.

        With only_active, tombstoned nodes are skipped. Returns vertex_id
        itself when the END sentinel is reached.
        """
        succ = self.lookup(vertex_id).next_node
        if only_active:
            while succ is not None and succ.deleted:
                succ = succ.next_node
        # if reached the end, return last id again
        if succ is None or succ.end_node:
            return vertex_id
        return succ.id
    def predecessor(self, vertex_id, only_active=False):
        """Return the id of the node before vertex_id (None = START sentinel).

        With only_active, tombstoned nodes are skipped.
        """
        pred = self.lookup(vertex_id).prev_node
        if only_active:
            while pred is not None and pred.deleted:
                pred = pred.prev_node
        # if reached the beginning, return id for start of list (= None)
        if pred is None or pred.start_node:
            return None
        return pred.id
    def insert(self, left_id, new_vertex):
        """Insert new_vertex=(atom, id) after left_id, keeping ids totally
        ordered; duplicates (same id already present) are ignored.

        Returns (left_id, (atom, id)) so the operation can be propagated.
        """
        a, new_id = new_vertex
        l_id = left_id
        r_id = self.successor(left_id)
        # Determine where to insert after specified vertex (gives total ordering)
        while r_id != l_id and new_id < r_id:
            l_id, r_id = r_id, self.successor(r_id)
        # Is this vertex new to the list?
        if r_id != new_id:
            # If so insert it
            left_node = self.lookup(l_id)
            # create node with that data
            new_node = Node(new_vertex)
            # insert after 'left_id'
            tmp = left_node.next_node
            left_node.next_node = new_node
            new_node.next_node = tmp
            new_node.prev_node = left_node
            if tmp is not None:
                tmp.prev_node = new_node
            # add to nodes lookup table
            _, cl = new_vertex
            self.nodes[cl] = new_node
        return left_id, (a, new_id)
    def delete(self, vertex_id):
        """Tombstone vertex_id; return ((atom, id), id-of-active-predecessor)."""
        # mark deleted
        node = self.lookup(vertex_id)
        node.deleted = True
        prev_node = self.lookup(self.predecessor(vertex_id, only_active=True))
        return (node.atom, vertex_id), prev_node.id
    # for pretty printing
    def get_repr(self, cursor):
        """Return (visible text, cursor position) skipping tombstoned nodes."""
        list_repr = []
        cursor_pos = 0
        cursor_counter = 0
        curr = self.head.next_node
        while curr is not None:
            if (not curr.deleted) and curr.contents is not None:
                list_repr.append(curr.atom)
                cursor_counter += 1
            # Cursor may sit on a deleted node; position tracks visible chars.
            if curr.id == cursor:
                cursor_pos = cursor_counter
            curr = curr.next_node
        if cursor is None:
            cursor_pos = 0
        return ''.join(list_repr), cursor_pos
    # for debug purposes
    def get_detailed_repr(self):
        """Debug string including tombstoned nodes, marked (!!D...D!!)."""
        list_repr = []
        curr = self.head.next_node
        while curr is not None:
            if curr.contents is not None:
                if curr.deleted:
                    list_repr.append('(!!D{}D!!)'.format(str(curr.contents)))
                else:
                    list_repr.append(str(curr.contents))
            curr = curr.next_node
        return ''.join(list_repr)
acf9eaac6d22e68704b85c6f6676babe50671fc0 | 10,234 | py | Python | hummingbird/ml/operator_converters/onnx/tree_ensemble.py | qin-xiong/hummingbird | 95bc97e220f3c4eaeee17c425b0d287c689ca6aa | [
"MIT"
] | null | null | null | hummingbird/ml/operator_converters/onnx/tree_ensemble.py | qin-xiong/hummingbird | 95bc97e220f3c4eaeee17c425b0d287c689ca6aa | [
"MIT"
] | null | null | null | hummingbird/ml/operator_converters/onnx/tree_ensemble.py | qin-xiong/hummingbird | 95bc97e220f3c4eaeee17c425b0d287c689ca6aa | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""
Converters for ONNX-ML tree-ensemble models.
"""
import numpy as np
from onnxconverter_common.registration import register_converter
from .. import constants
from .._gbdt_commons import convert_gbdt_classifier_common, convert_gbdt_common
from .._tree_commons import TreeParameters, convert_decision_ensemble_tree_common, get_parameters_for_tree_trav_common
def _get_tree_infos_from_onnx_ml_operator(model):
    """
    Function used to extract the parameters from a ONNXML TreeEnsemble model.

    Returns a triple ``(tree_infos, classes, post_transform)``: one
    TreeParameters per tree, the class labels (None for regressors), and the
    ONNX post-transform string ("NONE", "LOGISTIC" or "SOFTMAX").
    """
    tree_infos = []
    left = right = features = values = threshold = None
    tree_ids = target_node_ids = target_tree_ids = modes = None
    classes = post_transform = None
    # The list of attributes is a merge between the classifier and regression operators.
    # The operators descriptions can be found here
    # https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleclassifier and
    # here https://github.com/onnx/onnx/blob/master/docs/Operators-ml.md#aionnxmltreeensembleregressor
    for attr in model.origin.attribute:
        if attr.name == "nodes_falsenodeids":
            right = attr.ints
        elif attr.name == "nodes_truenodeids":
            left = attr.ints
        elif attr.name == "nodes_featureids":
            features = attr.ints
        elif attr.name == "nodes_values":
            threshold = attr.floats
        elif attr.name == "class_weights" or attr.name == "target_weights":
            values = attr.floats
        elif attr.name == "class_nodeids" or attr.name == "target_nodeids":
            target_node_ids = attr.ints
        elif attr.name == "class_treeids" or attr.name == "target_treeids":
            target_tree_ids = attr.ints
        elif attr.name == "nodes_treeids":
            tree_ids = attr.ints
        elif attr.name == "classlabels_int64s":
            classes = list(attr.ints)
        elif attr.name == "classlabels_strings":
            # BUG FIX: the comparison previously used "classlabels_strings "
            # (trailing space), which never matches the ONNX attribute name,
            # so string class labels were silently accepted instead of raising.
            if len(attr.strings) > 0:
                raise AssertionError("String class labels not supported yet.")
        elif attr.name == "post_transform":
            post_transform = attr.s.decode("utf-8")
            if post_transform not in ["NONE", "LOGISTIC", "SOFTMAX"]:
                raise AssertionError("Post transform {} not supported".format(post_transform))
        elif attr.name == "nodes_modes":
            modes = attr.strings
            for mode in modes:
                if (not mode == b"BRANCH_LEQ") and (not mode == b"LEAF"):
                    raise AssertionError("Modality {} not supported".format(mode))
    # "NONE" post transform marks a plain decision-tree ensemble (not a GBDT).
    is_decision_tree = post_transform == "NONE"
    # Order values based on target node and tree ids.
    new_values = []
    n_classes = 1 if classes is None or not is_decision_tree else len(classes)
    j = 0
    for i in range(max(target_tree_ids) + 1):
        # [j, k) is the slice of targets belonging to tree i.
        k = j
        while k < len(target_tree_ids) and target_tree_ids[k] == i:
            k += 1
        target_ids = target_node_ids[j:k]
        target_ids_zipped = dict(zip(target_ids, range(len(target_ids))))
        for key in sorted(target_ids_zipped):
            if is_decision_tree and n_classes > 2:  # For multiclass we have 2d arrays.
                tmp_values = []
                for c in range(n_classes):
                    tmp_values.append(values[j + c + (target_ids_zipped[key] - (n_classes - 1))])
                new_values.append(tmp_values)
            else:
                new_values.append(values[j + target_ids_zipped[key]])
        j = k
    values = new_values
    # Walk the flattened node arrays tree by tree; whenever the tree id
    # advances, emit TreeParameters for the tree collected so far.
    i = 0
    prev_id = 0
    count = 0
    l_count = 0
    for n, id in enumerate(tree_ids):
        if id == i:
            if modes[n] == b"LEAF":
                # Leaves have no children / threshold: use -1 sentinels.
                left[n] = -1
                right[n] = -1
                threshold[n] = -1
        else:
            t_left = left[prev_id:count]
            t_right = right[prev_id:count]
            t_features = features[prev_id:count]
            t_threshold = threshold[prev_id:count]
            t_values = np.zeros((len(t_left), n_classes)) if is_decision_tree else np.zeros(len(t_left))
            if len(t_left) == 1:
                # Model creating trees with just a single leaf node. We transform it
                # to a model with one internal node.
                t_left = [1, -1, -1]
                t_right = [2, -1, -1]
                t_features = [0, 0, 0]
                t_threshold = [0, -1, -1]
                if l_count < len(values):
                    t_values[0] = values[l_count]
                    l_count += 1
            else:
                # Assign the next ordered value to each leaf (threshold == -1).
                for j in range(len(t_left)):
                    if t_threshold[j] == -1 and l_count < len(values):
                        t_values[j] = values[l_count]
                        l_count += 1
            if t_values.shape[0] == 1:
                # Model creating trees with just a single leaf node. We fix the values here.
                n_classes = t_values.shape[1]
                t_values = np.array([np.array([0.0]), t_values[0], t_values[0]])
                t_values.reshape(3, n_classes)
            if is_decision_tree and n_classes == 2:  # We need to fix the probabilities in this case.
                for k in range(len(t_left)):
                    prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
                    t_values[k][0] = prob
            tree_infos.append(
                TreeParameters(t_left, t_right, t_features, t_threshold, np.array(t_values).reshape(-1, n_classes))
            )
            prev_id = count
            i += 1
        count += 1
    # Flush the final tree (the loop above only emits on tree-id change).
    t_left = left[prev_id:count]
    t_right = right[prev_id:count]
    t_features = features[prev_id:count]
    t_threshold = threshold[prev_id:count]
    t_values = np.zeros((len(t_left), n_classes)) if is_decision_tree else np.zeros(len(t_left))
    if len(t_left) == 1:
        # Model creating trees with just a single leaf node. We transform it
        # to a model with one internal node.
        t_left = [1, -1, -1]
        t_right = [2, -1, -1]
        t_features = [0, 0, 0]
        t_threshold = [0, -1, -1]
        if l_count < len(values):
            t_values[0] = values[l_count]
            l_count += 1
    else:
        for j in range(len(t_left)):
            if t_threshold[j] == -1 and l_count < len(values):
                t_values[j] = values[l_count]
                l_count += 1
    if t_values.shape[0] == 1:
        # Model creating trees with just a single leaf node. We fix the values here.
        n_classes = t_values.shape[1]
        t_values = np.array([np.array([0.0]), t_values[0], t_values[0]])
        t_values.reshape(3, n_classes)
    if is_decision_tree and n_classes == 2:  # We need to fix the probabilities in this case.
        for k in range(len(t_left)):
            prob = (1 / (max(tree_ids) + 1)) - t_values[k][1]
            t_values[k][0] = prob
    tree_infos.append(TreeParameters(t_left, t_right, t_features, t_threshold, np.array(t_values).reshape(-1, n_classes)))
    return tree_infos, classes, post_transform
def _dummy_get_parameter(tree_info):
"""
Dummy function used to return parameters (TreeEnsemble converters already have parameters in the right format)
"""
return tree_info
def _get_tree_infos_from_tree_ensemble(operator, device=None, extra_config={}):
    """
    Base method for extracting parameters from `ai.onnx.ml.TreeEnsemble`s.

    Returns a tuple (n_features, tree_infos, classes, post_transform).
    """
    # The feature count must have been injected into extra_config by the caller.
    assert constants.N_FEATURES in extra_config, "Cannot retrive the number of features. Please fill an issue at https://github.com/microsoft/hummingbird."
    feature_count = extra_config[constants.N_FEATURES]
    # Pull the per-tree parameters out of the wrapped ONNX-ML operator.
    trees, class_labels, transform = _get_tree_infos_from_onnx_ml_operator(operator)
    return feature_count, trees, class_labels, transform
def convert_onnx_tree_ensemble_classifier(operator, device=None, extra_config=None):
    """
    Converter for `ai.onnx.ml.TreeEnsembleClassifier`.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleClassifier` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None
    # BUG FIX: the default used to be a shared mutable dict ({}). Since this
    # function writes constants.POST_TRANSFORM into extra_config below, state
    # leaked between successive conversions that relied on the default.
    if extra_config is None:
        extra_config = {}

    # Get tree informations from the operator.
    n_features, tree_infos, classes, post_transform = _get_tree_infos_from_tree_ensemble(
        operator.raw_operator, device, extra_config
    )

    # Generate the model: plain decision ensemble when there is no
    # post-transform, GBDT-style classifier otherwise.
    if post_transform == "NONE":
        return convert_decision_ensemble_tree_common(
            tree_infos, _dummy_get_parameter, get_parameters_for_tree_trav_common, n_features, classes, extra_config
        )
    extra_config[constants.POST_TRANSFORM] = post_transform
    return convert_gbdt_classifier_common(tree_infos, _dummy_get_parameter, n_features, len(classes), classes, extra_config)
def convert_onnx_tree_ensemble_regressor(operator, device=None, extra_config=None):
    """
    Converter for `ai.onnx.ml.TreeEnsembleRegressor`.

    Args:
        operator: An operator wrapping a `ai.onnx.ml.TreeEnsembleRegressor` model
        device: String defining the type of device the converted operator should be run on
        extra_config: Extra configuration used to select the best conversion strategy

    Returns:
        A PyTorch model
    """
    assert operator is not None
    # Avoid a shared mutable default ({}): downstream conversion helpers may
    # write into extra_config, which would leak state between calls.
    if extra_config is None:
        extra_config = {}

    # Get tree informations from the operator.
    n_features, tree_infos, _, _ = _get_tree_infos_from_tree_ensemble(operator.raw_operator, device, extra_config)

    # Generate the model.
    return convert_gbdt_common(tree_infos, _dummy_get_parameter, n_features, extra_config=extra_config)
# Register the ONNX-ML tree-ensemble converters under the operator aliases
# used by the converter registry.
register_converter("ONNXMLTreeEnsembleClassifier", convert_onnx_tree_ensemble_classifier)
register_converter("ONNXMLTreeEnsembleRegressor", convert_onnx_tree_ensemble_regressor)
| 42.289256 | 124 | 0.627321 |
acf9eb5233148008a65cb9ce08908b5c5009215a | 428 | py | Python | Preprocessing/weekly_preprocessing.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | Preprocessing/weekly_preprocessing.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | Preprocessing/weekly_preprocessing.py | centre-for-humanities-computing/hope_dataprep | 77e23256e8bd429b904b15d236b2110475c51bbf | [
"MIT"
] | null | null | null | """
This script runs the preprocessing / language extraction once a week.
"""
from datetime import date
import os
import time
# Fire the extraction once per week: poll every hour and run the job on the
# first poll that lands on a Tuesday, then latch until Tuesday has passed.
is_run = False  # True once the job has already fired on the current Tuesday
while True:
    time.sleep(60*60)  # wait one hour between polls
    if date.today().weekday() == 1:  # monday = 0, tuesday = 1 ...
        if not is_run:
            is_run = True
            os.system('python3 extract_nordic_tweets.py')
    else:
        is_run = False  # reset the latch for next week
| 21.4 | 66 | 0.591121 |
acf9ebd7bff08e1fe233e9b5e31845bf7aed7a61 | 17,164 | py | Python | canmatrix/compare.py | answer000000/can-matrix | 270f167b270cfefc623ce44e49421bec75ad0926 | [
"BSD-2-Clause"
] | 1 | 2020-12-07T02:11:27.000Z | 2020-12-07T02:11:27.000Z | canmatrix/compare.py | answer000000/can-matrix | 270f167b270cfefc623ce44e49421bec75ad0926 | [
"BSD-2-Clause"
] | null | null | null | canmatrix/compare.py | answer000000/can-matrix | 270f167b270cfefc623ce44e49421bec75ad0926 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2013, Eduard Broecker
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that
# the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list of conditions and the
# following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
from __future__ import print_function
from __future__ import absolute_import
from .log import setup_logger, set_log_level
logger = setup_logger('root')
import sys
class compareResult(object):
    """One node of the comparison tree.

    Records the outcome for a single compared element ("equal", "added",
    "deleted" or "changed"), the element's kind (db, bu, frame, signal,
    attribute), a reference to the related object, an optional [old, new]
    pair, and nested results for sub-elements.
    """

    def __init__(self, result=None, mtype=None, ref=None, changes=None):
        self._result = result      # equal, added, deleted, changed
        self._type = mtype         # db, bu, frame, signal, attribute
        self._ref = ref            # reference to the related object
        self._changes = changes    # optional [old, new] values
        self._children = []        # nested compareResult nodes

    def addChild(self, child):
        """Attach a nested comparison result to this node."""
        self._children.append(child)
def propagateChanges(res):
    """Bubble child differences upwards through the comparison tree.

    A node whose subtree contains any non-"equal" result is re-marked as
    "changed".  Returns 1 when *res* itself ends up non-"equal", else 0.
    """
    changed_below = sum(propagateChanges(child) for child in res._children)
    if changed_below != 0:
        res._result = "changed"
    return 0 if res._result == "equal" else 1
def compareDb(db1, db2, ignore=None):
    """Compare two CAN databases and return a compareResult tree.

    Walks frames, attributes, ECUs (board units), define lists and value
    tables in both directions, recording "added"/"deleted"/"changed"
    entries.  *ignore* may map "ATTRIBUTE" or "DEFINE" to "*" to skip
    those comparisons entirely.
    """
    result = compareResult()
    # Frames present in db1: deleted from db2, or compared pairwise.
    for f1 in db1.frames:
        f2 = db2.frameById(f1._Id)
        if f2 is None:
            result.addChild(compareResult("deleted", "FRAME", f1))
        else:
            result.addChild(compareFrame(f1, f2, ignore))
    # Frames only present in db2.
    for f2 in db2.frames:
        f1 = db1.frameById(f2._Id)
        if f1 is None:
            result.addChild(compareResult("added", "FRAME", f2))
    if ignore is not None and "ATTRIBUTE" in ignore and ignore[
            "ATTRIBUTE"] == "*":
        pass
    else:
        result.addChild(compareAttributes(db1, db2, ignore))
    # ECUs in both directions.
    for bu1 in db1.boardUnits:
        bu2 = db2.boardUnitByName(bu1.name)
        if bu2 is None:
            result.addChild(compareResult("deleted", "ecu", bu1))
        else:
            result.addChild(compareBu(bu1, bu2, ignore))
    for bu2 in db2.boardUnits:
        bu1 = db1.boardUnitByName(bu2.name)
        if bu1 is None:
            result.addChild(compareResult("added", "ecu", bu2))
    if ignore is not None and "DEFINE" in ignore and ignore["DEFINE"] == "*":
        pass
    else:
        # Global, ECU, frame and signal define lists.
        result.addChild(
            compareDefineList(
                db1.globalDefines,
                db2.globalDefines))
        temp = compareDefineList(db1.buDefines, db2.buDefines)
        temp._type = "ECU Defines"
        result.addChild(temp)
        temp = compareDefineList(db1.frameDefines, db2.frameDefines)
        temp._type = "Frame Defines"
        result.addChild(temp)
        temp = compareDefineList(db1.signalDefines, db2.signalDefines)
        temp._type = "Signal Defines"
        result.addChild(temp)
    # Value tables in both directions.  NOTE(review): the ref passed for
    # added/deleted tables is the whole _valueTables dict rather than the
    # single table -- looks intentional for display purposes; confirm.
    for vt1 in db1.valueTables:
        if vt1 not in db2.valueTables:
            result.addChild(
                compareResult(
                    "deleted",
                    "valuetable " + vt1,
                    db1._valueTables))
        else:
            result.addChild(
                compareValueTable(
                    db1._valueTables[vt1],
                    db2._valueTables[vt1]))
    for vt2 in db2.valueTables:
        if vt2 not in db1.valueTables:
            result.addChild(
                compareResult(
                    "added",
                    "valuetable " + vt2,
                    db2._valueTables))
    # Mark every ancestor of a difference as "changed".
    propagateChanges(result)
    return result
def compareValueTable(vt1, vt2):
    """Compare two value tables (raw value -> descriptive text mappings)."""
    result = compareResult("equal", "Valuetable", vt1)
    # Entries missing from vt2 or mapped to a different text.
    for key in vt1:
        if key not in vt2:
            result.addChild(
                compareResult("removed", "Value " + str(key), vt1[key]))
        elif vt1[key] != vt2[key]:
            label = "Value " + str(key) + " " + str(vt1[key])
            result.addChild(
                compareResult("changed", label, [vt1[key], vt2[key]]))
    # Entries only present in vt2.
    for key in vt2:
        if key not in vt1:
            result.addChild(
                compareResult("added", "Value " + str(key), vt2[key]))
    return result
def compareSignalGroup(sg1, sg2):
    """Compare two signal groups by name, id and membership."""
    result = compareResult("equal", "SignalGroup", sg1)
    if sg1.name != sg2.name:
        result.addChild(
            compareResult("changed", "SignalName", [sg1.name, sg2.name]))
    if sg1.id != sg2.id:
        # NOTE(review): the "SignalName" label is kept for output
        # compatibility even though this entry reports an id change.
        result.addChild(
            compareResult("changed", "SignalName", [str(sg1.id), str(sg2.id)]))
    if sg1.signals is None or sg2.signals is None:
        logger.debug("Strange - sg wo members???")
        return result
    # Membership differences in both directions.
    for sig in sg1.signals:
        if sg2.byName(sig.name) is None:
            result.addChild(compareResult("deleted", str(sig.name), sig))
    for sig in sg2.signals:
        if sg1.byName(sig.name) is None:
            result.addChild(compareResult("added", str(sig.name), sig))
    return result
def compareDefineList(d1list, d2list):
    """Compare two attribute-definition dictionaries (name -> define)."""
    result = compareResult("equal", "DefineList", d1list)
    for name in d1list:
        if name not in d2list:
            result.addChild(
                compareResult("deleted", "Define" + str(name), d1list))
            continue
        old = d1list[name]
        new = d2list[name]
        if old.definition != new.definition:
            result.addChild(
                compareResult("changed", "Definition", old.definition,
                              [old.definition, new.definition]))
        if old.defaultValue != new.defaultValue:
            result.addChild(
                compareResult("changed", "DefaultValue", old.definition,
                              [old.defaultValue, new.defaultValue]))
    for name in d2list:
        if name not in d1list:
            result.addChild(
                compareResult("added", "Define" + str(name), d2list))
    return result
def compareAttributes(ele1, ele2, ignore=None):
    """Compare the attribute dictionaries of two elements."""
    result = compareResult("equal", "ATTRIBUTES", ele1)
    # Honour the ignore filter: "*" (or this very element) suppresses the check.
    if ignore is not None and "ATTRIBUTE" in ignore and (
            ignore["ATTRIBUTE"] == "*" or ignore["ATTRIBUTE"] == ele1):
        return result
    attrs1 = ele1.attributes
    attrs2 = ele2.attributes
    for name in attrs1:
        if name not in attrs2:
            result.addChild(
                compareResult("deleted", str(name), attrs1[name]))
        elif attrs1[name] != attrs2[name]:
            result.addChild(
                compareResult("changed", str(name), attrs1[name],
                              [attrs1[name], attrs2[name]]))
    for name in attrs2:
        if name not in attrs1:
            result.addChild(
                compareResult("added", str(name), attrs2[name]))
    return result
def compareBu(bu1, bu2, ignore=None):
    """Compare two ECUs (board units) by comment and attributes."""
    result = compareResult("equal", "ECU", bu1)
    if bu1.comment != bu2.comment:
        result.addChild(
            compareResult("changed", "ECU", bu1, [bu1.comment, bu2.comment]))
    skip_attributes = (ignore is not None and "ATTRIBUTE" in ignore
                       and ignore["ATTRIBUTE"] == "*")
    if not skip_attributes:
        result.addChild(compareAttributes(bu1, bu2, ignore))
    return result
def compareFrame(f1, f2, ignore=None):
    """Compare two frames: signals, header fields, attributes, transmitters
    and signal groups.  Returns a compareResult tree rooted at the frame."""
    result = compareResult("equal", "FRAME", f1)
    # Signals deleted from f1's perspective, otherwise compared pairwise.
    for s1 in f1:
        s2 = f2.signalByName(s1.name)
        if not s2:
            result.addChild(compareResult("deleted", "SIGNAL", s1))
        else:
            result.addChild(compareSignal(s1, s2, ignore))
    if f1.name != f2.name:
        result.addChild(
            compareResult("changed", "Name", f1, [f1.name, f2.name]))
    if f1.size != f2.size:
        result.addChild(
            compareResult("changed", "dlc", f1,
                          ["dlc: %d" % f1.size, "dlc: %d" % f2.size]))
    if f1.extended != f2.extended:
        result.addChild(
            compareResult("changed", "FRAME", f1,
                          ["extended-Flag: %d" % f1._extended,
                           "extended-Flag: %d" % f2._extended]))
    # Normalise missing comments to empty strings before comparing.
    if f2.comment is None:
        f2.addComment("")
    if f1.comment is None:
        f1.addComment("")
    if f1.comment != f2.comment:
        result.addChild(
            compareResult("changed", "FRAME", f1,
                          ["comment: " + f1.comment, "comment: " + f2.comment]))
    # Signals only present in f2.
    for s2 in f2._signals:
        if not f1.signalByName(s2.name):
            result.addChild(compareResult("added", "SIGNAL", s2))
    if not (ignore is not None and "ATTRIBUTE" in ignore
            and ignore["ATTRIBUTE"] == "*"):
        result.addChild(compareAttributes(f1, f2, ignore))
    # Transmitter lists in both directions.
    for transmitter in f1.transmitter:
        if transmitter not in f2.transmitter:
            result.addChild(compareResult("removed", "Frame-Transmitter", f1))
    for transmitter in f2.transmitter:
        if transmitter not in f1.transmitter:
            result.addChild(compareResult("added", "Frame-Transmitter", f2))
    # Signal groups in both directions.
    for sg1 in f1.SignalGroups:
        sg2 = f2.signalGroupbyName(sg1.name)
        if sg2 is None:
            result.addChild(compareResult("removed", "Signalgroup", sg1))
        else:
            result.addChild(compareSignalGroup(sg1, sg2))
    for sg2 in f2._SignalGroups:
        if f1.signalGroupbyName(sg2.name) is None:
            # BUG FIX: this used to reference sg1 (the stale loop variable of
            # the loop above), reporting the wrong group -- or raising
            # NameError when f1 has no signal groups at all.
            result.addChild(compareResult("added", "Signalgroup", sg2))
    return result
def compareSignal(s1, s2, ignore=None):
    """Compare two signals field by field (layout, scaling, limits,
    endianness, sign, multiplexing, unit, comment, receivers, attributes
    and value table).  Returns a compareResult tree rooted at the signal."""
    result = compareResult("equal", "SIGNAL", s1)
    # Layout and scaling fields.
    if s1._startbit != s2._startbit:
        result.addChild(
            compareResult("changed", "startbit", s1,
                          [" %d" % s1._startbit, " %d" % s2._startbit]))
    if s1._signalsize != s2._signalsize:
        result.addChild(
            compareResult("changed", "signalsize", s1,
                          [" %d" % s1._signalsize, " %d" % s2._signalsize]))
    # Numeric fields are compared as floats so "1" and "1.0" count as equal.
    if float(s1._factor) != float(s2._factor):
        result.addChild(
            compareResult("changed", "factor", s1, [s1._factor, s2._factor]))
    if float(s1._offset) != float(s2._offset):
        result.addChild(
            compareResult("changed", "offset", s1, [s1._offset, s2._offset]))
    if float(s1._min) != float(s2._min):
        result.addChild(
            compareResult("changed", "min", s1, [s1._min, s2._min]))
    if float(s1._max) != float(s2._max):
        result.addChild(
            compareResult("changed", "max", s1, [s1._max, s2._max]))
    if s1._is_little_endian != s2._is_little_endian:
        result.addChild(
            compareResult("changed", "is_little_endian", s1,
                          [" %d" % s1._is_little_endian,
                           " %d" % s2._is_little_endian]))
    if s1._is_signed != s2._is_signed:
        result.addChild(
            compareResult("changed", "sign", s1,
                          [" %d" % s1._is_signed, " %d" % s2._is_signed]))
    if s1._multiplex != s2._multiplex:
        result.addChild(
            compareResult("changed", "multiplex", s1,
                          [str(s1._multiplex), str(s2._multiplex)]))
    if s1._unit != s2._unit:
        result.addChild(
            compareResult("changed", "unit", s1, [s1._unit, s2._unit]))
    # Comments: differences that are only whitespace get a special marker.
    if s1.comment is not None and s2.comment is not None and s1.comment != s2.comment:
        if s1.comment.replace("\n", " ") != s2.comment.replace("\n", " "):
            result.addChild(
                compareResult("changed", "comment", s1,
                              [s1.comment, s2.comment]))
        else:
            result.addChild(
                compareResult("changed", "comment", s1,
                              ["only whitespaces differ", ""]))
    # Receiver lists in both directions.
    for receiver in s1._receiver:
        if receiver not in s2._receiver:
            result.addChild(
                compareResult("removed", "receiver " + receiver, s1._receiver))
    for receiver in s2._receiver:
        if receiver not in s1._receiver:
            # BUG FIX: the reference used to point at s1._receiver even
            # though the receiver was added on the s2 side.
            result.addChild(
                compareResult("added", "receiver " + receiver, s2._receiver))
    if not (ignore is not None and "ATTRIBUTE" in ignore
            and ignore["ATTRIBUTE"] == "*"):
        result.addChild(compareAttributes(s1, s2, ignore))
    result.addChild(compareValueTable(s1._values, s2._values))
    return result
def dumpResult(res, depth=0):
    """Recursively print a compareResult tree, indenting by *depth*.

    "equal" nodes are silent themselves but their children are still
    visited.
    """
    if res._type is not None and res._result != "equal":
        for _ in range(0, depth):
            print(" ", end=' ')
        print(res._type + " " + res._result + " ", end=' ')
        if hasattr(res._ref, 'name'):
            print(res._ref.name)
        else:
            print(" ")
        if res._changes is not None and res._changes[
                0] is not None and res._changes[1] is not None:
            for _ in range(0, depth):
                print(" ", end=' ')
            # BUG FIX: encode()/decode() round-trip replaces non-ASCII
            # characters.  The original concatenated str with the bytes
            # returned by encode(), which raises TypeError on Python 3.
            print("old: " +
                  str(res._changes[0]).encode('ascii', 'replace').decode('ascii') +
                  " new: " +
                  str(res._changes[1]).encode('ascii', 'replace').decode('ascii'))
    for child in res._children:
        dumpResult(child, depth + 1)
def main():
    """Command-line entry point: load two CAN matrix files, compare them
    and dump the differences to stdout."""
    from optparse import OptionParser
    usage = """
    %prog [options] cancompare matrix1 matrix2
    matrixX can be any of *.dbc|*.dbf|*.kcd|*.arxml
    """
    parser = OptionParser(usage=usage)
    parser.add_option(
        "-s",
        dest="silent",
        action="store_true",
        help="don't print status messages to stdout. (only errors)",
        default=False)
    parser.add_option(
        "-v",
        dest="verbosity",
        action="count",
        help="Output verbosity",
        default=0)
    (cmdlineOptions, args) = parser.parse_args()
    # Two positional arguments (the two matrices) are mandatory.
    if len(args) < 2:
        parser.print_help()
        sys.exit(1)
    matrix1 = args[0]
    matrix2 = args[1]
    verbosity = cmdlineOptions.verbosity
    if cmdlineOptions.silent:
        # Only print ERROR messages (ignore import warnings)
        verbosity = -1
    set_log_level(logger, verbosity)
    # import only after setting log level, to also disable warning messages in
    # silent mode.
    import canmatrix.formats
    logger.info("Importing " + matrix1 + " ... ")
    db1 = next(iter(canmatrix.formats.loadp(matrix1).values()))
    logger.info("%d Frames found" % (db1._fl._list.__len__()))
    logger.info("Importing " + matrix2 + " ... ")
    db2 = next(iter(canmatrix.formats.loadp(matrix2).values()))
    logger.info("%d Frames found" % (db2._fl._list.__len__()))
    # Optional filters: uncomment to suppress attribute/define comparison.
    ignore = {}
    #ignore["ATTRIBUTE"] = "*"
    #ignore["DEFINE"] = "*"
    obj = compareDb(db1, db2, ignore)
    dumpResult(obj)
if __name__ == '__main__':
    sys.exit(main())
| 33.263566 | 112 | 0.542939 |
acf9ebe7ca95e79f2ece4bb4a9b6f57ba1b70fc6 | 12,260 | py | Python | src/fvm/scripts/FluentCase.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/scripts/FluentCase.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null | src/fvm/scripts/FluentCase.py | drm42/fvm-drm | c9b940e593034f1aa3020d63ff1e09ebef9c182a | [
"MIT"
] | null | null | null |
from fvm import importers
import SchemeParser
def isConsPairOrList(v):
    """Return True when *v* is a SchemeParser cons pair or a plain list."""
    return isinstance(v, (SchemeParser.ConsPair, list))
def scmToPy(val):
    """Convert a parsed scheme value into its closest python equivalent.

    Cons pairs become lists (or unwrapped scalars for one-element lists),
    the symbols #f/#t become booleans, other symbols become strings, and
    everything else is returned unchanged.
    """
    if isinstance(val, SchemeParser.ConsPair):
        head = val[0]
        tail = val[1]
        if not isConsPairOrList(tail):
            # Improper pair (a . b) -> two-element python list.
            return [scmToPy(head), scmToPy(tail)]
        if len(tail) == 0:
            # Single-element list -> unwrap to the element itself.
            return scmToPy(head)
        converted = scmToPyList(val)
        # scmToPyList refuses nested lists; keep the raw cons pair then.
        return converted if converted is not None else val
    if isinstance(val, SchemeParser.Symbol):
        text = str(val)
        if text == '#f':
            return False
        if text == '#t':
            return True
        return text
    return val
def scmToPyList(vars):
    """Convert a flat scheme list into a python list of converted values.

    Returns None as soon as a nested cons pair is encountered, signalling
    the caller to keep the original representation.
    """
    converted = []
    node = vars
    while len(node) > 0 and isinstance(node, SchemeParser.ConsPair):
        head = node[0]
        if isinstance(head, SchemeParser.ConsPair):
            return None
        converted.append(scmToPy(head))
        node = node[1]
    return converted
def scmToDict(vars):
    """Convert a scheme association list into a python dict.

    Each entry is expected to be a (key . value) pair; a bare atom at the
    top level is treated as a final (key . value) dotted pair.  Returns
    None when an entry's key is itself a cons pair.
    """
    mapping = {}
    node = vars
    while len(node) > 0:
        entry = node[0]
        if not isinstance(entry, SchemeParser.ConsPair):
            # Dotted tail: (key . value) terminates the association list.
            mapping[str(entry)] = scmToPy(node[1])
            return mapping
        if isinstance(entry[0], SchemeParser.ConsPair):
            return None
        mapping[str(entry[0])] = scmToPy(entry[1])
        node = node[1]
    return mapping
# Map of Fluent thread-type codes to zone-type names.
# BUG FIX: code 7 used to map to 'symmetric', a spelling no consumer in this
# module recognises (importThermalBCs/importFlowBCs test for 'symmetry', the
# keyword Fluent case files themselves use), so symmetry zones always raised
# TypeError downstream.
threadTypeToZoneType = { 1:'fluid',
                         2:'interior',
                         3:'wall',
                         4:'pressure-inlet',
                         5:'pressure-outlet',
                         7:'symmetry',
                         10:'velocity-inlet',
                         17:'solid'
                         }

def getZoneType(threadType):
    """Return the zone-type name for a Fluent thread-type code.

    Raises TypeError for unknown codes (kept as TypeError for backward
    compatibility with existing callers).
    """
    try:
        return threadTypeToZoneType[threadType]
    except KeyError:
        raise TypeError('invalid thread type %d' % threadType)
class FluentCase(importers.FluentReader):
    class FluentZone():
        """A single zone (face or cell thread) read from a Fluent case.

        Wraps the zone's id, type and name, plus the zone's scheme
        variable block converted into a python dict (varsDict).
        """
        def __init__(self,ID,zoneName,threadType,zoneType,varString):
            self.id = ID
            # Fall back to the thread-type lookup when the case did not
            # spell out a zone type explicitly.
            if zoneType == '':
                zoneType = getZoneType(threadType)
            # Synthesize a name like "wall_3" when none was given.
            if zoneName == '':
                zoneName = "%s_%d" % (zoneType,ID)
            self.zoneType = zoneType
            self.zoneName = zoneName
            # Parse the zone's scheme variable string (empty -> empty dict).
            if varString == "":
                self.varsDict = {}
            else:
                self.varsDict = scmToDict(SchemeParser.parse(varString))
            # When source terms are enabled, normalise them under the
            # 'source-terms' key; some cases store them under 'sources'.
            if 'sources?' in self.varsDict and self.varsDict['sources?']:
                sourcekey = 'source-terms'
                if sourcekey not in self.varsDict:
                    sourcekey = 'sources'
                sourceVar = self.varsDict[sourcekey]
                self.varsDict['source-terms'] = scmToDict(sourceVar)
        def getVar(self,v):
            """Return the raw (converted) value of zone variable *v*."""
            return self.varsDict[v]
        def getConstantVar(self,v):
            """Return the value of variable *v*, requiring a 'constant'
            profile; raises ValueError otherwise."""
            val = self.varsDict[v]
            # Scalars are stored directly, profiles as [method, data] lists
            # (sometimes nested one level deeper).
            if not isinstance(val,list):
                return val
            if isinstance(val[0],list):
                val = val[0]
            if val[0] == 'constant':
                return val[1]
            else:
                raise ValueError(v + ' value is not constant')
        def getConstantSource(self,v):
            """Return the constant source term *v* from 'source-terms';
            raises ValueError when the source is not a constant profile."""
            val = self.varsDict['source-terms'][v]
            if isinstance(val[0],list):
                val = val[0]
            if val[0] == 'constant':
                return val[1]
            else:
                raise ValueError('source ' + v + ' value is not constant')
    class Material:
        """A material from the case's materials section, with its
        property dictionary (name -> [method, data] profile specs)."""
        def __init__(self,materialType,props):
            # materialType: first element of the scheme material record.
            self.materialType = materialType
            # props: dict of property name -> profile, e.g. ['constant', 1.2].
            self.props = props
        def getPropMethod(self,p):
            """Return the profile method name for property *p*
            (e.g. 'constant' or 'polynomial')."""
            val = self.props[p]
            if isinstance(val[0],list):
                val=val[0]
            return val[0]
        def getPropData(self,p):
            """Return the raw data part of property *p*'s profile."""
            val = self.props[p]
            if isinstance(val[0],list):
                val=val[0]
            return val[1]
        def getConstantProp(self,p):
            """Return property *p*'s value, requiring a 'constant' profile;
            raises ValueError otherwise."""
            val = self.props[p]
            if isinstance(val[0],list):
                val=val[0]
            if val[0] == 'constant':
                return val[1]
            else:
                raise ValueError(' %s value is not constant: %s' % (p,val) )
        def getProp(self,p,tField):
            """ allows for constant or polynomial of tField"""
            val = self.props[p]
            if isinstance(val[0],list):
                val=val[0]
            if val[0] == 'constant':
                return val[1]
            elif val[0] == 'polynomial':
                # NOTE(review): 'Polynomial' is not defined or imported in
                # this module; this branch raises NameError unless the name
                # is provided elsewhere -- confirm.
                return Polynomial(scmToPy(val[1]),field=tField)
                #return PyArrayUDFPolynomial(coeffs=scmToPy(val[1]),xField=tField)
            else:
                raise ValueError(' %s value is not constant or polynomial: %s' % (p,val) )
        def getVar(self,v):
            # NOTE(review): Material never sets self.varsDict (FluentZone
            # does); calling this raises AttributeError.  Probably meant to
            # read self.props -- confirm before relying on it.
            return self.varsDict[v]
    def read(self):
        """Read the mesh and the case variables, building the materials,
        faceZones and cellZones dictionaries keyed by zone id."""
        print 'reading mesh'
        self.readMesh()
        varString = self.getVars()
        # A case without a variables section: nothing more to do.
        if varString=="":
            return
        vars=SchemeParser.parse(varString)
        self.varsDict = scmToDict(vars)
        if self.varsDict is None:
            raise TypeError("vars is not an association list")
        self.config = scmToDict(self.getVar('case-config'))
        self.materials = {}
        self.faceZones = {}
        self.cellZones = {}
        # Each material record is (type . properties-alist).
        mDict = scmToDict(self.getVar('materials'))
        for n,d in mDict.iteritems():
            self.materials[n] = FluentCase.Material(materialType=scmToPy(d[0]),
                                                    props=scmToDict(d[1]))
        # Wrap the raw reader zones in FluentZone objects, keyed by id.
        rFaceZones = self.getFaceZones()
        for i in rFaceZones.keys():
            rfz = rFaceZones[i]
            self.faceZones[i] = FluentCase.FluentZone(ID=rfz.ID,
                                                      zoneName=rfz.zoneName,
                                                      threadType=rfz.threadType,
                                                      zoneType=rfz.zoneType,
                                                      varString=rfz.zoneVars)
        rCellZones = self.getCellZones()
        for i in rCellZones.keys():
            rfz = rCellZones[i]
            self.cellZones[i] = FluentCase.FluentZone(ID=rfz.ID,
                                                      zoneName=rfz.zoneName,
                                                      threadType=rfz.threadType,
                                                      zoneType=rfz.zoneType,
                                                      varString=rfz.zoneVars)
def importThermalBCs(self,tmodel):
bcMap = tmodel.getBCMap()
for i in bcMap.keys():
bc = bcMap[i]
fluentZone = self.faceZones[i]
if fluentZone.zoneType == 'wall':
thermalBCType = fluentZone.getVar('thermal-bc')
if thermalBCType == 0:
bc.bcType = 'SpecifiedTemperature'
bc.setVar('specifiedTemperature',fluentZone.getConstantVar('t'))
elif thermalBCType == 1:
flux= fluentZone.getConstantVar('q')
bc.bcType = 'SpecifiedHeatFlux'
bc.setVar('specifiedHeatFlux',flux)
elif thermalBCType == 3:
bc.bcType = 'CoupledWall'
else:
raise TypeError('thermal BCType %d not handled' % thermalBCType)
elif fluentZone.zoneType in ['velocity-inlet','pressure-inlet',
'pressure-outlet',
'mass-flow-inlet',
'exhaust-fan', 'intake-fan',
'inlet-vent', 'outlet-vent']:
bc.bcType = 'SpecifiedTemperature'
if fluentZone.zoneType == 'velocity-inlet':
bc.setVar('specifiedTemperature', fluentZone.getConstantVar('t'))
else:
bc.setVar('specifiedTemperature',fluentZone.getConstantVar('t0'))
elif fluentZone.zoneType == 'symmetry':
pass
else:
raise TypeError('invalid boundary type : ' + fluentZone.zoneType)
def importFlowBCs(self,fmodel, meshes):
options = fmodel.getOptions()
options['initialXVelocity'] = self.getVar('x-velocity/default')
options['initialYVelocity']= self.getVar('y-velocity/default')
options['initialZVelocity']= self.getVar('z-velocity/default')
options['initialPressure']= self.getVar('pressure/default')
if 'initialTemperature' in options.getKeys():
options['initialTemperature']=\
self.getVar('temperature/default')
options['momentumURF']= self.getVar('mom/relax')
options['pressureURF']= self.getVar('pressure/relax')
bcMap = fmodel.getBCMap()
for i in bcMap.keys():
bc = bcMap[i]
fluentZone = self.faceZones[i]
if fluentZone.zoneType == 'wall':
motionBCType = fluentZone.getVar('motion-bc')
bc.bcType = 'NoSlipWall'
if motionBCType == 0:
pass
elif motionBCType == 1:
vmag = fluentZone.getVar('vmag')
bc['specifiedXVelocity']=vmag*fluentZone.getConstantVar('ni')
bc['specifiedYVelocity']=vmag*fluentZone.getConstantVar('nj')
bc['specifiedZVelocity']=vmag*fluentZone.getConstantVar('nk')
else:
raise TypeError('flow BCType %d not handled' % motionBCType)
elif fluentZone.zoneType == 'velocity-inlet':
motionBCType = fluentZone.getVar('velocity-spec')
bc.bcType = 'VelocityBoundary'
if motionBCType == 0:
vmag = fluentZone.getVar('vmag')
bc['specifiedXVelocity']=vmag*fluentZone.getConstantVar('ni')
bc['specifiedYVelocity']=vmag*fluentZone.getConstantVar('nj')
bc['specifiedZVelocity']=vmag*fluentZone.getConstantVar('nk')
if motionBCType == 1:
vmag = fluentZone.getVar('vmag')
bc['specifiedXVelocity']=fluentZone.getConstantVar('u')
bc['specifiedYVelocity']=fluentZone.getConstantVar('v')
bc['specifiedZVelocity']=fluentZone.getConstantVar('w')
else:
raise TypeError('flow BCType %d not handled' % motionBCType)
elif fluentZone.zoneType == 'pressure-outlet':
bc.bcType = 'PressureBoundary'
bc['specifiedPressure']=fluentZone.getConstantVar('p')
elif fluentZone.zoneType == 'pressure-inlet':
bc.bcType = 'PressureBoundary'
bc['specifiedPressure']=fluentZone.getConstantVar('p0')
elif fluentZone.zoneType == 'symmetry':
bc.bcType = 'Symmetry'
else:
raise TypeError('invalid boundary type : ' + fluentZone.zoneType)
vcMap = fmodel.getVCMap()
for mesh in meshes:
vc = vcMap[mesh.getID()]
fluentZone = self.cellZones[mesh.getCellZoneID()]
material = self.materials[fluentZone.getVar('material')]
if material.getPropMethod('density') == 'constant':
vc['density']=material.getConstantProp('density')
else:
print 'Density method is not constant. Remember to setup ideal gas density model'
vc['viscosity']=material.getConstantProp('viscosity')
| 38.553459 | 97 | 0.502365 |
acf9ec1d8e10e298812ddcb7f4b04f9b43ee9bf3 | 372 | py | Python | students/K33422/Izmaylova_Anna/web_lab1/2/2_client.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | students/K33422/Izmaylova_Anna/web_lab1/2/2_client.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | students/K33422/Izmaylova_Anna/web_lab1/2/2_client.py | Anna0102/ITMO_ICT_WebDevelopment_2021-2022 | 1a361329eabccefa5bd9f3d22e1b5dbdb950c85e | [
"MIT"
] | null | null | null | import socket
sock = socket.socket()
sock.connect(('localhost', 9091))
a = input("Введите a: ")
b = input("Введите b: ")
c = input("Введите c: ")
req = f"GET https://localhost?a={a}&b={b}&c={c} HTTP/1.1"
sock.send(req.encode()) # Отпрвка сообщения на сервер, encode() превращает текст в байты
data = sock.recv(1024)
print(data)
sock.close() # закрыть подключение | 24.8 | 88 | 0.663978 |
acf9ec7a06f0fd8a8826b9507b00663c18f51f02 | 508 | py | Python | alchemyst/ui/errors.py | alexdmoss/alchemyst | dba67544aa1b81c5db1f5ee29d25cf8c9d098e21 | [
"MIT"
] | null | null | null | alchemyst/ui/errors.py | alexdmoss/alchemyst | dba67544aa1b81c5db1f5ee29d25cf8c9d098e21 | [
"MIT"
] | null | null | null | alchemyst/ui/errors.py | alexdmoss/alchemyst | dba67544aa1b81c5db1f5ee29d25cf8c9d098e21 | [
"MIT"
] | null | null | null | import yaml
from datetime import datetime
from flask import render_template
from alchemyst import app
with open('app-config.yaml') as app_cfg_file:
app_cfg = yaml.load(app_cfg_file, Loader=yaml.FullLoader)
layout = app_cfg['layout']
layout['year'] = datetime.now().year
@app.errorhandler(404)
def not_found_error(error):
return render_template('404.html', layout=layout), 404
@app.errorhandler(500)
def internal_error(error):
return render_template('500.html', layout=layout), 500
| 23.090909 | 61 | 0.75 |
acf9ec913b9cc42bed057cf7b9bad45dd8c6fc8f | 2,892 | py | Python | python-telegram-bot/tests/test_helpers.py | shyguy-ry/paddingCheckBot | d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | [
"Apache-2.0"
] | 1 | 2019-10-22T03:46:17.000Z | 2019-10-22T03:46:17.000Z | python-telegram-bot/tests/test_helpers.py | shyguy-ry/paddingCheckBot | d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | [
"Apache-2.0"
] | null | null | null | python-telegram-bot/tests/test_helpers.py | shyguy-ry/paddingCheckBot | d0a60cc2f397b9b8e4d60bdea699a94beaff2ea1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2018
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
from telegram import Sticker
from telegram import Update
from telegram import User
from telegram.message import Message
from telegram.utils import helpers
class TestHelpers(object):
def test_escape_markdown(self):
test_str = '*bold*, _italic_, `code`, [text_link](http://github.com/)'
expected_str = '\*bold\*, \_italic\_, \`code\`, \[text\_link](http://github.com/)'
assert expected_str == helpers.escape_markdown(test_str)
def test_effective_message_type(self):
def build_test_message(**kwargs):
config = dict(
message_id=1,
from_user=None,
date=None,
chat=None,
)
config.update(**kwargs)
return Message(**config)
test_message = build_test_message(text='Test')
assert helpers.effective_message_type(test_message) == 'text'
test_message.text = None
test_message = build_test_message(sticker=Sticker('sticker_id', 50, 50))
assert helpers.effective_message_type(test_message) == 'sticker'
test_message.sticker = None
test_message = build_test_message(new_chat_members=[User(55, 'new_user', False)])
assert helpers.effective_message_type(test_message) == 'new_chat_members'
test_message = build_test_message(left_chat_member=[User(55, 'new_user', False)])
assert helpers.effective_message_type(test_message) == 'left_chat_member'
test_update = Update(1)
test_message = build_test_message(text='Test')
test_update.message = test_message
assert helpers.effective_message_type(test_update) == 'text'
empty_update = Update(2)
assert helpers.effective_message_type(empty_update) is None
def test_mention_html(self):
expected = '<a href="tg://user?id=1">the name</a>'
assert expected == helpers.mention_html(1, 'the name')
def test_mention_markdown(self):
expected = '[the name](tg://user?id=1)'
assert expected == helpers.mention_markdown(1, 'the name')
| 37.558442 | 90 | 0.687759 |
acf9ec9893a542c764b1852c38223e281aec27ca | 1,351 | py | Python | test/algorithms/popDensityMulti.py | OPAL-Project/OPAL-AlgoService | 542b39f31551a849f45f91b6d85bfa3238e21767 | [
"MIT"
] | 1 | 2020-03-04T15:38:52.000Z | 2020-03-04T15:38:52.000Z | test/algorithms/popDensityMulti.py | OPAL-Project/OPAL-AlgoService | 542b39f31551a849f45f91b6d85bfa3238e21767 | [
"MIT"
] | 9 | 2018-07-05T07:32:40.000Z | 2021-10-19T00:32:07.000Z | test/algorithms/popDensityMulti.py | OPAL-Project/OPAL-AlgoService | 542b39f31551a849f45f91b6d85bfa3238e21767 | [
"MIT"
] | 2 | 2018-09-16T17:17:38.000Z | 2019-05-09T13:25:52.000Z | # -*- coding: utf-8 -*-
"""Calculate population density."""
from opalalgorithms.core import OPALAlgorithm
import csv
import operator
import multiprocessing
class PopulationDensity(OPALAlgorithm):
    """Calculate population density from per-user CDR files."""

    def __init__(self):
        """Initialize population density."""
        super(PopulationDensity, self).__init__()

    def map(self, user_csv_file):
        """Return the antenna id (CSV column 5) this user used most often.

        Ties are broken by first-seen order, matching max() over dict items.
        """
        usage_counts = {}
        with open(user_csv_file, 'r') as handle:
            for record in csv.reader(handle, delimiter=','):
                antenna_id = str(record[5])
                usage_counts[antenna_id] = usage_counts.get(antenna_id, 0) + 1
        busiest = max(usage_counts.items(), key=operator.itemgetter(1))[0]
        return busiest

    def reduce(self, results_csv_file):
        """Tally, per antenna, how many users were mapped to it.

        The results file is space-delimited; the antenna id is in column 1.
        """
        per_antenna = {}
        with open(results_csv_file, 'r') as handle:
            for record in csv.reader(handle, delimiter=' '):
                antenna_id = str(record[1])
                per_antenna[antenna_id] = per_antenna.get(antenna_id, 0) + 1
        return per_antenna
| 32.166667 | 70 | 0.558105 |
acf9ecceeffe46e5f7573bb1f9e3bee389afc6f9 | 6,689 | py | Python | tensorflow/tools/pip_package/setup.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 1 | 2016-11-29T07:39:54.000Z | 2016-11-29T07:39:54.000Z | tensorflow/tools/pip_package/setup.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | null | null | null | tensorflow/tools/pip_package/setup.py | atfkaka/tensorflow | 5657d0dee8d87f4594b3e5902ed3e3ca8d6dfc0a | [
"Apache-2.0"
] | 2 | 2017-11-29T19:37:41.000Z | 2019-06-15T16:54:47.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import fnmatch
import os
import re
import sys
from setuptools import find_packages, setup, Command
from setuptools.command.install import install as InstallCommandBase
from setuptools.dist import Distribution
# Version string of the generated pip package.
_VERSION = '0.11.0'

# Runtime dependencies installed alongside the package.
REQUIRED_PACKAGES = [
    'numpy >= 1.11.0',
    'six >= 1.10.0',
    'protobuf == 3.1.0',
]

# Package name defaults to 'tensorflow' but may be overridden on the command
# line via '--project_name <name>' (the flag and its value are consumed here
# so setuptools never sees them).
project_name = 'tensorflow'
if '--project_name' in sys.argv:
    project_name_idx = sys.argv.index('--project_name')
    project_name = sys.argv[project_name_idx + 1]
    # remove() drops the flag itself; the value shifts into its slot and is
    # then removed by pop() at the same index.
    sys.argv.remove('--project_name')
    sys.argv.pop(project_name_idx)

# python3 requires wheel 0.26
if sys.version_info.major == 3:
    REQUIRED_PACKAGES.append('wheel >= 0.26')
else:
    REQUIRED_PACKAGES.append('wheel')
    # mock comes with unittest.mock for python3, need to install for python2
    REQUIRED_PACKAGES.append('mock >= 2.0.0')

# Console entry points created at install time.
# pylint: disable=line-too-long
CONSOLE_SCRIPTS = [
    'tensorboard = tensorflow.tensorboard.tensorboard:main',
]
# pylint: enable=line-too-long

# Extra dependencies needed only to run the test suite.
TEST_PACKAGES = [
    'scipy >= 0.15.1',
]
class BinaryDistribution(Distribution):
    """Distribution that always claims to contain extension modules.

    Reporting extension modules marks the distribution as non-pure, so the
    resulting wheel is tagged platform-specific (the package ships a prebuilt
    native extension rather than building one from setup.py).
    """

    def has_ext_modules(self):
        return True
class InstallCommand(InstallCommandBase):
    """Override the dir where the headers go."""

    def finalize_options(self):
        # Redirect installed C/C++ headers into the package itself
        # (<purelib>/tensorflow/include) instead of the default header dir,
        # so user ops can find them with a package-relative -I path.
        ret = InstallCommandBase.finalize_options(self)
        self.install_headers = os.path.join(self.install_purelib,
                                            'tensorflow', 'include')
        return ret
class InstallHeaders(Command):
    """Override how headers are copied.

    The install_headers that comes with setuptools copies all files to
    the same directory. But we need the files to be in a specific directory
    hierarchy for -I <include_dir> to work correctly.
    """

    description = 'install C/C++ header files'

    user_options = [('install-dir=', 'd',
                     'directory to install header files to'),
                    ('force', 'f',
                     'force installation (overwrite existing files)'),
                   ]

    boolean_options = ['force']

    def initialize_options(self):
        # Standard distutils command protocol: declare every option attribute.
        self.force = 0
        self.install_dir = None
        self.outfiles = []

    def finalize_options(self):
        # Inherit install_dir/force from the top-level 'install' command.
        self.set_undefined_options('install',
                                   ('install_headers', 'install_dir'),
                                   ('force', 'force'))

    def mkdir_and_copy_file(self, header):
        """Copy one header, preserving its path under install_dir."""
        target_dir = os.path.join(self.install_dir, os.path.dirname(header))
        # Get rid of some extra intervening directories so we can have fewer
        # directories for -I
        target_dir = re.sub('/google/protobuf/src', '', target_dir)
        # Copy eigen code into tensorflow/include.
        # A symlink would do, but the wheel file that gets created ignores
        # symlink within the directory hierarchy.
        # NOTE(keveman): Figure out how to customize bdist_wheel package so
        # we can do the symlink.
        if 'external/eigen_archive/' in target_dir:
            eigen_dir = target_dir.replace('external/eigen_archive', '')
            if not os.path.exists(eigen_dir):
                self.mkpath(eigen_dir)
            self.copy_file(header, eigen_dir)
        if not os.path.exists(target_dir):
            self.mkpath(target_dir)
        return self.copy_file(header, target_dir)

    def run(self):
        all_headers = self.distribution.headers
        if not all_headers:
            return
        self.mkpath(self.install_dir)
        for header in all_headers:
            copied_path, _ = self.mkdir_and_copy_file(header)
            self.outfiles.append(copied_path)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
def find_files(pattern, root):
    """Return all the files matching pattern below root dir."""
    for dirpath, _, filenames in os.walk(root):
        for matched_name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, matched_name)
# Non-python data files from the bazel 'external' tree that must ship in the
# wheel (paths are made relative to the package with a leading '../').
matches = ['../' + x for x in find_files('*', 'external') if '.py' not in x]

# Name of the compiled SWIG extension; Windows uses .pyd instead of .so.
if os.name == 'nt':
    EXTENSION_NAME = 'python/_pywrap_tensorflow.pyd'
else:
    EXTENSION_NAME = 'python/_pywrap_tensorflow.so'

# C/C++ headers installed into tensorflow/include (see InstallHeaders) so that
# user code can compile against TensorFlow, protobuf and Eigen.
headers = (list(find_files('*.h', 'tensorflow/core')) +
           list(find_files('*.h', 'google/protobuf/src')) +
           list(find_files('*', 'third_party/eigen3')) +
           list(find_files('*', 'external/eigen_archive')))
# Top-level packaging entry point; note the custom distclass/cmdclass that
# force a platform wheel and install headers into the package (see above).
setup(
    name=project_name,
    version=_VERSION,
    description='TensorFlow helps the tensors flow',
    long_description='',
    url='http://tensorflow.org/',
    author='Google Inc.',
    author_email='opensource@google.com',
    # Contained modules and scripts.
    packages=find_packages(),
    entry_points={
        'console_scripts': CONSOLE_SCRIPTS,
    },
    headers=headers,
    install_requires=REQUIRED_PACKAGES,
    tests_require=REQUIRED_PACKAGES + TEST_PACKAGES,
    # Add in any packaged data.
    include_package_data=True,
    package_data={
        'tensorflow': [EXTENSION_NAME,
                       'tensorboard/dist/bazel-html-imports.html',
                       'tensorboard/dist/index.html',
                       'tensorboard/dist/tf-tensorboard.html',
                       'tensorboard/lib/css/global.css',
                       'tensorboard/TAG',
                      ] + matches,
    },
    zip_safe=False,
    distclass=BinaryDistribution,
    cmdclass={
        'install_headers': InstallHeaders,
        'install': InstallCommand,
    },
    # PyPI package information.
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Libraries',
    ],
    license='Apache 2.0',
    keywords='tensorflow tensor machine learning',
)
| 31.852381 | 80 | 0.658394 |
acf9ed4ac6e0c69a599f29a2663b8b7b11b23445 | 2,053 | py | Python | nicos_demo/vrefsans/setups/special/monitor_mp-PO.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | nicos_demo/vrefsans/setups/special/monitor_mp-PO.py | ebadkamil/nicos | 0355a970d627aae170c93292f08f95759c97f3b5 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | 1 | 2021-08-18T10:55:42.000Z | 2021-08-18T10:55:42.000Z | nicos_demo/vrefsans/setups/special/monitor_mp-PO.py | ISISComputingGroup/nicos | 94cb4d172815919481f8c6ee686f21ebb76f2068 | [
"CC-BY-3.0",
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | # created by MP
# 04.12.2017 07:53:39
# to call it
# ssh refsans@refsansctrl01 oder 02
# INSTRUMENT=nicos_mlz.refsans bin/nicos-monitor -S monitor_mp-PO
# not perfect but working
description = 'Everyting at Sampleposition (ex-sample)'
group = 'special'
_big = Column(
Block('Big Goniometer with Encoder', [
BlockRow(Field(name='gonio Theta', dev='gonio_theta', width=30, unit='Grad'),),
BlockRow(Field(name='gonio Phi', dev='gonio_phi', width=30, unit='Grad'),),
BlockRow(Field(name='gonio Omega', dev='gonio_omega', width=30, unit='Grad'),),
BlockRow(Field(name='gonio Y', dev='gonio_y', width=30, unit='mm'),),
BlockRow(Field(name='gonio Z', dev='gonio_z', width=30, unit='mm'),),
],
),
)
_top = Column(
Block('Small Goniometer on Samplechanger', [
BlockRow(Field(name='gonio_top Theta',dev='gonio_top_theta',width=30, unit='Grad'),),
BlockRow(Field(name='gonio_top Phi', dev='gonio_top_phi', width=30, unit='Grad'),),
BlockRow(Field(name='gonio_top Z', dev='gonio_top_z', width=30, unit='mm'),),
],
),
)
_mix = Column(
Block('sonst1', [
# Block('Backguard', [
BlockRow(Field(name='Backguard', dev='backguard', width=30, unit='mm'),),
# ],
# Block('Samplechanger', [
BlockRow(Field(name='Samplechanger', dev='samplechanger', width=30, unit='mm'),),
# ],
# Block('Monitor', [
BlockRow(Field(name='Monitor typ', dev='prim_monitor_typ', width=30),),
BlockRow(Field(name='Monitor X', dev='prim_monitor_x', width=30, unit='mm'),),
BlockRow(Field(name='Monitor Y', dev='prim_monitor_y', width=30, unit='mm'),),
],
),
)
devices = dict(
Monitor = device('nicos.services.monitor.qt.Monitor',
title = description,
loglevel = 'info',
cache = 'localhost',
valuefont = 'Consolas',
padding = 5,
layout = [
Row(_big, _top, _mix),
],
),
)
| 36.017544 | 93 | 0.579152 |
acf9ed9d2b742822e6ed54e807c9b1f26afbb919 | 432 | py | Python | beholder/analyzer/downloader.py | eryktr/beholder | 31bf6a5bf20176fb990dc2e0f84043000bdfe188 | [
"MIT"
] | null | null | null | beholder/analyzer/downloader.py | eryktr/beholder | 31bf6a5bf20176fb990dc2e0f84043000bdfe188 | [
"MIT"
] | null | null | null | beholder/analyzer/downloader.py | eryktr/beholder | 31bf6a5bf20176fb990dc2e0f84043000bdfe188 | [
"MIT"
] | null | null | null | from beholder.analyzer.site import Site
from beholder.fetcher.fetcher import WebFetcher
class Downloader:
fetcher: WebFetcher
def __init__(self, fetcher: WebFetcher):
self.fetcher = fetcher
def download_reference(self, site: Site) -> None:
self.fetcher.fetch(site.addr, site.reference_path)
def download_updated(self, site: Site) -> None:
self.fetcher.fetch(site.addr, site.update_path)
| 27 | 58 | 0.719907 |
acf9edc2af2c21d8ff06d8fadcf5e0ce81173b25 | 1,888 | py | Python | pyvisa-py/common.py | Zanobos/pyvisa-py | 6ffdcd0f897267c5724b07589dff244cc72b15d7 | [
"MIT"
] | null | null | null | pyvisa-py/common.py | Zanobos/pyvisa-py | 6ffdcd0f897267c5724b07589dff244cc72b15d7 | [
"MIT"
] | null | null | null | pyvisa-py/common.py | Zanobos/pyvisa-py | 6ffdcd0f897267c5724b07589dff244cc72b15d7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
    pyvisa-py.common
    ~~~~~~~~~~~~~~~~
    Common code.
    :copyright: 2014 by PyVISA-py Authors, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
from __future__ import (division, unicode_literals, print_function,
absolute_import)
import logging
import sys
from pyvisa import logger
logger = logging.LoggerAdapter(logger, {'backend': 'py'})
class MockInterface(object):
    """Minimal stand-in for a VISA interface; only records its resource name."""

    def __init__(self, resource_name):
        self.resource_name = resource_name
class NamedObject(object):
    """A class to construct named sentinels."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return '<{0}>'.format(self.name)

    __str__ = __repr__
if sys.version >= '3':
    def iter_bytes(data, mask=None, send_end=False):
        """Yield *data* one byte at a time as length-1 bytes objects.

        When *mask* is given, every byte except the last has the mask bit(s)
        cleared (``& ~mask``).  With *send_end* true, the final byte instead
        has the mask bit(s) SET to mark end-of-message.

        Raises ValueError if send_end is requested without a mask.
        """
        if send_end and mask is None:
            raise ValueError('send_end requires a valid mask.')

        if mask is None:
            for d in data:
                yield bytes([d])
        else:
            for d in data[:-1]:
                yield bytes([d & ~mask])

            if send_end:
                # BUGFIX: was ``data[-1] | ~mask``.  ~mask is negative, so
                # bytes([...]) raised ValueError and the end-indicator bit
                # was never set; OR-ing with mask is the intended operation.
                yield bytes([data[-1] | mask])
            else:
                yield bytes([data[-1] & ~mask])

    int_to_byte = lambda val: bytes([val])
    last_int = lambda val: val[-1]
else:
    def iter_bytes(data, mask=None, send_end=False):
        """Python 2 variant: yields length-1 str, same masking rules."""
        if send_end and mask is None:
            raise ValueError('send_end requires a valid mask.')

        if mask is None:
            for d in data:
                yield d
        else:
            for d in data[:-1]:
                yield chr(ord(d) & ~mask)

            if send_end:
                # BUGFIX: was ``| ~mask`` (clears nothing useful and yields a
                # wrong negative-derived value); set the end bit with mask.
                yield chr(ord(data[-1]) | mask)
            else:
                yield chr(ord(data[-1]) & ~mask)

    int_to_byte = chr
    last_int = lambda val: ord(val[-1])
acf9ee1db91046c4c6c03db58665dcddb6a52a90 | 3,036 | py | Python | model_v1/tf_models.py | suchir/passenger_screening_algorithm_challenge | 65e3e3ce1889e9a100f6b9b6a53fe5c785a84612 | [
"MIT"
] | 7 | 2018-02-05T01:57:30.000Z | 2019-06-25T08:00:40.000Z | model_v1/tf_models.py | suchir/passenger_screening_algorithm_challenge | 65e3e3ce1889e9a100f6b9b6a53fe5c785a84612 | [
"MIT"
] | 1 | 2018-05-07T15:28:29.000Z | 2018-05-07T15:28:29.000Z | model_v1/tf_models.py | suchir/passenger_screening_algorithm_challenge | 65e3e3ce1889e9a100f6b9b6a53fe5c785a84612 | [
"MIT"
] | 3 | 2018-05-16T03:50:44.000Z | 2018-08-20T12:40:58.000Z | import tensorflow as tf
def hourglass_model(x, bottleneck, num_features, num_output, downsample=True):
    """Build an hourglass-style encoder/decoder network (TF1 layers API).

    Args:
        x: input tensor; reshaped internally to (batch, 256, 256, 1).
        bottleneck: spatial size at which the encoder stops downsampling.
        num_features: channel width used by all residual blocks.
        num_output: channel count of the final 1x1 convolution.
        downsample: when True, a stride-2 7x7 conv followed by 2x2 max-pool
            first reduces resolution by 4x.

    Returns:
        Feature map with num_output channels at the resolution of the first
        recorded skip connection (i.e. post-initial-downsampling size).
    """
    image_size = 256

    def block(x):
        # Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, added back to
        # the input.  NOTE(review): the add assumes x already has
        # num_features channels -- confirm for the downsample=False path.
        y = tf.layers.conv2d(x, num_features//2, 1, 1, padding='same', activation=tf.nn.relu)
        y = tf.layers.conv2d(y, num_features//2, 3, 1, padding='same', activation=tf.nn.relu)
        y = tf.layers.conv2d(y, num_features, 1, 1, padding='same', activation=tf.nn.relu)
        return x + y

    x = tf.reshape(x, [-1, image_size, image_size, 1])
    if downsample:
        x = tf.layers.conv2d(x, num_features, 7, 2, padding='same', activation=tf.nn.relu)
        x = tf.layers.max_pooling2d(x, 2, 2)

    # Encoder: record a skip connection at each resolution until the spatial
    # size drops to the bottleneck, halving resolution each iteration.
    blocks = []
    while not blocks or blocks[-1].shape[1] > bottleneck:
        x = block(x)
        blocks.append(block(x))
        x = tf.layers.max_pooling2d(x, 2, 2)
    x = block(block(block(x)))

    # Decoder: upsample to each recorded resolution and add the matching
    # skip connection back in.
    while blocks:
        size = int(blocks[-1].shape[1])
        x = tf.image.resize_images(x, (size, size))
        x += blocks[-1]
        x = block(x)
        blocks.pop()

    x = tf.layers.conv2d(x, num_output, 1, 1)
    return x
def simple_cnn(x, num_features, num_conv, activation):
    """Stack groups of 3x3 convs; 2x2 max-pool between groups (not after the last)."""
    n_groups = len(num_conv)
    for group_idx, layers_in_group in enumerate(num_conv):
        for _ in range(layers_in_group):
            x = tf.layers.conv2d(x, num_features, 3, padding='same',
                                 activation=activation)
        is_last_group = group_idx == n_groups - 1
        if not is_last_group:
            x = tf.layers.max_pooling2d(x, 2, 2)
    return x
def simple_multiview_cnn(images, zones, model, model_mode):
    """Run `model` on every view, pool per-zone features, emit 17 logits.

    Args:
        images: tensor (batch, num_angles, ..., image_size); each angle slice
            is reshaped to (batch, image_size, image_size, 1).
        zones: per-pixel zone weights, reshaped to
            (batch, num_angles, out*out, 17) -- presumably supplied as
            (batch, num_angles, out, out, 17); TODO confirm with caller.
        model: callable mapping an image tensor to a spatial feature map;
            variables are shared across angles via reuse_variables().
        model_mode: 'hybrid', 'dense', or anything else for the plain
            per-zone linear head.

    Returns:
        Logits tensor of shape (batch, 17).
    """
    image_size, num_angles = int(images.shape[-1]), int(images.shape[1])
    heatmaps = []
    with tf.variable_scope('heatmaps') as scope:
        for i in range(num_angles):
            with tf.variable_scope('model'):
                image = tf.reshape(images[:, i], [-1, image_size, image_size, 1])
                heatmaps.append(model(image))
            # Share the model's weights across all subsequent angles.
            scope.reuse_variables()
    output_size, num_features = int(heatmaps[0].shape[1]), int(heatmaps[0].shape[-1])
    zones_flat = tf.reshape(zones, [-1, num_angles, output_size*output_size, 17])
    heatmaps = tf.stack(heatmaps, axis=1)
    heatmaps_flat = tf.reshape(heatmaps, [-1, num_angles, output_size*output_size, num_features])
    # (17 x pixels) @ (pixels x features): aggregate features per zone.
    zone_features = tf.matmul(tf.transpose(zones_flat, [0, 1, 3, 2]), heatmaps_flat)
    # Max-pool each zone's features over the viewing angles.
    pooled_features = tf.reduce_max(zone_features, axis=1)
    if model_mode == 'hybrid':
        # Dense head plus an additional per-zone linear term.
        flat_features = tf.reshape(pooled_features, [-1, 17*num_features])
        W = tf.get_variable('W', [17, num_features])
        b = tf.get_variable('b', [17])
        logits = tf.layers.dense(flat_features, 17) + tf.reduce_sum(pooled_features*W, axis=-1) + b
    elif model_mode == 'dense':
        # Single fully-connected head over all zone features.
        flat_features = tf.reshape(pooled_features, [-1, 17*num_features])
        logits = tf.layers.dense(flat_features, 17)
    else:
        # Per-zone linear head: one weight vector and bias per zone.
        W = tf.get_variable('W', [17, num_features])
        b = tf.get_variable('b', [17])
        logits = tf.reduce_sum(pooled_features * W, axis=-1) + b
    return logits
def leaky_relu(x, alpha=0.5):
    """Element-wise max(x, alpha*x); for alpha in [0, 1) this is a leaky ReLU."""
    scaled = alpha * x
    return tf.maximum(scaled, x)
| 37.481481 | 99 | 0.621871 |
acf9ee9b5483530b130ce7c54d952345b59cdbe1 | 81 | py | Python | wandb_utils/file_filter/__init__.py | pavitradangati/wandb-utils | e965edc9db0f64ccaee0caeaad0676e0883bb142 | [
"MIT"
] | null | null | null | wandb_utils/file_filter/__init__.py | pavitradangati/wandb-utils | e965edc9db0f64ccaee0caeaad0676e0883bb142 | [
"MIT"
] | 9 | 2021-06-02T02:37:00.000Z | 2022-02-22T19:33:51.000Z | wandb_utils/file_filter/__init__.py | pavitradangati/wandb-utils | e965edc9db0f64ccaee0caeaad0676e0883bb142 | [
"MIT"
] | 1 | 2021-01-14T20:05:01.000Z | 2021-01-14T20:05:01.000Z | from .file_filter import FileFilter
from .glob_filter import GlobBasedFileFilter
| 27 | 44 | 0.876543 |
acf9ef7f6d228bf88d459dfe9ffff1c418f25d56 | 393 | py | Python | mergify_engine/actions/assign.py | mauricedb/mergify-engine | b7c9c66e830a4c0c1ab54459a265c90cc67c968d | [
"Apache-2.0"
] | null | null | null | mergify_engine/actions/assign.py | mauricedb/mergify-engine | b7c9c66e830a4c0c1ab54459a265c90cc67c968d | [
"Apache-2.0"
] | null | null | null | mergify_engine/actions/assign.py | mauricedb/mergify-engine | b7c9c66e830a4c0c1ab54459a265c90cc67c968d | [
"Apache-2.0"
] | null | null | null | import voluptuous
from mergify_engine import actions
class AssignAction(actions.Action):
validator = {voluptuous.Required("users", default=[]): [str]}
def run(
self,
installation_id,
installation_token,
event_type,
data,
pull,
missing_conditions,
):
pull.g_pull.as_issue().add_to_assignees(*self.config["users"])
| 20.684211 | 70 | 0.628499 |
acf9f02907c9158399c5ea5352441cec5acd7d6e | 58,647 | py | Python | src/bin/visitdiff.py | eddieTest/visit | ae7bf6f5f16b01cf6b672d34e2d293fa7170616b | [
"BSD-3-Clause"
] | null | null | null | src/bin/visitdiff.py | eddieTest/visit | ae7bf6f5f16b01cf6b672d34e2d293fa7170616b | [
"BSD-3-Clause"
] | null | null | null | src/bin/visitdiff.py | eddieTest/visit | ae7bf6f5f16b01cf6b672d34e2d293fa7170616b | [
"BSD-3-Clause"
] | 1 | 2020-03-18T23:17:43.000Z | 2020-03-18T23:17:43.000Z | ###############################################################################
#
# Purpose: Sets up a 2x2 layout of vis windows and expressions for database
# differencing of scalar variables (vector variables are still to
# be added). The intersection of the scalar variable names is
# calculated and we define CMFE expressions for them to compute
# dbl - dbr. Then, the first scalar in the set of CMFE expressions is
# plotted, along with its source values from dbl, dbr. The ell, 'l'
# and arr, 'r', nomenclature is to indicate left and right operands
# of the differencing operation.
#
# Usage: visit -diff dbl dbr [ -force_pos_cmfe ]
#
# Notes: dbl, dbr are the names of the databases that VisIt will difference.
#
# Future work: It would be nice if the CLI could define callback functions
# to be executed when the user does something such as changing
# the active variable. You can imagine that we would want to
# change the plotted variables from other windows too when
# executing such a callback function.
#
# Programmer: Mark C. Miller (based on original design by Brad Whitlock)
# Date: Wed Jul 18 10:17:11 PDT 2007
#
##############################################################################
import sys, string, os, re, time
###############################################################################
# Function: help
#
# Purpose: Print a useful help message
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def help():
    # Print the full orientation/usage text for 'visit -diff'.  Note the
    # Python 2 print statement: this script runs in VisIt's embedded CLI.
    print """
This python script is intended to be used in concert with VisIt's CLI and GUI.
It is invokved using the command 'visit -diff <dbl> <dbr> [ -force_pos_cmfe ]'.
This script will generate the necessary Cross-Mesh Field Evaluation (CMFE)
expressions to facilitate visualization and analysis of the differences between
two databases. VisIt will open windows to display both the left and right
databases as well as their difference. The windows are numbered as follows

Upper Left (1) -- shows Left-Right (a)Upper Right (2) -- shows Right-Left
Lower Left (3) -- shows Left Lower Right (4) -- shows Right

(a) only for position-based CMFE expressions.

VisIt uses the Cross-Mesh Field Evaluation (CMFE) expression functions
to generate the differences. A CMFE function creates an instance
of a variable from another (source) mesh on the specified (destination)
mesh. Therefore, window 1 (upper left) shows the difference obtained
when taking a variable from the mesh in the right database, applying the
CMFE expression function to map it onto the mesh in the left database and
then subtracting it from the same variable in the left database.

VisIt can use two variants of CMFE expression functions depending
on how similar the source and destination meshes are; connectivity-based
(conn_cmfe) which assumes the underlying mesh(s) for the left and right
databases have identical connectivity and position-based (pos_cmfe) which
does not make this assumption. VisIt will attempt to automatically select
which variant of CMFE expression to use based on some simple heuristics.

For meshes with identical connectivity, conn_cmfe expressions are
preferrable because they are higher performance and do not require VisIt
to perform any interpolation. In fact, the conn_cmfe operation is
perfectly anti-symmetric. That is Left-Right = -(Right-Left).
The same cannot be said for pos_cmfe expressions. For this reason,
window 2 (upper right) is only ever active when using position-based CMFE
expressions. It shows the (possibly different) difference obtained when
taking a variable from the mesh in the left database, applying the CMFE
expression function to map it onto the mesh in the right database and
then subtracting it from the same variable in the right database.

Pos_cmfe expressions will attempt to generate useful results regardless of
the similarity of the underlying meshes. You can force use of pos_cmfe
expressions by adding '-force_pos_cmfe' to the command line when running
'visit -diff'.

Note that the differences VisIt will compute in this mode are single
precision. This is true regardless of whether the input data is itself
double precision. VisIt will convert double precision to single
precision before processing it. Although this is a result of earlier
visualization-specific design requirements and constraints, the intention
is that eventually double precision will be supported.

Expressions for the differences for all scalar variables will be under the
'diffs' submenu. For material volume fractions, the scalar volume fraction
variables will be under the 'matvf_comps' submenu and their differences will
be under 'diffs/matvf_comps' submenu. Likewise for vector variables, their
scalar components will be under the 'vector_comps' submenu and their
differences under the 'diffs/vector_comps' submenu.

'visit -diff' is operated using a combination of VisIt's GUI and CLI.
There are a number of python functions defined in this script. These
are...

ToggleMesh() -- Toggle the mesh plot(s) on/off.
ToggleBoundary() -- Toggle the material boundary plot(s) on/off.
ToggleHidePloti() -- Toggle hiding the ith plot in the plot list(s)
DiffSummary() -- Examine all variables in the database and report a
summary of differences found in each.
ChangeVar("foo") -- Change the variable displayed in all windows to "foo".
ZPick((1,2,3)) -- Perform a zone-pick in all windows for the zone ids 1,2,3
NPick((4,5,6)) -- Perform a node-pick in all windows for the node ids 4,5,6

For the functions described above with no arguments, there are pre-defined macros
in VisIt's GUI that can be found under Controls->Macros. Not all of the convenience
functions available in this script are actionable through the GUI. Only those that
DO NOT reuquire some kind of user input are.

Finally, you should be able to do whatever operations you wish in a given window
and then synchronize all other windows to the same state. To do this, add whatever
operators, plots, as well as adjustments to plot and operator attributes you wish
to a given window. Then use the SyncWindows() method to bring all other windows
into a consistent state. For example, if you add plots and operators to the window
1 (the upper left window where L-R is displayed), then do SyncWindows(1) will bring
all other windows into an identical state.

SyncWindows(a) -- Synchronize all windows to window 'a', where a is 1...4.

There are buttons defined in Controls->Macros to perform these synchronization
operations. For example, the SyncToL-RDiff button will synchronize all windows
to be consistent with whatever was done in the window where L-R is displayed
(upper left).

Finally, if you move around in time in a given window, use the SyncTimeState()
method to synchronise all windows to the current time state.

SyncTimeStates(a) -- Synchronise all windows' time state to window 'a'.

Note that 'visit -diff' cannot currently handle differences in databases that
have a different number of time states.
"""
###############################################################################
# Function: GetDiffVarNames
#
# Purpose: Given any variable's name (in diff menu or submenus) return all
# varieties of names for it. If absolute and relative differencing
# is added, this is the place to handle the naming.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetDiffVarNames(name):
    """Return (plain_name, diff_name) for a variable.

    Accepts either a plain variable name or one already under the 'diff/'
    submenu; both varieties are returned.  If absolute and relative
    differencing is added, this is the place to handle the naming.
    """
    match = re.search("diff/(.*)", name)
    if match is None:
        return (name, "diff/%s" % name)
    return (match.group(1), name)
###############################################################################
# Function: GetNDomains
#
# Purpose: Return number of domains for a given mesh
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetNDomains(metadata, meshname):
    """Return the number of domains (blocks) of the named mesh, or 1 if absent."""
    for idx in range(metadata.GetNumMeshes()):
        mesh_md = metadata.GetMeshes(idx)
        if mesh_md.name == meshname:
            return mesh_md.numBlocks
    return 1
###############################################################################
# Function: GetMeshType
#
# Purpose: Return type of given mesh
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetMeshType(metadata, meshname):
    """Return the avt mesh type of the named mesh, or -1 when not found."""
    for idx in range(metadata.GetNumMeshes()):
        mesh_md = metadata.GetMeshes(idx)
        if mesh_md.name == meshname:
            return mesh_md.meshType
    return -1
###############################################################################
# Function: GetVarInfo
#
# Purpose: Return a named portion of a metadata object
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetVarInfo(metadata, varname):
    """Return the metadata object whose .name matches varname, else 0.

    Searches the variable classes in the same order as the original code:
    scalars, meshes, materials, vectors, arrays, curves, labels, tensors.
    Getter methods are only touched for non-empty categories (lazy lookup).
    """
    for kind in ("Scalars", "Meshes", "Materials", "Vectors",
                 "Arrays", "Curves", "Labels", "Tensors"):
        for idx in range(getattr(metadata, "GetNum" + kind)()):
            info = getattr(metadata, "Get" + kind)(idx)
            if info.name == varname:
                return info
    return 0
###############################################################################
# Function: GetVarType
#
# Purpose: Return a variable's avt type
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to check defined expressions and expressions in metadata
#
###############################################################################
def GetVarType(metadata, varname):
    """Return the avt type name of a variable ("Scalar", "Mesh", ...).

    Falls back through three sources: the database metadata, the CLI's
    currently defined expressions (via the VisIt builtin Expressions()),
    and expressions stored in the database metadata.  Returns "Unknown"
    when no source can classify the variable.
    """
    theType = "Unknown"
    vInfo = GetVarInfo(metadata, varname)
    if vInfo != 0:
        # Parse the type out of the repr of the metadata object's class,
        # e.g. "<type 'avtScalarMetaData'>" -> "Scalar".  Fragile: depends
        # on the Python 2 repr format of the wrapped classes.
        tmpType = re.search("<type 'avt([A-Z][a-z]*)MetaData'>", str(type(vInfo)))
        if tmpType != None:
            theType = tmpType.group(1)
    # if we don't have an answer, look at currently defined expressions
    if theType == "Unknown":
        el = Expressions()
        i = 0
        while i < len(el) and theType == "Unknown":
            exp = el[i]
            if exp[0] == varname:
                theType = "Scalar" # assume its a scalar
                break
            i = i + 1
    # if we don't have an answer, look at expressions from the database
    if theType == "Unknown":
        el = metadata.GetExprList()
        for i in range(el.GetNumExpressions()):
            exp = el.GetExpressions(i)
            if exp.name == varname:
                # Parse the type out of the expression's string form,
                # e.g. "type = ScalarMeshVar" -> "Scalar".
                tmpType = re.search("\ntype = ([A-Z][a-z]*)MeshVar", str(exp))
                if tmpType != None:
                    theType = tmpType.group(1)
                    break
    return theType
###############################################################################
# Function: MeshForVar
#
# Purpose: Determine the mesh for a given variable
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MeshForVar(metadata, varname, dontCheckExpressions=0):
    """Return the name of the mesh a variable lives on, or "Unknown".

    Checks, in order: the variable's own metadata (its meshName field), the
    mesh list itself (varname may BE a mesh), the CLI's current expressions,
    and expressions stored in the database.  For expressions, the definition
    is tokenized and each token is resolved recursively (with
    dontCheckExpressions=1 to avoid unbounded recursion through expressions).
    """
    meshName = "Unknown"
    vInfo = GetVarInfo(metadata, varname)
    if vInfo != 0 and hasattr(vInfo, "meshName"):
        # Parse meshName out of the metadata object's string form.
        tmpMeshName = re.search("\nmeshName = \"(.*)\"\n",str(vInfo))
        if tmpMeshName != None:
            meshName = tmpMeshName.group(1)
    else:
        # look at meshes themselves
        for i in range(metadata.GetNumMeshes()):
            if metadata.GetMeshes(i).name == varname:
                meshName = varname
                break
    # Translation table used to break expression definitions into tokens at
    # parens/angle-brackets/commas/colons.  NOTE: string.maketrans is
    # Python 2 only; this script targets VisIt's py2 CLI.
    ttab = string.maketrans("()<>,:","@@@@@@")
    # if we don't yet have an answer, look at current expressions
    if meshName == "Unknown" and dontCheckExpressions == 0:
        exprList = Expressions()
        i = 0;
        while i < len(exprList) and meshName == "Unknown":
            theExpr = exprList[i]
            if theExpr[0] == varname:
                defnTmp = string.translate(theExpr[1],ttab)
                defnFields = defnTmp.split('@')
                for f in defnFields:
                    # Recurse on each token of the definition.
                    meshNameTmp = MeshForVar(metadata, f, 1)
                    if meshNameTmp != "Unknown":
                        meshName = meshNameTmp
                        break
            i = i + 1
    # if we don't yet have an answer, look at expressions from database
    if meshName == "Unknown" and dontCheckExpressions == 0:
        exprList = metadata.GetExprList()
        i = 0;
        while i < exprList.GetNumExpressions() and meshName == "Unknown":
            theExpr = exprList.GetExpressions(i)
            if theExpr.name == varname:
                defnTmp = string.translate(theExpr.definition,ttab)
                defnFields = defnTmp.split('@')
                for f in defnFields:
                    meshNameTmp = MeshForVar(metadata, f, 1)
                    if meshNameTmp != "Unknown":
                        meshName = meshNameTmp
                        break
            i = i + 1
    return meshName
###############################################################################
# Function: MatForMesh
#
# Purpose: Return a material object for a given mesh
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MatForMesh(metadata, meshname):
    """Return the name of the first material defined on meshname (None if none)."""
    for idx in range(metadata.GetNumMaterials()):
        mat_md = metadata.GetMaterials(idx)
        if mat_md.meshName == meshname:
            return mat_md.name
###############################################################################
# Function: GetVarCentering
#
# Purpose: Return the centering for a given variable
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetVarCentering(metadata, varname):
    """Return the AVT centering token of varname, or "Unknown".

    The centering (e.g. AVT_NODECENT) is parsed out of the string form of
    the variable's metadata object.
    """
    var_info = GetVarInfo(metadata, varname)
    if var_info == 0 or not hasattr(var_info, "centering"):
        return "Unknown"
    match = re.search("\ncentering = (AVT_[A-Z]*) *#.*\n", str(var_info))
    if match is None:
        return "Unknown"
    return match.group(1)
###############################################################################
# Function: IsNotScalarVarPlotType
#
# Purpose: Return whether or not the given plot type supports simple scalar
# variables.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def IsNotScalarVarPlotType(plotType):
    """Return 1 when the plot type does NOT take a simple scalar variable, else 0."""
    non_scalar_plots = ("Mesh", "Boundary", "FilledBoundary",
                        "Vector", "Molecule", "Subset")
    if PlotPlugins()[plotType] in non_scalar_plots:
        return 1
    return 0
###############################################################################
# Function: GetCurrentTimeState
#
# Purpose: Given a window id, return the current time state in that window
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def GetCurrentTimeState(win):
    """Make window win active and return its current time state.

    Returns 0 when the window has no active time slider.
    """
    SetActiveWindow(win)
    info = GetWindowInformation()
    slider = info.activeTimeSlider
    if slider == -1:
        return 0
    return info.timeSliderCurrentStates[slider]
###############################################################################
# Function: SyncTimeStates
#
# Purpose: Ensure that the various data structures of this script are brought up
# to date with the current time state of the specified source window.
# Also, ensure that all windows' time states are brought up to date with
# the specified window's time state.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncTimeStates(srcWin):
global currentTimeState
global mdl
global mdr
global dbl
global dbr
if currentTimeState != -1:
# no work to do if the current time state is already set
tmpCurrentTimeState = GetCurrentTimeState(srcWin)
if currentTimeState == tmpCurrentTimeState:
print "Time state is up to date"
return
print "Updating time state to state %d"%tmpCurrentTimeState
currentTimeState = tmpCurrentTimeState
else:
print "Updating time state to state 0"
currentTimeState = 0
TimeSliderSetState(currentTimeState)
# There is a bug with correlations when time arg is used to GetMetaData.
# Without it, it turns out we always get state zero.
# mdl = GetMetaData(dbl, currentTimeState)
# mdr = GetMetaData(dbr, currentTimeState)
mdl = GetMetaData(dbl)
mdr = GetMetaData(dbr)
if mdl.numStates != mdr.numStates:
print "Database \"%s\" has %d states"%(dbl, mdl.numStates)
print "Database \"%s\" has %d states"%(dbr, mdr.numStates)
print "Currently, 'visit -diff' is unable to handle databases with different numbers of states"
sys.exit(4)
UpdateExpressions(mdl, mdr)
###############################################################################
# Function: SyncTime...
#
# Purpose: Stubs to register as macros
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncTimeL_R():
    # Macro stub: sync time states using window 1 (L-CMFE(R)) as the source.
    SyncTimeStates(1)
def SyncTimeR_L():
    # Macro stub: sync time states using window 2 (R-CMFE(L)) as the source.
    SyncTimeStates(2)
def SyncTimeLeft():
    # Macro stub: sync time states using window 3 (left db) as the source.
    SyncTimeStates(3)
def SyncTimeRight():
    # Macro stub: sync time states using window 4 (right db) as the source.
    SyncTimeStates(4)
###############################################################################
# Function: ProcessCLArgs
#
# Purpose: Read the command line arguments
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to set noWinMode
#
# Brad Whitlock, Wed Feb 3 17:08:34 PST 2010
# I added an -diff_format argument to compensate for the recent loss
# of --assume_format elsewhere in VisIt.
#
###############################################################################
def ProcessCLArgs():
global dbl
global dbr
global forcePosCMFE
global diffSummaryOnly
global noWinMode
try:
i = 1
while i < len(sys.argv):
if sys.argv[i] == "-vdiff":
dbl = sys.argv[i+1]
dbr = sys.argv[i+2]
i = i + 2
if sys.argv[i] == "-force_pos_cmfe":
forcePosCMFE = 1
if sys.argv[i] == "-summary_only":
diffSummaryOnly = 1
if sys.argv[i] == "-nowin":
noWinMode = 1
if sys.argv[i] == "-diff_format":
SetPreferredFileFormats(sys.argv[i+1])
i = i + 1
i = i + 1
except:
print "The -vdiff flag takes 2 database names.", dbl, dbr
sys.exit(1)
if dbl == "notset" or dbr == "notset":
print "The -vdiff argument was not given."
sys.exit(2)
###############################################################################
# Function: UpdateThisExpression
#
# Purpose: Given the list of currently defined expressions, determine if the
# new expression (exprName, expr) is being added, updated or left
# unchanged.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
                         updatedExpressions, unchangedExpressions):
    """Classify (exprName, expr) against the currently defined expressions
    and add or update it as needed.

    Appends exprName to exactly one of addedExpressions,
    updatedExpressions or unchangedExpressions.
    """
    for current in currentExpressions:
        if current[0] != exprName:
            continue
        if current[1] == expr:
            # Same name, same definition: nothing to do.
            unchangedExpressions.append(exprName)
        else:
            # Same name, new definition: replace it.
            DeleteExpression(exprName)
            DefineScalarExpression(exprName, expr)
            updatedExpressions.append(exprName)
        break
    else:
        # Name was not defined yet: add it.
        DefineScalarExpression(exprName, expr)
        addedExpressions.append(exprName)
###############################################################################
# Function: UpdateExpressions
#
# Purpose: Define various expressions needed to represent the difference
# between corresponding variables in the left and right databases.
#
# First, we get the currently defined expressions and remove any
# that come from the database metadata.
#
# Next, we iterate over all scalar variables defining either conn_
# or pos_ cmfes for their difference. Note: We don't really handle
# the R-PosCMFE(L) case yet.
#
# Next, we iterate over all material objects, defining matvf
# expressions for each material as a scalar and then difference
# expressions for these scalars. Likewise for vector variables.
#
# Finally, we use UpdateThisExpression to ensure we don't re-define
# the same expressions and remove old expressions as we vary time
# states.
#
# Programmer: Brad Whitlock
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Wed Jul 18 18:12:28 PDT 2007
# Made it work on material volume fractions and vectors. Made it handle
# changes in timesteps along with adding new expressions for new variables,
# deleting old expressions and leaving unchanged expressions alone.
#
# Mark C. Miller, Thu Jul 19 21:36:47 PDT 2007
# Inverted loops to identify pre-defined expressions coming from md.
#
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Thu Dec 3 20:53:21 PST 2009
# Apply patch from Cihan Altinay for typo of 'numDims' on vector var
# metadata to 'varDims'
###############################################################################
def UpdateExpressions(mdl, mdr):
global forcePosCMFE
global cmfeMode
global diffVars
if diffSummaryOnly == 0:
print "Defining expressions for state %d"%currentTimeState
cmfeModeNew = 0
diffVarsNew = []
addedExpressions = []
updatedExpressions = []
deletedExpressions = []
unchangedExpressions = []
currentExpressionsTmp = Expressions()
currentExpressionsList = []
# remove any pre-defined expressions in currentExpressions
# coming from the metadata
for expr_i in range(len(currentExpressionsTmp)):
foundIt = 0
# Look for it in the left db's metadata
for expr_j in range(mdl.GetExprList().GetNumExpressions()):
if currentExpressionsTmp[expr_i][0] == \
mdl.GetExprList().GetExpressions(expr_j).name:
foundIt = 1
break
if foundIt == 0:
# Look for it in the right db's metadata
for expr_j in range(mdr.GetExprList().GetNumExpressions()):
if currentExpressionsTmp[expr_i][0] == \
mdr.GetExprList().GetExpressions(expr_j).name:
foundIt = 1
break
# If we didn't find it in either left or right dbs md, it is
# NOT a pre-defined expression. So, we can keep it.
if foundIt == 0:
currentExpressionsList.append(currentExpressionsTmp[expr_j])
currentExpressions = tuple(currentExpressionsList)
# Iterate over all the scalar variables in metadata.
for scalar_i1 in range(mdl.GetNumScalars()):
for scalar_i2 in range(mdr.GetNumScalars()):
valid = mdl.GetScalars(scalar_i1).validVariable and \
mdr.GetScalars(scalar_i2).validVariable
namematch = mdl.GetScalars(scalar_i1).name == \
mdr.GetScalars(scalar_i2).name
if valid and namematch:
# Create the expression name.
if mdl.GetScalars(scalar_i1).name[0] == '/':
exprName = "diff" + mdl.GetScalars(scalar_i1).name
else:
exprName = "diff/" + mdl.GetScalars(scalar_i1).name
# The name of the scalar
sName = mdl.GetScalars(scalar_i1).name
qsName = sName
if string.find(qsName, "/") != -1:
qsName = "<" + qsName + ">"
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = mdl.GetScalars(scalar_i1).meshName
m2Name = mdr.GetScalars(scalar_i2).meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "%s - conn_cmfe(<%s:%s>, %s)" % (qsName, dbr, sName, m1Name)
else:
expr = "%s - pos_cmfe(<%s:%s>, %s, 0.)" % (qsName, dbr, sName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Iterate over all the material variables in metadata.
for mat_i1 in range(mdl.GetNumMaterials()):
for mat_i2 in range(mdr.GetNumMaterials()):
matl = mdl.GetMaterials(mat_i1)
matr = mdr.GetMaterials(mat_i2)
valid = matl.validVariable and matr.validVariable
nameMatch = matl.name == matr.name
numMatsMatch = matl.numMaterials == matr.numMaterials
matNamesMatch = matl.materialNames == matr.materialNames
if valid and nameMatch and numMatsMatch and matNamesMatch:
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = matl.meshName
m2Name = matr.meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
for m in range(matl.numMaterials):
# Create the matvf expression for this mat
matName = matl.materialNames[m]
altMatName = matName.replace(" ","_")
matNum = matName.split(' ')[0]
matvfExprName = "matvf_comps/" + altMatName
matvfexpr = "matvf(%s,[%s])"%(matl.name, matNum)
UpdateThisExpression(matvfExprName, matvfexpr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Create the expression for the difference in matvfs for this mat
exprName = "diff/matvf_comps/" + altMatName
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "<matvf_comps/%s> - conn_cmfe(<%s:matvf_comps/%s>, %s)" % (altMatName, dbr, altMatName, m1Name)
else:
expr = "<matvf_comps/%s> - pos_cmfe(<%s:matvf_comps/%s>, %s, 0.)" % (altMatName, dbr, altMatName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Iterate over all the vector variables in metadata.
for vec_i1 in range(mdl.GetNumVectors()):
for vec_i2 in range(mdr.GetNumVectors()):
vecl = mdl.GetVectors(vec_i1)
vecr = mdr.GetVectors(vec_i2)
valid = vecl.validVariable and vecr.validVariable
nameMatch = vecl.name == vecr.name
numDimsMatch = vecl.varDim == vecr.varDim
if valid and nameMatch and numDimsMatch:
# Determine some properties about the mesh so we can decide
# Whether we'll use conn_cmfe or pos_cmfe.
m1Name = vecl.meshName
m2Name = vecr.meshName
nb1 = GetNDomains(mdl, m1Name)
mt1 = GetMeshType(mdl, m1Name)
nb2 = GetNDomains(mdr, m2Name)
mt2 = GetMeshType(mdr, m2Name)
for m in range(vecl.varDim):
# Create the expression to extract a component for this vector
compName = vecl.name + "%02d"%m
vecExprName = "vector_comps/" + compName
vecexpr = "%s[%d]"%(vecl.name, m)
UpdateThisExpression(vecExprName, vecexpr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Create the expression for the difference in components
exprName = "diff/vector_comps/" + compName
if nb1 == nb2 and mt1 == mt2 and m1Name == m2Name and forcePosCMFE != 1:
expr = "<vector_comps/%s> - conn_cmfe(<%s:vector_comps/%s>, %s)" % (compName, dbr, compName, m1Name)
else:
expr = "<vector_comps/%s> - pos_cmfe(<%s:vector_comps/%s>, %s, 0.)" % (compName, dbr, compName, m1Name)
cmfeModeNew = 1
diffVarsNew.append(exprName)
UpdateThisExpression(exprName, expr, currentExpressions, addedExpressions,
updatedExpressions, unchangedExpressions)
# Finally, delete any expressions we aren't using anymore.
for expr_i in range(len(currentExpressions)):
foundExprName = 0
for expr_j in range(len(unchangedExpressions)):
if unchangedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
for expr_j in range(len(updatedExpressions)):
if updatedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
for expr_j in range(len(addedExpressions)):
if addedExpressions[expr_j] == currentExpressions[expr_i][0]:
foundExprName = 1
break
# if foundExprName == 0:
# DeleteExpression(currentExpressions[expr_i][0])
# deletedExpressions.append(currentExpressions[expr_i][0])
# Print out some information about what we did
if diffSummaryOnly == 0:
if len(addedExpressions) > 0:
print " Added %d expressions..."%len(addedExpressions)
for expr_i in range(len(addedExpressions)):
print " %s"%addedExpressions[expr_i]
if len(unchangedExpressions) > 0:
print " Unchanged %d expressioons..."%len(unchangedExpressions)
for expr_i in range(len(unchangedExpressions)):
print " %s"%unchangedExpressions[expr_i]
if len(updatedExpressions) > 0:
print " Updated %d expressions..."%len(updatedExpressions)
for expr_i in range(len(updatedExpressions)):
print " %s"%updatedExpressions[expr_i]
if len(deletedExpressions) > 0:
print " Deleted %d expressions"%len(deletedExpressions)
for expr_i in range(len(deletedExpressions)):
print " %s"%deletedExpressions[expr_i]
print "Finished defining expressions"
cmfeMode = cmfeModeNew
diffVarsNew.sort()
diffVars = diffVarsNew
###############################################################################
# Function: Initialize
#
# Purpose: Setup the initial windows and behavior
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to return early when in nowin mode; for testing
#
# Brad Whitlock, Wed Feb 3 17:124:23 PST 2010
# Don't use SetWindowLayout because it causes small test baseline images.
#
###############################################################################
def Initialize():
    """Set up the initial 2x2 window layout, plots, annotations and macros.

    Opens both databases, clones window 1 three times, defines the diff
    expressions (via SyncTimeStates), adds a Pseudocolor plot of the
    first diff variable to each window, and annotates each window with
    its role.  Exits when a database cannot be opened (3) or no common
    scalars exist (5).  In -nowin mode, returns before opening the GUI.
    """
    global winDbMap
    global cmfeMode
    global oldw
    global noWinMode
    #
    # Open left and right database operands
    #
    if OpenDatabase(dbl) == 0:
        print "VisIt could not open ", dbl
        sys.exit(3)
    if OpenDatabase(dbr) == 0:
        print "VisIt could not open ", dbr
        sys.exit(3)
    #
    # Make a 2x2 window layout as follows
    # 1: L-CMFE(R) 2: R-CMFE(L) -- only when cmfeMode==1
    # 3: L 4: R
    SetCloneWindowOnFirstRef(1)
    # Lock time and view so all four windows stay in step.
    ToggleLockTime()
    ToggleLockViewMode()
    for i in (0,1,2):
        SetActiveWindow(1)
        CloneWindow()
    SetActiveWindow(1)
    SyncTimeStates(0)
    # If we were able to create any expressions, let's set up some plots based on the
    # first one. That way, we can also set up some annotations.
    winDbMap = {1 : dbl, 2 : dbr, 3 : dbl, 4 : dbr}
    if len(diffVars) > 0:
        theVar = GetDiffVarNames(diffVars[0])
        # Windows 1 & 2 show the diff expression; 3 & 4 the raw variable.
        windowsToVars = {1 : theVar[1], 2 : theVar[1], 3 : theVar[0], 4 : theVar[0]}
        for win in (1,2,3,4):
            SetActiveWindow(win)
            DeleteAllPlots()
            ActivateDatabase(winDbMap[win])
            if win == 2 and cmfeMode == 0:
                continue
            AddPlot("Pseudocolor", windowsToVars[win])
    else:
        print "No plots are being set up by default since the databases did not have any scalars in common."
        sys.exit(5)
    # Set up text annotations.
    windowsToAnnots = {1 : "L-ConnCMFE(R)", 2 : "Unused", 3 : "Left-db", 4 : "Right-db"}
    if cmfeMode == 1:
        windowsToAnnots = {1 : "L-PosCMFE(R)", 2 : "R-PosCMFE(L)", 3 : "Left-db", 4 : "Right-db"}
    for win in (1,2,3,4):
        if win == 2 and cmfeMode == 0:
            continue
        SetActiveWindow(win)
        annot = CreateAnnotationObject("Text2D")
        annot.text = windowsToAnnots[win]
        annot.height = 0.03
        annot.position = (0.70,0.95)
        annot.useForegroundForTextColor = 0
        annot.textColor = (255,0,0,255)
        annot.fontBold = 1
    SetActiveWindow(1)
    CreateDatabaseCorrelation("DIFF", (dbl, dbr), 0)
    # Open the GUI
    if noWinMode == 0:
        OpenGUI()
    else:
        return
    SetWindowArea(410,0,1100,1100)
    # Register macro only seems to work from window 1
    SetActiveWindow(1)
    RegisterMacro("DiffSummary", DiffSummary)
    RegisterMacro("ToggleMesh", ToggleMesh)
    RegisterMacro("ToggleBoundary", ToggleBoundary)
    RegisterMacro("SyncWinsL-R", SyncWinsL_R)
    RegisterMacro("SyncWinsR-L", SyncWinsR_L)
    RegisterMacro("SyncWinsLeft", SyncWinsLeft)
    RegisterMacro("SyncWinsRight", SyncWinsRight)
    RegisterMacro("SyncTimeL-R", SyncTimeL_R)
    RegisterMacro("SyncTimeR-L", SyncTimeR_L)
    RegisterMacro("SyncTimeLeft", SyncTimeLeft)
    RegisterMacro("SyncTimeRight", SyncTimeRight)
    RegisterMacro("ToggleHidePlot0", ToggleHidePlot0)
    RegisterMacro("ToggleHidePlot1", ToggleHidePlot1)
    RegisterMacro("ToggleHidePlot2", ToggleHidePlot2)
    RegisterMacro("ToggleHidePlot3", ToggleHidePlot3)
    RegisterMacro("ToggleHidePlot4", ToggleHidePlot4)
    RegisterMacro("ToggleHidePlot5", ToggleHidePlot5)
    for win in (1,2,3,4):
        SetActiveWindow(win)
        DrawPlots()
    SetActiveWindow(1)
    if diffSummaryOnly == 0:
        print "Type 'help()' to get more information on using 'visit -diff'"
###############################################################################
# Function: ChangeVar
#
# Purpose: Change the currently plotted variable in all windows
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added logic to detect use of var from 'diff/' menu and issue warning
#
###############################################################################
def ChangeVar(new_var):
leadingDiff = re.search("^diff/(.*)", new_var)
if leadingDiff != None:
print "Passed variable from 'diff/' menu to ChangeVar()."
print "Pass only the original name of the variable to ChangeVar()."
print "Removing leading 'diff/' and using name \"%s\""%leadingDiff.group(1)
new_var = leadingDiff.group(1)
varType = GetVarType(mdl, new_var)
if varType == "Unknown":
print "Unable to find variable type for variable \"%s\""%new_var
return
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotToChange = -1
pl = GetPlotList()
for p in range(pl.GetNumPlots()):
plotType = pl.GetPlots(p).plotType
plotTypeName = PlotPlugins()[plotType]
if varType == "Material" and \
(plotTypeName == "Boundary" or \
plotTypeName == "FilledBoundary"):
plotToChange = p
elif varType == "Scalar" and \
(plotTypeName == "Contour" or \
plotTypeName == "Histogram" or \
plotTypeName == "Pseudocolor" or \
plotTypeName == "Spreadsheet" or \
plotTypeName == "Surface" or \
plotTypeName == "Volume"):
plotToChange = p
elif varType == "Vector" and \
(plotTypeName == "Streamline" or \
plotTypeName == "Vector" or \
plotTypeName == "Truecolor"):
plotToChange = p
elif varType == plotTypeName:
plotToChange = p
if plotToChange != -1:
break
if plotToChange != -1:
SetActivePlots((p,))
if win == 1:
ChangeActivePlotsVar("diff/%s"%new_var);
else:
ChangeActivePlotsVar(new_var);
else:
print "Unable to find an existing plot compatible with the variable \"%s\""%new_var
SetActiveWindow(1)
###############################################################################
# Function: HideAllUnHiddenPlots
#
# Purpose: Hides all plots that are currently NOT hidden in the specified
# window
#
# Programmer: Mark C. Miller
# Date: Mon Aug 27 16:58:29 PDT 2007
#
###############################################################################
def HideAllUnHiddenPlots(winId):
    """Hide every currently visible plot in window winId.

    Returns a tuple of the plot indices that were hidden, suitable for
    passing to UnHideAllUnHiddenPlots to restore them.
    """
    SetActiveWindow(winId)
    pl = GetPlotList()
    visible = [p for p in range(pl.GetNumPlots())
               if pl.GetPlots(p).hiddenFlag == 0]
    SetActivePlots(tuple(visible))
    HideActivePlots()
    return tuple(visible)
###############################################################################
# Function: UnHideAllUnHiddenPlots
#
# Purpose: Undoes the effect of HideAllUnHiddenPlots.
#
# Programmer: Mark C. Miller
# Date: Mon Aug 27 16:58:29 PDT 2007
#
###############################################################################
def UnHideAllUnHiddenPlots(winId, plotsToUnHide):
    # Undo HideAllUnHiddenPlots: re-toggle visibility of the plots it hid.
    SetActiveWindow(winId)
    SetActivePlots(plotsToUnHide)
    HideActivePlots()
###############################################################################
# Function: ToggleHidePlot
#
# Purpose: Toggle hiding a specified plot id
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def ToggleHidePlot(plotId):
# determine target of the toggle (to hide or unhide)
hiddenTarget = 0
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotList = GetPlotList()
if plotId >= plotList.GetNumPlots():
print "Plot id %d is out of range 0...%d"%(plotId,plotList.GetNumPlots()-1)
return
if plotList.GetPlots(plotId).hiddenFlag == 1:
hiddenTarget = hiddenTarget - 1
else:
hiddenTarget = hiddenTarget + 1
# At this point, if hiddenTarget is largely negative, the target
# state is to UNhide the plots, else hide the plots
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
plotList = GetPlotList()
if plotList.GetPlots(plotId).hiddenFlag == 1:
if hiddenTarget <= 0:
SetActivePlots((plotId,))
HideActivePlots()
else:
if hiddenTarget > 0:
SetActivePlots((plotId,))
HideActivePlots()
SetActiveWindow(1)
def ToggleHidePlot0():
    # Macro stub: toggle hiding of plot index 0 in every window.
    ToggleHidePlot(0)
def ToggleHidePlot1():
    # Macro stub: toggle hiding of plot index 1 in every window.
    ToggleHidePlot(1)
def ToggleHidePlot2():
    # Macro stub: toggle hiding of plot index 2 in every window.
    ToggleHidePlot(2)
def ToggleHidePlot3():
    # Macro stub: toggle hiding of plot index 3 in every window.
    ToggleHidePlot(3)
def ToggleHidePlot4():
    # Macro stub: toggle hiding of plot index 4 in every window.
    ToggleHidePlot(4)
def ToggleHidePlot5():
    # Macro stub: toggle hiding of plot index 5 in every window.
    ToggleHidePlot(5)
###############################################################################
# Function: TogglePlot
#
# Purpose: Toggle a specified plot type on/off
#
# Determine all <plotTypeName> plots to be displayed or hidden based on
# the plot variables currently in window 1. First, find all the
# plots that are <plotTypeName> plots and record their hidden state in
# the plotInfo map. Next, find all the plots that are not <plotTypeName>,
# and see if the associated <plotTypeName> for those plots is already in
# the plotInfo map. If it is, then that variable's <plotTypeName> is already
# present and its status is recorded. Otherwise, that variable's
# <plotTypeName> gets added to the plotInfo map with a status of 0 (!exist)
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def TogglePlot(plotTypeName):
    """Toggle all plots of the given type (e.g. "Mesh", "Boundary") on or
    off in every window.

    Per window, plotInfo maps each <plotTypeName> plot's variable to a
    (status, plotIndex) pair where status is 0 (does not exist yet),
    1 (exists, hidden) or 2 (exists, displayed).  A majority vote taken
    in window 1 decides whether the target state is "on" or "off", and
    each window then hides, unhides or adds plots accordingly.
    """
    for win in (1,2,3,4):
        if win == 2 and cmfeMode == 0:
            continue
        plotInfo = {}
        SetActiveWindow(win)
        pl = GetPlotList()
        # First pass: record existing <plotTypeName> plots and whether
        # they are hidden or displayed, keyed by their (de-diffed) var.
        for p in range(pl.GetNumPlots()):
            plot = pl.GetPlots(p)
            if PlotPlugins()[plot.plotType] == plotTypeName:
                plotName = plot.plotVar
                # Strip a leading "diff/"; the "(.*)" fallback always
                # matches, so the inner except is effectively dead code.
                try:
                    plotName = re.search("diff/(.*)",plot.plotVar).group(1)
                except:
                    try:
                        plotName = re.search("(.*)",plot.plotVar).group(1)
                    except:
                        plotName = plot.plotVar
                if plot.hiddenFlag == 1:
                    plotInfo[plotName] = (1, p) # exists and is hidden
                else:
                    plotInfo[plotName] = (2, p) # exists and is displayed
        #
        # Second pass for the non-<plotTypeName> plots. Will determine only
        # <plotTypeName> plots that need to be added.
        #
        for p in range(pl.GetNumPlots()):
            plot = pl.GetPlots(p)
            if PlotPlugins()[plot.plotType] != plotTypeName:
                varName = plot.plotVar
                try:
                    varName = re.search("diff/(.*)",plot.plotVar).group(1)
                except:
                    try:
                        varName = re.search("(.*)",plot.plotVar).group(1)
                    except:
                        varName = plot.plotVar
                # Map this plot's variable to the object a <plotTypeName>
                # plot would be made of (its mesh, or its mesh's material).
                plotName ="Unknown"
                if plotTypeName == "Mesh":
                    plotName = MeshForVar(mdl,varName)
                elif plotTypeName == "Boundary":
                    plotName = MeshForVar(mdl,varName)
                    plotName = MatForMesh(mdl,plotName)
                if plotName == "Unknown":
                    continue
                if plotName not in plotInfo:
                    plotInfo[plotName] = (0, p)
        #
        # At this point, plotInfo is populated with the names of all the <plotTypeName>
        # plots and whether they are currently non-existant (0), hidden (1) or
        # displayed (2) along with their index (p) in the plot list. So, now,
        # we determine the target state of the TogglePlot command. Should the
        # <plotTypeName> plot(s) be on (that is exist and displayed) or off (not-exist
        # or hidden)? In general, the situation can be mixed at this point and
        # so we determine based on majority status
        #
        # NOTE: targetState is computed only for win==1 and deliberately
        # reused for windows 2-4 so all windows reach the same state.
        if win == 1:
            targetState = 0
            for m in plotInfo.keys():
                if plotInfo[m][0] == 0 or plotInfo[m][0] == 1:
                    targetState = targetState + 1
                else:
                    targetState = targetState - 1
        #
        # First handle toggling of existing plots (hidden or displayed)
        #
        plotsToToggle = []
        for m in plotInfo.keys():
            if targetState > 0 and plotInfo[m][0] == 1:
                plotsToToggle.append(plotInfo[m][1])
            if targetState <= 0 and plotInfo[m][0] == 2:
                plotsToToggle.append(plotInfo[m][1])
        if len(plotsToToggle) > 0:
            SetActivePlots(tuple(plotsToToggle))
            HideActivePlots()
        #
        # Now handle adding new <plotTypeName> plots if needed
        #
        if targetState > 0:
            for m in plotInfo.keys():
                if plotInfo[m][0] == 0:
                    AddPlot(plotTypeName, m)
        DrawPlots()
    SetActiveWindow(1)
def ToggleMesh():
    # Macro stub: toggle Mesh plots on/off in all windows.
    TogglePlot("Mesh")
def ToggleBoundary():
    # Macro stub: toggle Boundary plots on/off in all windows.
    TogglePlot("Boundary")
###############################################################################
# Function: MinimizePickOutput
#
# Purpose: Reduce output generated by pick on stdout to bare minimum for
# PickLoop function.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def MinimizePickOutput():
    """Reduce pick output on stdout to the bare minimum for PickLoop.

    The current pick attributes are saved in the global pa_orig so that
    UnMinimizePickOutput can restore them afterwards.
    """
    global pa_orig
    SuppressQueryOutputOn()
    pa_orig = GetPickAttributes()
    # Fetch a second, independent attributes object to modify. The
    # original did 'pa = pa_orig', which aliased the saved object, so the
    # mutations below clobbered the saved state and UnMinimizePickOutput
    # restored the minimized settings instead of the user's originals.
    pa = GetPickAttributes()
    pa.displayIncidentElements = 0
    pa.showNodeId = 0
    pa.showTimeStep = 0
    pa.showMeshName = 0
    pa.showZoneId = 0
    pa.displayPickLetter = 1
    SetPickAttributes(pa)
###############################################################################
# Function: UnMinimizePickOutput
#
# Purpose: Undue the reduction in pick output made by MinimizePickOutput.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def UnMinimizePickOutput():
    # Restore the pick attributes saved in the global pa_orig and turn
    # query output back on.
    global pa_orig
    SetPickAttributes(pa_orig)
    SuppressQueryOutputOff()
###############################################################################
# Function: PickLoop
#
# Purpose: Perform a zone or node pick over a specified tuple of element ids.
# Also, handle case where user may have added variables to the
# PickAttributes to be returned during picking. Report the output in
# a useful tabular form.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def PickLoop(ids, pickType):
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
ClearPickPoints()
ResetPickLetter()
s = ["","",""]
MinimizePickOutput()
npicks = 1
for win in (1,2,3,4):
if win == 2 and cmfeMode == 0:
continue
SetActiveWindow(win)
for id in ids:
if pickType == "zonal":
PickByZone(id)
else:
PickByNode(id)
tmp = GetPickOutput()
picks = []
if win == 1:
picks = re.findall("diff/(.*): *<(zonal|nodal)> = ([0-9.e+\-]*)\s*",tmp)
npicks = len(picks)
for p in range(len(picks)):
s[win-1] = s[win-1] + "%s=%s"%(picks[p][0], picks[p][2]) + ";"
else:
picks = re.findall("(.*): *<(zonal|nodal)> = ([0-9.e+\-]*)\s*",tmp)
for p in range(len(picks)):
s[win-1] = s[win-1] + "%s"%picks[p][2] + ";"
dpicks = s[0].split(";")
lpicks = s[2].split(";")
rpicks = s[3].split(";")
result = " id | var | DIFF | dbLeft | dbRight \n"
result = result + "---------|------------------|------------------|------------------|------------------\n"
k = 0
for id in ids:
for p in range(npicks):
dsplit = dpicks[k].split("=")
result = result + "% 9d|% 18s|% 18s|% 18s|% 18s\n"%(id,dsplit[0],dsplit[1],lpicks[k],rpicks[k])
k = k + 1
# Disabled for now: winds up poorly formatting the message
# ClientMethod("MessageBoxOk", result)
print result
SetActiveWindow(1)
UnMinimizePickOutput()
def ZPick(zoneIds):
    # Pick by zone at each id in zoneIds and print a comparison table.
    PickLoop(zoneIds, "zonal")
def NPick(nodeIds):
    # Pick by node at each id in nodeIds and print a comparison table.
    PickLoop(nodeIds, "nodal")
###############################################################################
# Function: SyncWindows
#
# Purpose: Bring all the windows up to date with contents of the specified
# source window. This is done by deleting all the other windows and
# re-cloning them from the source. Although this is costly, it is
# the only easy way to ensure that all plots, operators, lighting,
# etc., are consistent.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Cyrus Harrison, Mon May 16 09:15:21 PDT 2011
# Update argument passed to GetAnnotationObject().
#
###############################################################################
def SyncWindows(srcWin):
    """Bring all windows up to date with the contents of window srcWin.

    The other windows are deleted and re-cloned from srcWin (the only
    easy way to keep plots, operators, lighting, etc. consistent), then
    each clone's database, annotation text and plot variables are
    adjusted for that window's role.
    """
    global dbr
    global dbl
    #
    # Get List of active plots
    #
    activePlotsList = []
    hiddenPlotsList = []
    SetActiveWindow(srcWin)
    srcPlots = GetPlotList()
    for p in range(srcPlots.GetNumPlots()):
        if srcPlots.GetPlots(p).activeFlag == 1:
            activePlotsList.append(p)
        if srcPlots.GetPlots(p).hiddenFlag == 1:
            hiddenPlotsList.append(p)
    #
    # Delete the old windows so we can re-clone them
    #
    for win in (1,2,3,4):
        if win == 2 and cmfeMode == 0:
            continue
        if win == srcWin:
            continue
        SetActiveWindow(win)
        DeleteWindow()
    #
    # Clone the src window and adjust variable names
    #
    for win in (1,2,3,4):
        if win == 2 and cmfeMode == 0:
            continue
        if win == srcWin:
            continue
        SetActiveWindow(srcWin)
        CloneWindow()
        SetActiveWindow(win)
        # re-set the annotations
        # NOTE: assumes the window's role label is the second annotation
        # object (index 1), as created by Initialize.
        ao = GetAnnotationObject(GetAnnotationObjectNames()[1])
        if win == 1:
            ReplaceDatabase(dbl)
            if cmfeMode == 0:
                ao.text = "L-ConnCMFE(R)"
            else:
                ao.text = "L-PosCMFE(R)"
        elif win == 2:
            ReplaceDatabase(dbr)
            if cmfeMode == 0:
                ao.text = "Unused"
            else:
                ao.text = "R-PosCMFE(L)"
        elif win == 3:
            ReplaceDatabase(dbl)
            ao.text = "Left-db"
        elif win == 4:
            ReplaceDatabase(dbr)
            ao.text = "Right-db"
        ao.position = (0.7, 0.95)
        # reset the plot variables: window 1 gets the diff expression,
        # all other windows get the raw variable.
        plots = GetPlotList()
        for p in range(plots.GetNumPlots()):
            pv = plots.GetPlots(p).plotVar
            if IsNotScalarVarPlotType(plots.GetPlots(p).plotType):
                continue
            theVar = GetDiffVarNames(pv)
            if win == 1 and pv == theVar[0]:
                print "Warning: Looks like you are not displaying a diff variable in the DIFF window"
            SetActivePlots((p,))
            if win == 1:
                ChangeActivePlotsVar(theVar[1])
            else:
                ChangeActivePlotsVar(theVar[0])
        DrawPlots()
        # Re-apply the source window's hidden and active plot selections.
        hiddenPlotsTmp = tuple(hiddenPlotsList)
        if len(hiddenPlotsTmp) > 0:
            SetActivePlots(tuple(hiddenPlotsList))
            HideActivePlots()
        SetActivePlots(tuple(activePlotsList))
    SetActiveWindow(srcWin)
###############################################################################
# Function: SyncWins...
#
# Purpose: Stubs to register as macros
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def SyncWinsL_R():
    # Macro stub: re-clone all windows from window 1 (L-CMFE(R)).
    SyncWindows(1)
def SyncWinsR_L():
    # Macro stub: re-clone all windows from window 2 (R-CMFE(L)).
    SyncWindows(2)
def SyncWinsLeft():
    # Macro stub: re-clone all windows from window 3 (left db).
    SyncWindows(3)
def SyncWinsRight():
    # Macro stub: re-clone all windows from window 4 (right db).
    SyncWindows(4)
###############################################################################
# Function: CompareMinMaxInfos
#
# Purpose: Sorter function for sorting output from DiffSummary
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
###############################################################################
def CompareMinMaxInfos(a1, a2):
    """Sort comparator for DiffSummary entries.

    Each entry is a tuple whose [1] and [5] slots hold the min and max
    difference values.  Entries are ordered by decreasing magnitude of
    their largest absolute difference (largest differences first).
    """
    mag1 = max(abs(a1[1]), abs(a1[5]))
    mag2 = max(abs(a2[1]), abs(a2[5]))
    if mag1 < mag2:
        return 1
    if mag1 > mag2:
        return -1
    return 0
###############################################################################
# Function: DiffSummary
#
# Purpose: Iterate over all variables in diffVars and report differences.
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 10:03:35 PDT 2007
# Added calls to disable re-draws and then re-enable to accelerate
#
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
#
# Mark C. Miller, Mon Aug 27 17:00:24 PDT 2007
# Added calls to Hide/UnHide all unhidden plots so we don't get a
# "plot dimensions don't match" error message from VisIt when displaying
# each variable in the list.
#
# Mark C. Miller, Tue Aug 28 16:25:05 PDT 2007
# Added return of result string to facilitate testing.
#
###############################################################################
def DiffSummary():
    """Plot each variable in diffVars, query its min/max, and print a table
    of differences sorted by decreasing magnitude.  Returns the table text
    as a string (added to facilitate testing, per the notes above)."""
    SetActiveWindow(1)
    # Hide every currently visible plot first so each temporary Pseudocolor
    # plot is drawn alone (avoids VisIt's "plot dimensions don't match"
    # error mentioned in the modification log above).
    plotsToUnHide = HideAllUnHiddenPlots(1)
    DisableRedraw()
    SuppressQueryOutputOn()
    diffSummary = []
    resultStr=""
    for v in diffVars:
        # Strip the leading "diff/" namespace from the variable name, if any.
        vname = re.search("diff/(.*)",v)
        if vname != None:
            vname = vname.group(1)
        else:
            vname = v
        if diffSummaryOnly == 1:
            print "Processing variable \"%s\""%v
        # Plot the variable and run VisIt's MinMax query on it.
        AddPlot("Pseudocolor", v)
        DrawPlots()
        Query("MinMax")
        qo = GetQueryOutputString()
        qv = GetQueryOutputValue()
        # Parse element/domain location of the extrema from the query text.
        mininfo = re.search("Min = ([0-9.e+\-]*) \((node|zone) ([0-9]*) (in domain ([0-9]*) at|at())",qo)
        maxinfo = re.search("Max = ([0-9.e+\-]*) \((node|zone) ([0-9]*) (in domain ([0-9]*) at|at())",qo)
        # Row tuple layout: name, then for min and max respectively:
        #   value (indices 1/5), node|zone (2/6), element id (3/7), domain id (4/8)
        if mininfo != None and maxinfo != None:
            diffSummary.append( \
                (vname[-12:], qv[0], mininfo.group(2), mininfo.group(3), mininfo.group(5), \
                qv[1], maxinfo.group(2), maxinfo.group(3), maxinfo.group(5)))
        else:
            # Query output did not match the expected form; record placeholders.
            diffSummary.append((vname[-12:], 0.0, "Unknown", "Unknown", "Unknown", \
                0.0, "Unknown", "Unknown", "Unknown"))
        #time.sleep(0.5)
        DeleteActivePlots()
    SuppressQueryOutputOff()
    print "Difference Summary sorted in decreasing difference magnitude...\n"
    print "NOTE: Differences are computed in only single precision"
    print " var |max -diff | max -elem ; -dom |max +diff | max +elem ; +dom |"
    print "------------|------------|--------------------|------------|--------------------|"
    # Sort rows so the largest-magnitude differences come first
    # (CompareMinMaxInfos is a cmp-style comparator; Python 2 list.sort).
    diffSummary.sort(CompareMinMaxInfos)
    for k in range(len(diffSummary)):
        if diffSummary[k][1] == 0.0 and diffSummary[k][5] == 0.0:
            print "% 12.12s| NO DIFFERENCES"%diffSummary[k][0]
            resultStr = resultStr + "% 12.12s| NO DIFFERENCES\n"%diffSummary[k][0]
        else:
            print "% 12.12s|%+12.7f|%4s % 7s;% 7s|%+12.7f|%4s % 7s;% 7s|"%diffSummary[k]
            resultStr = resultStr + "% 12.12s|%+12.7f|%4s % 7s;% 7s|%+12.7f|%4s % 7s;% 7s|\n"%diffSummary[k]
    # Restore the plots hidden at the top and re-enable drawing.
    UnHideAllUnHiddenPlots(1, plotsToUnHide)
    RedrawWindow()
    return resultStr
###############################################################################
# Main program and global variables
#
# Programmer: Mark C. Miller
# Date: Wed Jul 18 18:12:28 PDT 2007
#
# Modifications:
# Mark C. Miller, Tue Aug 21 11:17:20 PDT 2007
# Added support for difference summary mode
###############################################################################
# --- Global state shared by the macro functions above ---
diffVars = []          # names of the "diff/..." variables summarized by DiffSummary
dbl = "notset"         # presumably the left-hand database name -- set by ProcessCLArgs; confirm
dbr = "notset"         # presumably the right-hand database name -- confirm in ProcessCLArgs
mdl = 0                # left-hand flag paired with dbl; meaning set in ProcessCLArgs -- confirm
mdr = 0                # right-hand counterpart of mdl
forcePosCMFE = 0       # NOTE(review): name suggests forcing position-based CMFE -- confirm
diffSummaryOnly = 0    # when 1, run only the textual difference summary and exit
cmfeMode = 0           # CMFE expression mode selector -- semantics defined elsewhere
currentTimeState = -1  # current time state; -1 appears to mean "not yet set"
noWinMode = 0          # NOTE(review): name suggests running without windows -- confirm
ProcessCLArgs()
Initialize()
# In summary-only mode, print the difference table and quit immediately.
if diffSummaryOnly == 1:
    DiffSummary()
    sys.exit()
| 37.570147 | 129 | 0.564462 |
acf9f0b6edc48daf86cf151cd09e4b5c6d2e96c2 | 2,787 | py | Python | tests/test_custom_route_class.py | mtag-dev/squall | 8ab57ae650a52eae1471ce19b3d381f7252684e2 | [
"MIT"
] | 27 | 2021-12-04T15:54:59.000Z | 2022-02-19T15:37:35.000Z | tests/test_custom_route_class.py | mtag-dev/squall | 8ab57ae650a52eae1471ce19b3d381f7252684e2 | [
"MIT"
] | 21 | 2021-12-04T21:17:54.000Z | 2022-01-30T23:45:43.000Z | tests/test_custom_route_class.py | mtag-dev/squall | 8ab57ae650a52eae1471ce19b3d381f7252684e2 | [
"MIT"
] | 2 | 2021-12-29T10:53:59.000Z | 2022-01-12T05:01:02.000Z | import pytest
from squall import Router, Squall
from squall.routing.routes import APIRoute
from squall.testclient import TestClient
app = Squall()


# Three marker subclasses of APIRoute; each tags its routes with an
# ``x_type`` class attribute so tests can verify which route class a
# router applied to a given path.
class APIRouteA(APIRoute):
    x_type = "A"


class APIRouteB(APIRoute):
    x_type = "B"


class APIRouteC(APIRoute):
    x_type = "C"


# Each router is bound to its own custom route class and URL prefix.
router_a = Router(route_class=APIRouteA, prefix="/a")
router_b = Router(route_class=APIRouteB, prefix="/b")
router_c = Router(route_class=APIRouteC, prefix="/c")


@router_a.get("/")
def get_a():
    return {"msg": "A"}


@router_b.get("/")
def get_b():
    return {"msg": "B"}


@router_c.get("/")
def get_c():
    return {"msg": "C"}


# Nest the routers (c inside b inside a) and mount on the app, producing
# the final paths /a, /a/b and /a/b/c.
router_b.include_router(router=router_c)
router_a.include_router(router=router_b)
app.include_router(router=router_a)

client = TestClient(app)
openapi_schema = {
"openapi": "3.0.2",
"info": {"title": "Squall", "version": "0.1.0"},
"paths": {
"/a": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Get A",
"operationId": "get_a_a_get",
}
},
"/a/b": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Get B",
"operationId": "get_b_a_b_get",
}
},
"/a/b/c": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Get C",
"operationId": "get_c_a_b_c_get",
}
},
},
}
@pytest.mark.parametrize(
    "path,expected_status,expected_response",
    [
        ("/a", 200, {"msg": "A"}),
        ("/a/b", 200, {"msg": "B"}),
        ("/a/b/c", 200, {"msg": "C"}),
        ("/openapi.json", 200, openapi_schema),
    ],
)
def test_get_path(path, expected_status, expected_response):
    # Each nested prefix must be served by its own handler, and the
    # generated OpenAPI document must match the schema literal above.
    response = client.get(path)
    assert response.status_code == expected_status
    assert response.json() == expected_response
def test_route_classes():
    """Every mounted route keeps the custom route class of its router."""
    by_path = {}
    for route in app.router.routes:
        assert isinstance(route, APIRoute)
        by_path[route.path.path] = route
    # Insertion order of expectations mirrors the original assertion order.
    expected = {"/a": "A", "/a/b": "B", "/a/b/c": "C"}
    for path, marker in expected.items():
        assert by_path[path].x_type == marker
| 24.234783 | 72 | 0.486186 |
acf9f15f9edfa99119959f250105a4e31da07d3c | 24,174 | py | Python | Lib/site-packages/textdistance/algorithms/edit_based.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | null | null | null | Lib/site-packages/textdistance/algorithms/edit_based.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | null | null | null | Lib/site-packages/textdistance/algorithms/edit_based.py | hirorin-demon/hirorin-streamlit | 03fbb6f03ec94f909d451e708a3b30b177607695 | [
"0BSD"
] | null | null | null | # built-in
from collections import defaultdict
from itertools import zip_longest
# app
from .base import Base as _Base, BaseSimilarity as _BaseSimilarity
try:
import numpy
except ImportError:
numpy = None
__all__ = [
'Hamming', 'MLIPNS',
'Levenshtein', 'DamerauLevenshtein',
'Jaro', 'JaroWinkler', 'StrCmp95',
'NeedlemanWunsch', 'Gotoh', 'SmithWaterman',
'hamming', 'mlipns',
'levenshtein', 'damerau_levenshtein',
'jaro', 'jaro_winkler', 'strcmp95',
'needleman_wunsch', 'gotoh', 'smith_waterman',
]
class Hamming(_Base):
    """Hamming distance between two or more ordered sequences.

    Counts the positions at which the sequences disagree.  With
    ``truncate=True`` only positions up to the shortest sequence's length
    are compared; otherwise missing tail items count as mismatches.

    https://en.wikipedia.org/wiki/Hamming_distance
    """

    def __init__(self, qval=1, test_func=None, truncate=False, external=True):
        # q-gram size used by the base-class sequence preprocessing.
        self.qval = qval
        # Element comparator; defaults to the base-class identity test.
        self.test_func = test_func or self._ident
        # When True, ignore the tails of longer sequences.
        self.truncate = truncate
        self.external = external

    def __call__(self, *sequences):
        seqs = self._get_sequences(*sequences)

        shortcut = self.quick_answer(*seqs)
        if shortcut is not None:
            return shortcut

        pair_up = zip if self.truncate else zip_longest
        mismatches = 0
        for elements in pair_up(*seqs):
            if not self.test_func(*elements):
                mismatches += 1
        return mismatches
class Levenshtein(_Base):
"""
Compute the absolute Levenshtein distance between the two sequences.
The Levenshtein distance is the minimum number of edit operations necessary
for transforming one sequence into the other. The edit operations allowed are:
* deletion: ABC -> BC, AC, AB
* insertion: ABC -> ABCD, EABC, AEBC..
* substitution: ABC -> ABE, ADC, FBC..
https://en.wikipedia.org/wiki/Levenshtein_distance
TODO: https://gist.github.com/kylebgorman/1081951/9b38b7743a3cb5167ab2c6608ac8eea7fc629dca
"""
def __init__(self, qval=1, test_func=None, external=True):
self.qval = qval
self.test_func = test_func or self._ident
self.external = external
def _recursive(self, s1, s2):
# TODO: more than 2 sequences support
if not s1 or not s2:
return len(s1) + len(s2)
if self.test_func(s1[-1], s2[-1]):
return self(s1[:-1], s2[:-1])
# deletion/insertion
d = min(
self(s1[:-1], s2),
self(s1, s2[:-1]),
)
# substitution
s = self(s1[:-1], s2[:-1])
return min(d, s) + 1
def _cicled(self, s1, s2):
"""
source:
https://github.com/jamesturk/jellyfish/blob/master/jellyfish/_jellyfish.py#L18
"""
rows = len(s1) + 1
cols = len(s2) + 1
prev = None
if numpy:
cur = numpy.arange(cols)
else:
cur = range(cols)
for r in range(1, rows):
prev, cur = cur, [r] + [0] * (cols - 1)
for c in range(1, cols):
deletion = prev[c] + 1
insertion = cur[c - 1] + 1
dist = self.test_func(s1[r - 1], s2[c - 1])
edit = prev[c - 1] + (not dist)
cur[c] = min(edit, deletion, insertion)
return cur[-1]
def __call__(self, s1, s2):
s1, s2 = self._get_sequences(s1, s2)
result = self.quick_answer(s1, s2)
if result is not None:
return result
return self._cicled(s1, s2)
class DamerauLevenshtein(_Base):
    """
    Compute the absolute Damerau-Levenshtein distance between the two sequences.
    The Damerau-Levenshtein distance is the minimum number of edit operations
    necessary for transforming one sequence into the other. The edit operations
    allowed are:

        * deletion:      ABC -> BC, AC, AB
        * insertion:     ABC -> ABCD, EABC, AEBC..
        * substitution:  ABC -> ABE, ADC, FBC..
        * transposition: ABC -> ACB, BAC

    https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance
    """

    def __init__(self, qval=1, test_func=None, external=True):
        # q-gram size used by the base-class sequence preprocessing.
        self.qval = qval
        # Element equality predicate; defaults to the base-class identity test.
        self.test_func = test_func or self._ident
        self.external = external

    def _numpy(self, s1, s2):
        # TODO: doesn't pass tests, need improve
        # NOTE(review): the negative-index initialization below was copied
        # from the dict-based version and does not carry over cleanly to a
        # numpy array (negative indices wrap around) -- likely why this
        # path is disabled in __call__.
        # FIX: ``numpy.int`` was deprecated in NumPy 1.20 and removed in
        # 1.24; the builtin ``int`` is the alias it stood for, so the
        # resulting dtype is unchanged.
        d = numpy.zeros([len(s1) + 1, len(s2) + 1], dtype=int)

        # matrix
        for i in range(-1, len(s1) + 1):
            d[i][-1] = i + 1
        for j in range(-1, len(s2) + 1):
            d[-1][j] = j + 1

        for i, cs1 in enumerate(s1):
            for j, cs2 in enumerate(s2):
                cost = int(not self.test_func(cs1, cs2))
                # ^ 0 if equal, 1 otherwise

                d[i][j] = min(
                    d[i - 1][j] + 1,          # deletion
                    d[i][j - 1] + 1,          # insertion
                    d[i - 1][j - 1] + cost,   # substitution
                )

                # transposition
                if not i or not j:
                    continue
                if not self.test_func(cs1, s2[j - 1]):
                    continue
                d[i][j] = min(
                    d[i][j],
                    d[i - 2][j - 2] + cost,
                )

        return d[len(s1) - 1][len(s2) - 1]

    def _pure_python(self, s1, s2):
        """
        https://www.guyrutenberg.com/2008/12/15/damerau-levenshtein-distance-in-python/
        """
        # DP table keyed by (i, j); indices -1 represent the empty prefix.
        d = {}

        # matrix initialization: distance from/to the empty prefix.
        for i in range(-1, len(s1) + 1):
            d[i, -1] = i + 1
        for j in range(-1, len(s2) + 1):
            d[-1, j] = j + 1

        for i, cs1 in enumerate(s1):
            for j, cs2 in enumerate(s2):
                cost = int(not self.test_func(cs1, cs2))
                # ^ 0 if equal, 1 otherwise

                d[i, j] = min(
                    d[i - 1, j] + 1,          # deletion
                    d[i, j - 1] + 1,          # insertion
                    d[i - 1, j - 1] + cost,   # substitution
                )

                # transposition: adjacent characters swapped in the two strings
                if not i or not j:
                    continue
                if not self.test_func(cs1, s2[j - 1]):
                    continue
                if not self.test_func(s1[i - 1], cs2):
                    continue
                d[i, j] = min(
                    d[i, j],
                    d[i - 2, j - 2] + cost,
                )

        return d[len(s1) - 1, len(s2) - 1]

    def __call__(self, s1, s2):
        s1, s2 = self._get_sequences(s1, s2)

        result = self.quick_answer(s1, s2)
        if result is not None:
            return result

        # The numpy path is disabled until _numpy is fixed (see TODO above).
        # if numpy:
        #     return self._numpy(s1, s2)
        # else:
        return self._pure_python(s1, s2)
class JaroWinkler(_BaseSimilarity):
"""
Computes the Jaro-Winkler measure between two strings.
The Jaro-Winkler measure is designed to capture cases where two strings
have a low Jaro score, but share a prefix.
and thus are likely to match.
https://en.wikipedia.org/wiki/Jaro%E2%80%93Winkler_distance
https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/jaro.js
https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/jaro-winkler.js
"""
def __init__(self, long_tolerance=False, winklerize=True, qval=1, external=True):
self.qval = qval
self.long_tolerance = long_tolerance
self.winklerize = winklerize
self.external = external
def maximum(self, *sequences):
return 1
def __call__(self, s1, s2, prefix_weight=0.1):
s1, s2 = self._get_sequences(s1, s2)
result = self.quick_answer(s1, s2)
if result is not None:
return result
s1_len = len(s1)
s2_len = len(s2)
if not s1_len or not s2_len:
return 0.0
min_len = max(s1_len, s2_len)
search_range = (min_len // 2) - 1
if search_range < 0:
search_range = 0
s1_flags = [False] * s1_len
s2_flags = [False] * s2_len
# looking only within search range, count & flag matched pairs
common_chars = 0
for i, s1_ch in enumerate(s1):
low = max(0, i - search_range)
hi = min(i + search_range, s2_len - 1)
for j in range(low, hi + 1):
if not s2_flags[j] and s2[j] == s1_ch:
s1_flags[i] = s2_flags[j] = True
common_chars += 1
break
# short circuit if no characters match
if not common_chars:
return 0.0
# count transpositions
k = trans_count = 0
for i, s1_f in enumerate(s1_flags):
if s1_f:
for j in range(k, s2_len):
if s2_flags[j]:
k = j + 1
break
if s1[i] != s2[j]:
trans_count += 1
trans_count //= 2
# adjust for similarities in nonmatched characters
weight = common_chars / s1_len + common_chars / s2_len
weight += (common_chars - trans_count) / common_chars
weight /= 3
# stop to boost if strings are not similar
if not self.winklerize:
return weight
if weight <= 0.7 or s1_len <= 3 or s2_len <= 3:
return weight
# winkler modification
# adjust for up to first 4 chars in common
j = min(min_len, 4)
i = 0
while i < j and s1[i] == s2[i] and s1[i]:
i += 1
if i:
weight += i * prefix_weight * (1.0 - weight)
# optionally adjust for long strings
# after agreeing beginning chars, at least two or more must agree and
# agreed characters must be > half of remaining characters
if not self.long_tolerance or min_len <= 4:
return weight
if common_chars <= i + 1 or 2 * common_chars < min_len + i:
return weight
tmp = (common_chars - i - 1) / (s1_len + s2_len - i * 2 + 2)
weight += (1.0 - weight) * tmp
return weight
class Jaro(JaroWinkler):
    # Plain Jaro similarity: identical to JaroWinkler but with the
    # common-prefix boost disabled (winklerize=False).
    def __init__(self, long_tolerance=False, qval=1, external=True):
        super().__init__(
            long_tolerance=long_tolerance,
            winklerize=False,
            qval=qval,
            external=external,
        )
class NeedlemanWunsch(_BaseSimilarity):
    """
    Computes the Needleman-Wunsch measure between two strings.

    The Needleman-Wunsch measure generalizes the Levenshtein distance and
    considers global alignment between two strings: it assigns a score to
    each alignment between the two input strings and returns the score of
    the best (maximal-score) alignment.  An alignment is a set of
    correspondences between the characters of the strings, allowing for gaps.

    https://en.wikipedia.org/wiki/Needleman%E2%80%93Wunsch_algorithm
    """
    positive = False

    def __init__(self, gap_cost=1.0, sim_func=None, qval=1, external=True):
        self.qval = qval
        # Linear penalty charged per gap (insertion/deletion) position.
        self.gap_cost = gap_cost
        if sim_func:
            self.sim_func = sim_func
        else:
            self.sim_func = self._ident
        self.external = external

    def minimum(self, *sequences):
        # Worst case: the longer sequence is aligned entirely against gaps.
        return -max(map(len, sequences)) * self.gap_cost

    def maximum(self, *sequences):
        # Best case: every position of the longer sequence matches.
        return max(map(len, sequences))

    def distance(self, *sequences):
        """Get distance between sequences
        """
        return -1 * self.similarity(*sequences)

    def normalized_distance(self, *sequences):
        """Get distance from 0 to 1
        """
        minimum = self.minimum(*sequences)
        maximum = self.maximum(*sequences)
        if maximum == 0:
            return 0
        return (self.distance(*sequences) - minimum) / (maximum - minimum)

    def normalized_similarity(self, *sequences):
        """Get similarity from 0 to 1
        """
        # NOTE(review): the divisor is ``maximum * 2`` rather than
        # ``maximum - minimum`` as in normalized_distance; kept as-is to
        # preserve existing results.
        minimum = self.minimum(*sequences)
        maximum = self.maximum(*sequences)
        if maximum == 0:
            return 1
        return (self.similarity(*sequences) - minimum) / (maximum * 2)

    def __call__(self, s1, s2):
        if not numpy:
            raise ImportError('Please, install numpy for Needleman-Wunsch measure')

        s1, s2 = self._get_sequences(s1, s2)

        # result = self.quick_answer(s1, s2)
        # if result is not None:
        #     return result * self.maximum(s1, s2)

        # FIX: ``numpy.float`` was deprecated in NumPy 1.20 and removed in
        # 1.24; it was an alias of the builtin ``float`` (float64 dtype),
        # so behavior is unchanged.
        dist_mat = numpy.zeros(
            (len(s1) + 1, len(s2) + 1),
            dtype=float,
        )
        # DP initialization: first column/row score pure-gap prefixes.
        for i in range(len(s1) + 1):
            dist_mat[i, 0] = -(i * self.gap_cost)
        for j in range(len(s2) + 1):
            dist_mat[0, j] = -(j * self.gap_cost)

        # Needleman-Wunsch DP recurrence.
        for i, c1 in enumerate(s1, 1):
            for j, c2 in enumerate(s2, 1):
                match = dist_mat[i - 1, j - 1] + self.sim_func(c1, c2)
                delete = dist_mat[i - 1, j] - self.gap_cost
                insert = dist_mat[i, j - 1] - self.gap_cost
                dist_mat[i, j] = max(match, delete, insert)

        return dist_mat[dist_mat.shape[0] - 1, dist_mat.shape[1] - 1]
class SmithWaterman(_BaseSimilarity):
    """
    Computes the Smith-Waterman measure between two strings.

    Smith-Waterman performs *local* sequence alignment: instead of scoring
    whole sequences, it compares segments of all possible lengths and
    optimizes the similarity measure over them.

    https://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
    https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/smith-waterman.js
    """

    def __init__(self, gap_cost=1.0, sim_func=None, qval=1, external=True):
        self.qval = qval
        # Linear penalty per gap position.
        self.gap_cost = gap_cost
        self.sim_func = sim_func or self._ident
        self.external = external

    def maximum(self, *sequences):
        return min(map(len, sequences))

    def __call__(self, s1, s2):
        if not numpy:
            raise ImportError('Please, install numpy for Smith-Waterman measure')

        s1, s2 = self._get_sequences(s1, s2)

        result = self.quick_answer(s1, s2)
        if result is not None:
            return result

        # FIX: ``numpy.float`` (deprecated in NumPy 1.20, removed in 1.24)
        # replaced by the builtin ``float`` it aliased; dtype is unchanged.
        dist_mat = numpy.zeros(
            (len(s1) + 1, len(s2) + 1),
            dtype=float,
        )
        for i, sc1 in enumerate(s1, start=1):
            for j, sc2 in enumerate(s2, start=1):
                # The score for substituting the letter a[i - 1] for b[j - 1].
                # Generally low for mismatch, high for match.
                match = dist_mat[i - 1, j - 1] + self.sim_func(sc1, sc2)
                # The scores for introducing extra letters in one of the
                # strings (or by symmetry, deleting them from the other).
                delete = dist_mat[i - 1, j] - self.gap_cost
                insert = dist_mat[i, j - 1] - self.gap_cost
                # Local alignment: the score never drops below zero.
                dist_mat[i, j] = max(0, match, delete, insert)

        return dist_mat[dist_mat.shape[0] - 1, dist_mat.shape[1] - 1]
class Gotoh(NeedlemanWunsch):
    """Gotoh score

    Gotoh's algorithm is essentially Needleman-Wunsch with affine gap
    penalties: opening a gap costs ``gap_open`` and each further extension
    costs ``gap_ext``.

    https://www.cs.umd.edu/class/spring2003/cmsc838t/papers/gotoh1982.pdf
    """

    def __init__(self, gap_open=1, gap_ext=0.4, sim_func=None, qval=1, external=True):
        self.qval = qval
        # Cost of opening a new gap.
        self.gap_open = gap_open
        # Cost of extending an already-open gap by one position.
        self.gap_ext = gap_ext
        if sim_func:
            self.sim_func = sim_func
        else:
            self.sim_func = self._ident
        self.external = external

    def minimum(self, *sequences):
        return -min(map(len, sequences))

    def maximum(self, *sequences):
        return min(map(len, sequences))

    def __call__(self, s1, s2):
        if not numpy:
            raise ImportError('Please, install numpy for Gotoh measure')

        s1, s2 = self._get_sequences(s1, s2)

        # result = self.quick_answer(s1, s2)
        # if result is not None:
        #     return result * self.maximum(s1, s2)

        len_s1 = len(s1)
        len_s2 = len(s2)

        # FIX: ``numpy.float`` (deprecated in NumPy 1.20, removed in 1.24)
        # replaced by the builtin ``float`` it aliased.
        # d: best score ending in a match/mismatch; p/q: best score ending
        # in a gap along s1 / s2 respectively (per the recurrences below).
        d_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=float)
        p_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=float)
        q_mat = numpy.zeros((len_s1 + 1, len_s2 + 1), dtype=float)

        d_mat[0, 0] = 0
        p_mat[0, 0] = float('-inf')
        q_mat[0, 0] = float('-inf')
        for i in range(1, len_s1 + 1):
            d_mat[i, 0] = float('-inf')
            p_mat[i, 0] = -self.gap_open - self.gap_ext * (i - 1)
            q_mat[i, 0] = float('-inf')
            q_mat[i, 1] = -self.gap_open
        for j in range(1, len_s2 + 1):
            d_mat[0, j] = float('-inf')
            p_mat[0, j] = float('-inf')
            p_mat[1, j] = -self.gap_open
            q_mat[0, j] = -self.gap_open - self.gap_ext * (j - 1)

        for i, sc1 in enumerate(s1, start=1):
            for j, sc2 in enumerate(s2, start=1):
                sim_val = self.sim_func(sc1, sc2)
                d_mat[i, j] = max(
                    d_mat[i - 1, j - 1] + sim_val,
                    p_mat[i - 1, j - 1] + sim_val,
                    q_mat[i - 1, j - 1] + sim_val,
                )
                # Either open a new gap from d, or extend the gap in p/q.
                p_mat[i, j] = max(
                    d_mat[i - 1, j] - self.gap_open,
                    p_mat[i - 1, j] - self.gap_ext,
                )
                q_mat[i, j] = max(
                    d_mat[i, j - 1] - self.gap_open,
                    q_mat[i, j - 1] - self.gap_ext,
                )

        i, j = (n - 1 for n in d_mat.shape)
        return max(d_mat[i, j], p_mat[i, j], q_mat[i, j])
class StrCmp95(_BaseSimilarity):
"""strcmp95 similarity
http://cpansearch.perl.org/src/SCW/Text-JaroWinkler-0.1/strcmp95.c
"""
sp_mx = (
('A', 'E'), ('A', 'I'), ('A', 'O'), ('A', 'U'), ('B', 'V'), ('E', 'I'),
('E', 'O'), ('E', 'U'), ('I', 'O'), ('I', 'U'), ('O', 'U'), ('I', 'Y'),
('E', 'Y'), ('C', 'G'), ('E', 'F'), ('W', 'U'), ('W', 'V'), ('X', 'K'),
('S', 'Z'), ('X', 'S'), ('Q', 'C'), ('U', 'V'), ('M', 'N'), ('L', 'I'),
('Q', 'O'), ('P', 'R'), ('I', 'J'), ('2', 'Z'), ('5', 'S'), ('8', 'B'),
('1', 'I'), ('1', 'L'), ('0', 'O'), ('0', 'Q'), ('C', 'K'), ('G', 'J'),
)
def __init__(self, long_strings=False, external=True):
self.long_strings = long_strings
self.external = external
def maximum(self, *sequences):
return 1
@staticmethod
def _in_range(char):
return 0 < ord(char) < 91
def __call__(self, s1, s2):
s1 = s1.strip().upper()
s2 = s2.strip().upper()
result = self.quick_answer(s1, s2)
if result is not None:
return result
len_s1 = len(s1)
len_s2 = len(s2)
adjwt = defaultdict(int)
# Initialize the adjwt array on the first call to the function only.
# The adjwt array is used to give partial credit for characters that
# may be errors due to known phonetic or character recognition errors.
# A typical example is to match the letter "O" with the number "0"
for c1, c2 in self.sp_mx:
adjwt[c1, c2] = 3
adjwt[c2, c1] = 3
if len_s1 > len_s2:
search_range = len_s1
minv = len_s2
else:
search_range = len_s2
minv = len_s1
# Blank out the flags
s1_flag = [0] * search_range
s2_flag = [0] * search_range
search_range = max(0, search_range // 2 - 1)
# Looking only within the search range, count and flag the matched pairs.
num_com = 0
yl1 = len_s2 - 1
for i, sc1 in enumerate(s1):
lowlim = max(i - search_range, 0)
hilim = min(i + search_range, yl1)
for j in range(lowlim, hilim + 1):
if s2_flag[j] == 0 and s2[j] == sc1:
s2_flag[j] = 1
s1_flag[i] = 1
num_com += 1
break
# If no characters in common - return
if num_com == 0:
return 0.0
# Count the number of transpositions
k = n_trans = 0
for i, sc1 in enumerate(s1):
if not s1_flag[i]:
continue
for j in range(k, len_s2):
if s2_flag[j] != 0:
k = j + 1
break
if sc1 != s2[j]:
n_trans += 1
n_trans = n_trans // 2
# Adjust for similarities in unmatched characters
n_simi = 0
if minv > num_com:
for i in range(len_s1):
if s1_flag[i] != 0:
continue
if not self._in_range(s1[i]):
continue
for j in range(len_s2):
if s2_flag[j] != 0:
continue
if not self._in_range(s2[j]):
continue
if (s1[i], s2[j]) not in adjwt:
continue
n_simi += adjwt[s1[i], s2[j]]
s2_flag[j] = 2
break
num_sim = n_simi / 10.0 + num_com
# Main weight computation
weight = num_sim / len_s1 + num_sim / len_s2
weight += (num_com - n_trans) / num_com
weight = weight / 3.0
# Continue to boost the weight if the strings are similar
if weight <= 0.7:
return weight
# Adjust for having up to the first 4 characters in common
j = min(minv, 4)
i = 0
for sc1, sc2 in zip(s1, s2):
if i >= j:
break
if sc1 != sc2:
break
if sc1.isdigit():
break
i += 1
if i:
weight += i * 0.1 * (1.0 - weight)
# Optionally adjust for long strings.
# After agreeing beginning chars, at least two more must agree and
# the agreeing characters must be > .5 of remaining characters.
if not self.long_strings:
return weight
if minv <= 4:
return weight
if num_com <= i + 1 or 2 * num_com < minv + i:
return weight
if s1[0].isdigit():
return weight
res = (num_com - i - 1) / (len_s1 + len_s2 - i * 2 + 2)
weight += (1.0 - weight) * res
return weight
class MLIPNS(_BaseSimilarity):
"""
Compute the Hamming distance between the two or more sequences.
The Hamming distance is the number of differing items in ordered sequences.
http://www.sial.iias.spb.su/files/386-386-1-PB.pdf
https://github.com/Yomguithereal/talisman/blob/master/src/metrics/distance/mlipns.js
"""
def __init__(self, threshold=0.25, maxmismatches=2, qval=1, external=True):
self.qval = qval
self.threshold = threshold
self.maxmismatches = maxmismatches
self.external = external
def maximum(self, *sequences):
return 1
def __call__(self, *sequences):
sequences = self._get_sequences(*sequences)
result = self.quick_answer(*sequences)
if result is not None:
return result
mismatches = 0
ham = Hamming()(*sequences)
maxlen = max(map(len, sequences))
while all(sequences) and mismatches <= self.maxmismatches:
if not maxlen:
return 1
if 1 - (maxlen - ham) / maxlen <= self.threshold:
return 1
mismatches += 1
ham -= 1
maxlen -= 1
if not maxlen:
return 1
return 0
# Ready-to-use module-level singletons with default parameters
# (these back the names exported in __all__ at the top of the module).
hamming = Hamming()
levenshtein = Levenshtein()
# ``damerau`` is kept as a short alias of ``damerau_levenshtein``.
damerau = damerau_levenshtein = DamerauLevenshtein()
jaro = Jaro()
jaro_winkler = JaroWinkler()
needleman_wunsch = NeedlemanWunsch()
smith_waterman = SmithWaterman()
gotoh = Gotoh()
strcmp95 = StrCmp95()
mlipns = MLIPNS()
| 33.02459 | 96 | 0.534128 |
acf9f161d68093cdb67c669fc782dcc4c0c115c4 | 1,992 | py | Python | magni/utils/_util.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 42 | 2015-02-09T10:17:26.000Z | 2021-12-21T09:38:04.000Z | magni/utils/_util.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 3 | 2015-03-20T12:00:40.000Z | 2015-03-20T12:01:16.000Z | magni/utils/_util.py | SIP-AAU/Magni | 6328dc98a273506f433af52e6bd394754a844550 | [
"BSD-2-Clause"
] | 14 | 2015-04-28T03:08:32.000Z | 2021-07-24T13:29:24.000Z | """
..
Copyright (c) 2014-2017, Magni developers.
All rights reserved.
See LICENSE.rst for further information.
Module providing the public function of the magni.utils subpackage.
"""
from __future__ import division
import os
from magni.utils.validation import decorate_validation as _decorate_validation
from magni.utils.validation import validate_generic as _generic
def split_path(path):
    """
    Split a path into folder path, file name, and file extension.

    The returned folder path ends with a folder separation character, and
    the returned file extension starts with an extension separation
    character.  The function is independent of the operating system and
    thus of the folder separation character and extension separation
    character in use.

    Parameters
    ----------
    path : str
        The path of the file either absolute or relative to the current working
        directory.

    Returns
    -------
    path : str
        The path of the containing folder of the input path.
    name : str
        The name of the object which the input path points to.
    ext : str
        The extension of the object which the input path points to (if any).

    Examples
    --------
    Concatenate a dummy path and split it using the present function:

    >>> import os
    >>> from magni.utils._util import split_path
    >>> path = 'folder' + os.sep + 'file' + os.path.extsep + 'extension'
    >>> parts = split_path(path)
    >>> print(tuple((parts[0][-7:-1], parts[1], parts[2][1:])))
    ('folder', 'file', 'extension')

    """

    @_decorate_validation
    def validate_input():
        _generic('path', 'string')

    validate_input()

    # Normalize to an absolute path, then peel off everything up to and
    # including the last folder separator.
    full = os.path.realpath(str(path))
    folder, sep, name = full.rpartition(os.path.sep)
    folder = folder + sep

    # Split at the last extension separator, keeping the separator itself
    # as part of the returned extension (so '.bashrc' -> ('', '.bashrc')).
    if os.path.extsep in name:
        name, _, tail = name.rpartition(os.path.extsep)
        ext = os.path.extsep + tail
    else:
        ext = ''

    return (folder, name, ext)
| 27.287671 | 79 | 0.656124 |
acf9f17ce400af4c8ee8bff7436a970e9c121e68 | 1,037 | py | Python | compss/programming_model/bindings/python/src/pycompss/tests/api/dummy/test_dummy_reduction.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 31 | 2018-03-06T09:30:03.000Z | 2022-03-23T09:51:05.000Z | compss/programming_model/bindings/python/src/pycompss/tests/api/dummy/test_dummy_reduction.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 3 | 2020-08-28T17:16:50.000Z | 2021-11-11T21:58:02.000Z | compss/programming_model/bindings/python/src/pycompss/tests/api/dummy/test_dummy_reduction.py | alexbarcelo/compss | d619faa70ac5a933543c6f8ef65e8acd18ae37a0 | [
"Apache-2.0"
] | 15 | 2018-06-07T10:03:27.000Z | 2022-02-23T14:59:42.000Z | #!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
from pycompss.api.dummy.reduction import Reduction
from pycompss.api.dummy.task import Task
@Reduction()
@Task()
def increment(value):
    # Decorated with the *dummy* Reduction/Task decorators; presumably these
    # are pass-throughs (they come from pycompss.api.dummy), so the call
    # behaves like a plain function -- confirm against the dummy package.
    return value + 1


def test_dummy_task():
    # The dummy decorators must not alter the wrapped function's result.
    result = increment(1)
    assert result == 2, (
        "Unexpected result provided by the dummy task decorator. Expected: 2 Received: " +  # noqa: E501
        str(result)
    )
| 28.805556 | 104 | 0.713597 |
acf9f1bdc9d72694d19467bdad9d8e145c7c6904 | 1,549 | py | Python | unet/unet_base_multi_arch.py | sudhamsugurijala/Sat_Image_Seg | d5bc530534e834bac6151aa5b420a3af6be5b363 | [
"MIT"
] | null | null | null | unet/unet_base_multi_arch.py | sudhamsugurijala/Sat_Image_Seg | d5bc530534e834bac6151aa5b420a3af6be5b363 | [
"MIT"
] | null | null | null | unet/unet_base_multi_arch.py | sudhamsugurijala/Sat_Image_Seg | d5bc530534e834bac6151aa5b420a3af6be5b363 | [
"MIT"
] | null | null | null | from unet.unet_helper import *
######################### BASELINE VANILLA UNET #########################################
def uNet(img_height=IMG_HEIGHT, img_width=IMG_WIDTH, nclasses=NUM_CLASSES, filters=16):
    """Build the baseline vanilla U-Net segmentation model.

    Backward compatible: ``nclasses`` still defaults to the global
    NUM_CLASSES, but the parameter is now actually honoured by the output
    layer (the original hard-coded NUM_CLASSES and silently ignored it).

    Returns a Keras ``Model`` mapping an (img_height, img_width, 3) image
    to per-pixel softmax class scores.
    """
    # --- Contraction (encoder) path: conv blocks + 2x2 max pooling ---
    input_layer = Input(shape=(img_height, img_width, 3), name='image_input')
    conv1 = conv_block(input_layer, nfilters=filters)
    conv1_out = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = conv_block(conv1_out, nfilters=filters*2)
    conv2_out = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = conv_block(conv2_out, nfilters=filters*4)
    conv3_out = MaxPooling2D(pool_size=(2, 2))(conv3)
    conv4 = conv_block(conv3_out, nfilters=filters*8)
    conv4_out = MaxPooling2D(pool_size=(2, 2))(conv4)
    conv4_out = Dropout(0.5)(conv4_out)
    # Bottleneck
    conv5 = conv_block(conv4_out, nfilters=filters*16)
    conv5 = Dropout(0.5)(conv5)

    # --- Expansion (decoder) path: deconv blocks with skip connections ---
    deconv6 = deconv_block(conv5, residual=conv4, nfilters=filters*8)
    deconv6 = Dropout(0.5)(deconv6)
    deconv7 = deconv_block(deconv6, residual=conv3, nfilters=filters*4)
    deconv7 = Dropout(0.5)(deconv7)
    deconv8 = deconv_block(deconv7, residual=conv2, nfilters=filters*2)
    deconv9 = deconv_block(deconv8, residual=conv1, nfilters=filters)

    # --- Per-pixel classification head ---
    # FIX: use the ``nclasses`` parameter instead of the hard-coded
    # NUM_CLASSES global so callers can actually change the class count.
    output_layer = Conv2D(filters=nclasses, kernel_size=(1, 1))(deconv9)
    output_layer = BatchNormalization()(output_layer)
    output_layer = Activation('softmax')(output_layer)

    model = Model(inputs=input_layer, outputs=output_layer, name='Unet')
    return model
| 41.864865 | 89 | 0.690768 |
acf9f20fc0a626bbeac5a81d3d9c4a3ee5a79094 | 594 | py | Python | example/metrics/management/commands/load_temperature.py | SyedAthar03/dj_with_TmscDB | f9f12d06df7a06e408290030f9a26266f91ae5fb | [
"Apache-2.0"
] | 91 | 2020-11-15T02:35:57.000Z | 2022-03-14T23:06:47.000Z | example/metrics/management/commands/load_temperature.py | SyedAthar03/dj_with_TmscDB | f9f12d06df7a06e408290030f9a26266f91ae5fb | [
"Apache-2.0"
] | 31 | 2020-11-16T10:47:19.000Z | 2022-03-14T21:09:02.000Z | example/metrics/management/commands/load_temperature.py | SyedAthar03/dj_with_TmscDB | f9f12d06df7a06e408290030f9a26266f91ae5fb | [
"Apache-2.0"
] | 18 | 2020-12-07T21:09:59.000Z | 2022-03-14T20:53:33.000Z | from django.core.management.base import BaseCommand, CommandError
from metrics.models import Metric
from django.utils import timezone
from random import uniform, choice
from datetime import timedelta
class Command(BaseCommand):
    """Seed the Metric table with synthetic temperature readings."""

    # FIX: the original help text claimed the command "Uses PSUTILS to read
    # any temperature sensor", but no psutil code exists here -- the command
    # only generates random sample data.
    help = 'Seeds the Metric table with 1000 random temperature readings at 5-minute intervals'

    # Fake device identifiers the generated readings are attributed to.
    DEVICES = [1234, 1245, 1236]

    def handle(self, *args, **options):
        # One reading every 5 minutes, walking backwards from "now";
        # temperatures are uniform in [51.1, 53.3] degrees.
        for i in range(1000):
            timestamp = timezone.now() - timedelta(minutes=i * 5)
            Metric.objects.create(time=timestamp, temperature=uniform(51.1, 53.3), device=choice(self.DEVICES))
| 39.6 | 111 | 0.713805 |
acf9f2802e3d6a0380038fc03fc9db5028bfe741 | 502 | py | Python | tests/r/test_lukas.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 199 | 2017-07-24T01:34:27.000Z | 2022-01-29T00:50:55.000Z | tests/r/test_lukas.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 46 | 2017-09-05T19:27:20.000Z | 2019-01-07T09:47:26.000Z | tests/r/test_lukas.py | hajime9652/observations | 2c8b1ac31025938cb17762e540f2f592e302d5de | [
"Apache-2.0"
] | 45 | 2017-07-26T00:10:44.000Z | 2022-03-16T20:44:59.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.lukas import lukas
def test_lukas():
    """Test module lukas.py by downloading
    lukas.csv and testing shape of
    extracted data has 85 rows and 4 columns

    Fixes over the original: ``raise()`` raised an empty tuple (a TypeError
    that masked the real assertion failure) instead of re-raising, the bare
    ``except:`` swallowed exception type information, and the temporary
    directory was leaked on success. ``finally`` now guarantees cleanup on
    both success and failure while preserving the original exception.
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = lukas(test_path)
        assert x_train.shape == (85, 4)
    finally:
        # Always remove the scratch directory; any exception propagates.
        shutil.rmtree(test_path)
acf9f3eaac2f33c6dd924dace51e87ca4d29a3ef | 2,918 | py | Python | django_ulogin/models.py | rmehtije/django-ulogin-1 | 0a5ab84e9b32427be222f611fd3e7cbf49851655 | [
"MIT"
] | 9 | 2015-04-21T16:03:27.000Z | 2021-12-25T01:04:44.000Z | django_ulogin/models.py | rmehtije/django-ulogin-1 | 0a5ab84e9b32427be222f611fd3e7cbf49851655 | [
"MIT"
] | 20 | 2015-06-07T13:53:56.000Z | 2021-04-15T06:41:10.000Z | django_ulogin/models.py | rmehtije/django-ulogin-1 | 0a5ab84e9b32427be222f611fd3e7cbf49851655 | [
"MIT"
] | 19 | 2015-07-03T06:24:53.000Z | 2021-04-19T11:01:07.000Z | import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.utils.module_loading import import_string
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from django_ulogin import settings as s
# Resolve which user model ULoginUser should point at: prefer the
# app-specific ULOGIN_USER_MODEL override, then the project's
# AUTH_USER_MODEL, and finally fall back to Django's default 'auth.User'.
AUTH_USER_MODEL = (
    getattr(settings, 'ULOGIN_USER_MODEL', None) or
    getattr(settings, 'AUTH_USER_MODEL', None) or
    'auth.User'
)
class ULoginUser(models.Model):
    """Links a local Django user to one uLogin social-network identity.

    Each row records the external provider (``network``) and the
    provider-side account (``uid``/``identity``) that a user authenticated
    with. A given (network, uid) pair can be linked at most once.
    """
    # Owning Django user; a user may have multiple linked identities.
    user = models.ForeignKey(AUTH_USER_MODEL,
                             related_name='ulogin_users',
                             verbose_name=_('user'),
                             on_delete=models.CASCADE)
    # Provider name, restricted to the providers enabled in settings.
    network = models.CharField(_('network'),
                               db_index=True,
                               max_length=255,
                               choices=s.ALLOWED_PROVIDERS)
    # Profile URL reported by the provider.
    identity = models.URLField(_('identity'),
                               db_index=True,
                               max_length=255)
    # Provider-side unique account identifier.
    uid = models.CharField(_('uid'),
                           db_index=True,
                           max_length=255)
    date_created = models.DateTimeField(_('date created'),
                                        editable=False,
                                        default=now)

    def __str__(self):
        # Render as the related user for admin/list displays.
        return str(self.user)

    def get_delete_url(self):
        # URL for unlinking this identity from the user.
        return reverse('ulogin_identities_delete', args=[self.pk])

    class Meta:
        app_label = 'django_ulogin'
        verbose_name = _('ulogin user')
        verbose_name_plural = _('ulogin users')
        # One provider account can be linked to only one local user.
        unique_together = [('network', 'uid')]
def create_user(request, ulogin_response):
    """Create and return a new user for an incoming ULOGIN response.

    If the project configures ``ULOGIN_CREATE_USER_CALLBACK`` (a dotted path
    to a callable accepting ``request`` and ``ulogin_response`` keyword
    arguments), that callable is imported and used instead of the default.
    A non-callable target raises ``ImproperlyConfigured``.

    The default behaviour creates a user with a random 30-character
    username, a random 10-character lowercase-alphanumeric password, and an
    empty email address.
    """
    callback_path = s.CREATE_USER_CALLBACK
    if callback_path is not None:
        # A project-supplied factory takes precedence over the default.
        callback = import_string(callback_path)
        if not callable(callback):
            raise ImproperlyConfigured(
                "The ULOGIN_CREATE_USER_CALLBACK isn't a callable"
            )
        return callback(request=request, ulogin_response=ulogin_response)

    # No callback configured: fall back to a randomly named user.
    user_model = get_user_model()
    random_username = uuid.uuid4().hex[:30]
    random_password = get_random_string(10, '0123456789abcdefghijklmnopqrstuvwxyz')
    return user_model.objects.create_user(
        username=random_username,
        password=random_password,
        email=''
    )
| 31.376344 | 79 | 0.620973 |
acf9f3ef4b1e10a43d818ac49a84ca1994b0bd45 | 687 | py | Python | test/AddressClassifier_test.py | WemyJu/TOC_proj | 465a8c88f8abb3e00a36765524a6d2789114c2d1 | [
"MIT"
] | 1 | 2021-09-03T15:53:01.000Z | 2021-09-03T15:53:01.000Z | test/AddressClassifier_test.py | WemyJu/TOC_proj | 465a8c88f8abb3e00a36765524a6d2789114c2d1 | [
"MIT"
] | null | null | null | test/AddressClassifier_test.py | WemyJu/TOC_proj | 465a8c88f8abb3e00a36765524a6d2789114c2d1 | [
"MIT"
] | null | null | null | import unittest
import os
import sys
sys.path.append(os.path.join('../lib'))
from addressClassifier import AddressClassifier
import json
class outputTest(unittest.TestCase):
    """Unit tests for AddressClassifier.classify."""

    def test_classify(self):
        """Classify the bundled sample data and verify the grouping counts.

        Expects two top-level city groups, two districts under 臺北市, and
        nine entries under 臺北市 / 文山區.
        """
        ac = AddressClassifier()
        # Context manager guarantees the file handle is closed even when
        # json.loads raises on malformed sample data (the original used a
        # manual open/close pair that leaked on error).
        with open("./sample_data_for_AddressClassifier", "r") as f:
            sampleData = json.loads(f.read())
        ac.classify(sampleData)
        # Fetch the result once instead of re-calling the accessor per assert.
        classified = ac.getClassifiedData()
        self.assertEqual(len(classified), 2, "classify error")
        self.assertEqual(len(classified['臺北市']), 2, "classify error")
        self.assertEqual(len(classified['臺北市']['文山區']), 9, "classify error")
# Allow running this test module directly (python AddressClassifier_test.py).
if __name__ == '__main__':
    unittest.main()
| 28.625 | 88 | 0.672489 |
acf9f4aa12fe31bd7225f696824684dfd9cbfba0 | 1,646 | py | Python | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | training/diagnostic.py | kerryvernebegeman/Kerry-Verne-Begeman | eb6ee851003d435c5658f9cc0a41d72ea8addceb | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import tensorflow as tf
import dnnlib
import dnnlib.tflib as tflib
from dnnlib.tflib.autosummary import autosummary
from training import dataset
from training import misc
from metrics import metric_base
def create_initial_pkl(
        G_args = None, # Options for generator network (dict; treated as {} when None).
        D_args = None, # Options for discriminator network (dict; treated as {} when None).
        tf_config = None, # Options for tflib.init_tf() (dict; treated as {} when None).
        config_id = "config-f", # config-f is the only one tested ...
        num_channels = 3, # number of channels, e.g. 3 for RGB
        resolution_h = 1024, # height dimension of real/fake images
        resolution_w = 1024, # width dimension of real/fake images
        label_size = 0, # number of labels for a conditional model
    ):
    """Construct untrained G/D/Gs networks and save them to an initial .pkl.

    The mutable dict defaults (``{}``) of the original were replaced with
    ``None`` sentinels so repeated calls can never share (and accidentally
    mutate) a single default dict; behaviour for all existing callers is
    unchanged.
    """
    # Substitute fresh dicts for the None sentinels.
    G_args = {} if G_args is None else G_args
    D_args = {} if D_args is None else D_args
    tf_config = {} if tf_config is None else tf_config

    # Initialize dnnlib and TensorFlow.
    tflib.init_tf(tf_config)
    # NOTE(review): only the height sizes the networks; resolution_w is used
    # solely in the output filename -- confirm square images are intended.
    resolution = resolution_h # training_set.shape[1]

    # Construct or load networks.
    with tf.device('/gpu:0'):
        print('Constructing networks...')
        G = tflib.Network('G', num_channels=num_channels, resolution=resolution, label_size=label_size, **G_args)
        D = tflib.Network('D', num_channels=num_channels, resolution=resolution, label_size=label_size, **D_args)
        Gs = G.clone('Gs')

    # Print layers and save the initial (untrained) network pickle.
    G.print_layers(); D.print_layers()
    pkl = 'network-initial-%s-%sx%s-%s.pkl' % (config_id, resolution_w, resolution_h, label_size)
    misc.save_pkl((G, D, Gs), pkl)
    print("Saving",pkl)
acf9f4cb736f9fe381ff68f95d30b5628380cd95 | 22,410 | py | Python | apps/spectral_indices/tasks.py | lefkats94/dc | c0105ce78482957f0c32ed57548fb299dd800e51 | [
"Apache-2.0"
] | null | null | null | apps/spectral_indices/tasks.py | lefkats94/dc | c0105ce78482957f0c32ed57548fb299dd800e51 | [
"Apache-2.0"
] | null | null | null | apps/spectral_indices/tasks.py | lefkats94/dc | c0105ce78482957f0c32ed57548fb299dd800e51 | [
"Apache-2.0"
] | null | null | null | from django.db.models import F
from celery.task import task
from celery import chain, group, chord
from celery.utils.log import get_task_logger
from datetime import datetime, timedelta
import xarray as xr
import os
import stringcase
from utils.data_cube_utilities.data_access_api import DataAccessApi
from utils.data_cube_utilities.dc_utilities import (create_cfmask_clean_mask, create_bit_mask, write_geotiff_from_xr,
write_png_from_xr, write_single_band_png_from_xr,
add_timestamp_data_to_xr, clear_attrs)
from utils.data_cube_utilities.dc_chunker import (create_geographic_chunks, create_time_chunks,
combine_geographic_chunks)
from apps.dc_algorithm.utils import create_2d_plot, _get_datetime_range_containing
from utils.data_cube_utilities.import_export import export_xarray_to_netcdf
from .models import SpectralIndicesTask
from apps.dc_algorithm.models import Satellite
from apps.dc_algorithm.tasks import DCAlgorithmBase, check_cancel_task, task_clean_up
logger = get_task_logger(__name__)
# Spectral index formulas keyed by the short name stored in
# task.query_type.result_id. Each value maps an xarray Dataset holding the
# standard measurement bands (blue, red, nir, swir1, swir2) to a DataArray
# of the computed index.
spectral_indices_map = {
    'ndvi': lambda ds: (ds.nir - ds.red) / (ds.nir + ds.red),
    'evi': lambda ds: 2.5 * (ds.nir - ds.red) / (ds.nir + 6 * ds.red - 7.5 * ds.blue + 1),
    'savi': lambda ds: (ds.nir - ds.red) / (ds.nir + ds.red + 0.5) * (1.5),
    'nbr': lambda ds: (ds.nir - ds.swir2) / (ds.nir + ds.swir2),
    'nbr2': lambda ds: (ds.swir1 - ds.swir2) / (ds.swir1 + ds.swir2),
    'ndwi': lambda ds: (ds.nir - ds.swir1) / (ds.nir + ds.swir1),
    'ndbi': lambda ds: (ds.swir1 - ds.nir) / (ds.nir + ds.swir1),
}
class BaseTask(DCAlgorithmBase):
    """Celery base task tying all tasks in this module to the spectral_indices app."""
    app_name = 'spectral_indices'
@task(name="spectral_indices.pixel_drill", base=BaseTask)
def pixel_drill(task_id=None):
    """Plot the time series of every spectral index for a single pixel.

    Loads the single latitude/longitude point selected by the task, computes
    each index in the module-level ``spectral_indices_map``, and writes a
    multi-panel 2d plot (plus the clear-pixel mask) to the task's result path.
    Requires at least two acquisitions; otherwise the task is marked ERROR.
    """
    parameters = parse_parameters_from_task(task_id=task_id)
    validate_parameters(parameters, task_id=task_id)
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if task.status == "ERROR":
        return None
    dc = DataAccessApi(config=task.config_path)
    # isel(0, 0) reduces the loaded extent to a single pixel's time series.
    single_pixel = dc.get_dataset_by_extent(**parameters).isel(latitude=0, longitude=0)
    clear_mask = task.satellite.get_clean_mask_func()(single_pixel)
    # Mask the satellite's no-data value out of every band.
    single_pixel = single_pixel.where(single_pixel != task.satellite.no_data_value)
    dates = single_pixel.time.values
    if len(dates) < 2:
        task.update_status("ERROR", "There is only a single acquisition for your parameter set.")
        return None
    # Compute each supported index as a new data variable on the pixel series.
    for spectral_index in spectral_indices_map:
        single_pixel[spectral_index] = spectral_indices_map[spectral_index](single_pixel)
    # Plot only the derived indices, not the raw satellite measurements.
    exclusion_list = task.satellite.get_measurements()
    plot_measurements = [band for band in single_pixel.data_vars if band not in exclusion_list]
    datasets = [single_pixel[band].values.transpose() for band in plot_measurements] + [clear_mask]
    data_labels = [stringcase.uppercase("{}".format(band)) for band in plot_measurements] + ["Clear"]
    titles = [stringcase.uppercase("{}".format(band)) for band in plot_measurements] + ["Clear Mask"]
    # Matplotlib style codes, one per plotted series.
    style = ['ro', 'go', 'bo', 'co', 'mo', 'yo', 'ko', '.']
    task.plot_path = os.path.join(task.get_result_path(), "plot_path.png")
    create_2d_plot(task.plot_path, dates=dates, datasets=datasets, data_labels=data_labels, titles=titles, style=style)
    task.complete = True
    task.update_status("OK", "Done processing pixel drill.")
@task(name="spectral_indices.run", base=BaseTask)
def run(task_id=None):
    """Responsible for launching task processing using celery asynchronous processes

    Chains the parsing of parameters, validation, chunking, and the start to data processing.
    Each stage's return value is piped into the next stage of the Celery chain.
    """
    return chain(parse_parameters_from_task.s(task_id=task_id),
                 validate_parameters.s(task_id=task_id),
                 perform_task_chunking.s(task_id=task_id),
                 start_chunk_processing.s(task_id=task_id))()
@task(name="spectral_indices.parse_parameters_from_task", base=BaseTask, bind=True)
def parse_parameters_from_task(self, task_id=None):
    """Parse out required DC parameters from the task model.

    See the DataAccessApi docstrings for more information.
    Parses out platforms, products, etc. to be used with DataAccessApi calls.

    If this is a multisensor app, platform and product should be pluralized and used
    with the get_stacked_datasets_by_extent call rather than the normal get.

    Returns:
        parameter dict with all keyword args required to load data.
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)

    parameters = {
        'platform': task.satellite.datacube_platform,
        'product': task.satellite.get_products(task.area_id)[0],
        'time': (task.time_start, task.time_end),
        'longitude': (task.longitude_min, task.longitude_max),
        'latitude': (task.latitude_min, task.latitude_max),
        'measurements': task.satellite.get_measurements()
    }

    # Record when processing actually began for this task.
    task.execution_start = datetime.now()
    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Parsed out parameters.")

    return parameters
@task(name="spectral_indices.validate_parameters", base=BaseTask, bind=True)
def validate_parameters(self, parameters, task_id=None):
    """Validate parameters generated by the parameter parsing task

    All validation should be done here - are there data restrictions?
    Combinations that aren't allowed? etc.

    Returns:
        parameter dict with all keyword args required to load data.
        -or-
        updates the task with ERROR and a message, returning None
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    dc = DataAccessApi(config=task.config_path)

    # Validate for any number of criteria here - num acquisitions, etc.
    acquisitions = dc.list_acquisition_dates(**parameters)

    # At least one acquisition is required to produce any output.
    if len(acquisitions) < 1:
        task.complete = True
        task.update_status("ERROR", "There are no acquistions for this parameter set.")
        return None

    # Median-pixel compositing loads all data at once, so cap the time span.
    if not task.compositor.is_iterative() and (task.time_end - task.time_start).days > 367:
        task.complete = True
        task.update_status("ERROR", "Median pixel operations are only supported for single year time periods.")
        return None

    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Validated parameters.")

    # Ensure the Satellite model's measurement list matches the product.
    if not dc.validate_measurements(parameters['product'], parameters['measurements']):
        task.complete = True
        task.update_status(
            "ERROR",
            "The provided Satellite model measurements aren't valid for the product. Please check the measurements listed in the {} model.".
            format(task.satellite.name))
        return None

    dc.close()
    return parameters
@task(name="spectral_indices.perform_task_chunking", base=BaseTask, bind=True)
def perform_task_chunking(self, parameters, task_id=None):
    """Chunk parameter sets into more manageable sizes

    Uses functions provided by the task model to create a group of
    parameter sets that make up the arg.

    Args:
        parameters: parameter stream containing all kwargs to load data

    Returns:
        parameters with a list of geographic and time ranges
    """
    # A None from the previous chain stage means validation failed; pass it on.
    if parameters is None:
        return None

    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    dc = DataAccessApi(config=task.config_path)
    dates = dc.list_acquisition_dates(**parameters)
    task_chunk_sizing = task.get_chunk_size()

    # Split the full extent into smaller lat/lon tiles.
    geographic_chunks = create_geographic_chunks(
        longitude=parameters['longitude'],
        latitude=parameters['latitude'],
        geographic_chunk_size=task_chunk_sizing['geographic'])

    # Split the acquisition dates into groups, optionally reversed in time.
    time_chunks = create_time_chunks(
        dates, _reversed=task.get_reverse_time(), time_chunk_size=task_chunk_sizing['time'])
    logger.info("Time chunks: {}, Geo chunks: {}".format(len(time_chunks), len(geographic_chunks)))

    dc.close()
    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Chunked parameter set.")

    return {'parameters': parameters, 'geographic_chunks': geographic_chunks, 'time_chunks': time_chunks}
@task(name="spectral_indices.start_chunk_processing", base=BaseTask, bind=True)
def start_chunk_processing(self, chunk_details, task_id=None):
    """Create a fully asyncrhonous processing pipeline from paramters and a list of chunks.

    The most efficient way to do this is to create a group of time chunks for each geographic chunk,
    recombine over the time index, then combine geographic last.
    If we create an animation, this needs to be reversed - e.g. group of geographic for each time,
    recombine over geographic, then recombine time last.

    The full processing pipeline is completed, then the create_output_products task is triggered, completing the task.
    """
    # A None from the previous chain stage means chunking/validation failed.
    if chunk_details is None:
        return None

    parameters = chunk_details.get('parameters')
    geographic_chunks = chunk_details.get('geographic_chunks')
    time_chunks = chunk_details.get('time_chunks')

    task = SpectralIndicesTask.objects.get(pk=task_id)

    # Track task progress.
    num_scenes = len(geographic_chunks) * sum([len(time_chunk) for time_chunk in time_chunks])
    # Scene processing progress is tracked in processing_task().
    task.total_scenes = num_scenes
    task.scenes_processed = 0
    task.save(update_fields=['total_scenes', 'scenes_processed'])

    if check_cancel_task(self, task): return
    task.update_status("WAIT", "Starting processing.")

    logger.info("START_CHUNK_PROCESSING")

    # Per geographic chunk: process all time chunks in parallel, recombine
    # them over time, then apply the band math. Once every geographic chunk
    # finishes, combine geographically, write outputs, and clean up.
    processing_pipeline = (group([
        group([
            processing_task.s(
                task_id=task_id,
                geo_chunk_id=geo_index,
                time_chunk_id=time_index,
                geographic_chunk=geographic_chunk,
                time_chunk=time_chunk,
                **parameters) for time_index, time_chunk in enumerate(time_chunks)
        ]) | recombine_time_chunks.s(task_id=task_id) | process_band_math.s(task_id=task_id)
        for geo_index, geographic_chunk in enumerate(geographic_chunks)
    ]) | recombine_geographic_chunks.s(task_id=task_id)
       | create_output_products.s(task_id=task_id)
       | task_clean_up.si(task_id=task_id, task_model='SpectralIndicesTask')).apply_async()

    return True
@task(name="spectral_indices.processing_task", acks_late=True, base=BaseTask, bind=True)
def processing_task(self,
                    task_id=None,
                    geo_chunk_id=None,
                    time_chunk_id=None,
                    geographic_chunk=None,
                    time_chunk=None,
                    **parameters):
    """Process a parameter set and save the results to disk.

    Uses the geographic and time chunk id to identify output products.
    **params is updated with time and geographic ranges then used to load data.
    the task model holds the iterative property that signifies whether the algorithm
    is iterative or if all data needs to be loaded at once.

    Args:
        task_id, geo_chunk_id, time_chunk_id: identification for the main task and what chunk this is processing
        geographic_chunk: range of latitude and longitude to load - dict with keys latitude, longitude
        time_chunk: list of acquisition dates
        parameters: all required kwargs to load data.

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    chunk_id = "_".join([str(geo_chunk_id), str(time_chunk_id)])
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return

    logger.info("Starting chunk: " + chunk_id)
    # Temp path missing implies the task was cleaned up/cancelled elsewhere.
    if not os.path.exists(task.get_temp_path()):
        return None

    metadata = {}

    # Iterative compositors process one acquisition at a time; otherwise the
    # whole time chunk is loaded as a single range.
    times = list(
        map(_get_datetime_range_containing, time_chunk)
        if task.get_iterative() else [_get_datetime_range_containing(time_chunk[0], time_chunk[-1])])
    dc = DataAccessApi(config=task.config_path)
    updated_params = parameters
    updated_params.update(geographic_chunk)
    iteration_data = None
    for time_index, time in enumerate(times):
        updated_params.update({'time': time})
        data = dc.get_dataset_by_extent(**updated_params)
        if check_cancel_task(self, task): return
        if data is None:
            logger.info("Empty chunk.")
            continue
        if 'time' not in data:
            logger.info("Invalid chunk.")
            continue

        clear_mask = task.satellite.get_clean_mask_func()(data)
        add_timestamp_data_to_xr(data)

        metadata = task.metadata_from_dataset(metadata, data, clear_mask, updated_params)

        # Fold this acquisition into the running composite.
        iteration_data = task.get_processing_method()(data,
                                                      clean_mask=clear_mask,
                                                      intermediate_product=iteration_data,
                                                      no_data=task.satellite.no_data_value,
                                                      reverse_time=task.get_reverse_time())
        if check_cancel_task(self, task): return

        # F() expression avoids a read-modify-write race between workers.
        task.scenes_processed = F('scenes_processed') + 1
        task.save(update_fields=['scenes_processed'])

    if iteration_data is None:
        return None
    path = os.path.join(task.get_temp_path(), chunk_id + ".nc")
    export_xarray_to_netcdf(iteration_data, path)
    dc.close()
    logger.info("Done with chunk: " + chunk_id)
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="spectral_indices.recombine_time_chunks", base=BaseTask, bind=True)
def recombine_time_chunks(self, chunks, task_id=None):
    """Recombine processed chunks over the time index.

    Open time chunked processed datasets and recombine them using the same function
    that was used to process them. This assumes an iterative algorithm - if it is not, then it will
    simply return the data again.

    Args:
        chunks: list of the return from the processing_task function - path, metadata, and {chunk ids}

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return

    # Sorting based on time id - earlier processed first as they're incremented e.g. 0, 1, 2..
    chunks = chunks if isinstance(chunks, list) else [chunks]
    chunks = [chunk for chunk in chunks if chunk is not None]
    if len(chunks) == 0:
        return None

    total_chunks = sorted(chunks, key=lambda x: x[0])
    geo_chunk_id = total_chunks[0][2]['geo_chunk_id']
    time_chunk_id = total_chunks[0][2]['time_chunk_id']
    metadata = {}
    combined_data = None
    for index, chunk in enumerate(total_chunks):
        metadata.update(chunk[1])
        data = xr.open_dataset(chunk[0])
        # The first chunk seeds the composite; later chunks are folded in.
        if combined_data is None:
            combined_data = data
            continue
        # Give time an indice to keep mosaicking from breaking.
        data = xr.concat([data], 'time')
        data['time'] = [0]
        clear_mask = task.satellite.get_clean_mask_func()(data)
        combined_data = task.get_processing_method()(data,
                                                     clean_mask=clear_mask,
                                                     intermediate_product=combined_data,
                                                     no_data=task.satellite.no_data_value,
                                                     reverse_time=task.get_reverse_time())
        if check_cancel_task(self, task): return

    if combined_data is None:
        return None
    path = os.path.join(task.get_temp_path(), "recombined_time_{}.nc".format(geo_chunk_id))
    export_xarray_to_netcdf(combined_data, path)
    logger.info("Done combining time chunks for geo: " + str(geo_chunk_id))
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="spectral_indices.process_band_math", base=BaseTask, bind=True)
def process_band_math(self, chunk, task_id=None):
    """Apply some band math to a chunk and return the args

    Opens the chunk dataset and applys some band math defined by _apply_band_math(dataset)
    _apply_band_math creates some product using the bands already present in the dataset and
    returns the dataarray. The data array is then appended under 'band_math', then saves the
    result to disk in the same path as the nc file already exists.
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return

    def _apply_band_math(dataset):
        # Look up the formula for the user's selected index in the
        # module-level spectral_indices_map.
        return spectral_indices_map[task.query_type.result_id](dataset)

    if chunk is None:
        return None

    # .load() pulls the data into memory so the file can be deleted and
    # rewritten in place below.
    dataset = xr.open_dataset(chunk[0]).load()
    dataset['band_math'] = _apply_band_math(dataset)
    # Remove previous nc and write band math to disk.
    os.remove(chunk[0])
    export_xarray_to_netcdf(dataset, chunk[0])
    return chunk
@task(name="spectral_indices.recombine_geographic_chunks", base=BaseTask, bind=True)
def recombine_geographic_chunks(self, chunks, task_id=None):
    """Recombine processed data over the geographic indices

    For each geographic chunk process spawned by the main task, open the resulting dataset
    and combine it into a single dataset. Combine metadata as well, writing to disk.

    Args:
        chunks: list of the return from the processing_task function - path, metadata, and {chunk ids}

    Returns:
        path to the output product, metadata dict, and a dict containing the geo/time ids
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return

    # Normalize to a list and drop any failed (None) chunk results.
    total_chunks = [chunks] if not isinstance(chunks, list) else chunks
    total_chunks = [chunk for chunk in total_chunks if chunk is not None]
    if len(total_chunks) == 0:
        return None
    geo_chunk_id = total_chunks[0][2]['geo_chunk_id']
    time_chunk_id = total_chunks[0][2]['time_chunk_id']

    metadata = {}
    chunk_data = []
    for index, chunk in enumerate(total_chunks):
        metadata = task.combine_metadata(metadata, chunk[1])
        chunk_data.append(xr.open_dataset(chunk[0]))

    combined_data = combine_geographic_chunks(chunk_data)

    path = os.path.join(task.get_temp_path(), "recombined_geo_{}.nc".format(time_chunk_id))
    export_xarray_to_netcdf(combined_data, path)
    logger.info("Done combining geographic chunks for time: " + str(time_chunk_id))
    return path, metadata, {'geo_chunk_id': geo_chunk_id, 'time_chunk_id': time_chunk_id}
@task(name="spectral_indices.create_output_products", base=BaseTask, bind=True)
def create_output_products(self, data, task_id=None):
    """Create the final output products for this algorithm.

    Open the final dataset and metadata and generate all remaining metadata.
    Convert and write the dataset to various formats (NetCDF, GeoTIFF, PNG
    mosaic, single-band index PNG) and register all values in the task model.
    Update status and exit.

    Args:
        data: tuple in the format of processing_task function - path, metadata, and {chunk ids}
    """
    task = SpectralIndicesTask.objects.get(pk=task_id)
    if check_cancel_task(self, task): return
    full_metadata = data[1]
    dataset = xr.open_dataset(data[0])

    # Register the output file locations on the task model.
    task.result_path = os.path.join(task.get_result_path(), "band_math.png")
    task.mosaic_path = os.path.join(task.get_result_path(), "png_mosaic.png")
    task.data_path = os.path.join(task.get_result_path(), "data_tif.tif")
    task.data_netcdf_path = os.path.join(task.get_result_path(), "data_netcdf.nc")
    task.final_metadata_from_dataset(dataset)
    task.metadata_from_dict(full_metadata)

    # Export the raw measurements plus the computed index band.
    bands = task.satellite.get_measurements() + ['band_math']

    export_xarray_to_netcdf(dataset, task.data_netcdf_path)
    write_geotiff_from_xr(task.data_path, dataset.astype('int32'), bands=bands, no_data=task.satellite.no_data_value)
    write_png_from_xr(
        task.mosaic_path,
        dataset,
        bands=['red', 'green', 'blue'],
        scale=task.satellite.get_scale(),
        no_data=task.satellite.no_data_value)
    # Color-mapped PNG of the computed index itself.
    write_single_band_png_from_xr(
        task.result_path,
        dataset,
        band='band_math',
        color_scale=task.color_scale_path.get(task.query_type.result_id),
        no_data=task.satellite.no_data_value)

    # Plot per-acquisition clean-pixel percentages when more than one exists.
    dates = list(map(lambda x: datetime.strptime(x, "%m/%d/%Y"), task._get_field_as_list('acquisition_list')))
    if len(dates) > 1:
        task.plot_path = os.path.join(task.get_result_path(), "plot_path.png")
        create_2d_plot(
            task.plot_path,
            dates=dates,
            datasets=task._get_field_as_list('clean_pixel_percentages_per_acquisition'),
            data_labels="Clean Pixel Percentage (%)",
            titles="Clean Pixel Percentage Per Acquisition")

    logger.info("All products created.")
    # task.update_bounds_from_dataset(dataset)  # disabled upstream
    task.complete = True
    task.execution_end = datetime.now()
    task.update_status("OK", "All products have been generated. Your result will be loaded on the map.")
    return True
acf9f69f4dc3c09cb443f9d56b5dad1a01b8f7b1 | 10,651 | py | Python | mossspider/dgm.py | pzivich/MossSpider | 43cb6d22959afb47a9862f73754965473f42ddc1 | [
"MIT"
] | 1 | 2022-03-26T18:49:26.000Z | 2022-03-26T18:49:26.000Z | mossspider/dgm.py | pzivich/MossSpider | 43cb6d22959afb47a9862f73754965473f42ddc1 | [
"MIT"
] | null | null | null | mossspider/dgm.py | pzivich/MossSpider | 43cb6d22959afb47a9862f73754965473f42ddc1 | [
"MIT"
] | null | null | null | import warnings
import numpy as np
import networkx as nx
from scipy.stats import logistic
from mossspider.estimators.utils import fast_exp_map
def uniform_network(n, degree, pr_w=0.35, seed=None):
    """Generates a uniform random graph for a set number of nodes (n) and specified max and min degree (degree).
    Additionally, assigns a binary baseline covariate, W, to each observation.

    Parameters
    ----------
    n : int
        Number of nodes in the generated network
    degree : list, set, array
        An array of two elements. The first element is the minimum degree and the second element is the maximum degree.
    pr_w : float, optional
        Probability of W=1. W is a binary baseline covariate assigned to each unit.
    seed : int, None, optional
        Random seed to use. Default is None.

    Returns
    -------
    networkx.Graph

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import uniform_network

    Generating the uniform network

    >>> G = uniform_network(n=500, degree=[0, 2])
    """
    rng = np.random.default_rng(seed)

    # Validate the degree bounds. The scalar check now runs before len() and
    # the element checks before the ordering comparison, so invalid inputs
    # raise the intended ValueError instead of an incidental TypeError.
    if type(degree) is float or type(degree) is int or type(degree) is str:
        raise ValueError("degree must be a container of integers")
    if len(degree) > 2:
        warnings.warn('It looks like your specified bounds is more than two floats. Only the first two '
                      'specified bounds are used by the bound statement. So only ' +
                      str(degree[0:2]) + ' will be used', UserWarning)
    if type(degree[0]) is str or type(degree[1]) is str:
        raise ValueError('degree must be integers')
    elif type(degree[0]) is float or type(degree[1]) is float:
        raise ValueError('degree must be integers')
    elif degree[0] > degree[1]:
        raise ValueError('degree thresholds must be listed in ascending order')
    elif degree[0] < 0 or degree[1] < 0:
        raise ValueError('Both degree values must be positive values')

    # A graphical degree sequence must sum to an even number, so redraw the
    # proposed distribution until it does. (The accumulator was renamed from
    # `sum`, which shadowed the builtin.)
    degree_total = 1
    while degree_total % 2 != 0:                     # Degree distribution must be even
        degree_dist = list(rng.integers(degree[0],   # ... proposed distribution for min degree
                                        degree[1]+1, # ... and max degree (+1 to be inclusive)
                                        size=n))     # ... for the n units
        degree_total = np.sum(degree_dist)           # ... update the sum to see if valid

    # Generate network with proposed degree distribution
    G = nx.configuration_model(degree_dist,          # Generate network
                               seed=seed)            # ... with seed for consistency
    # Removing multiple edges!
    G = nx.Graph(G)                                  # No multi-loops in networks we consider here
    # Removing self-loops
    G.remove_edges_from(nx.selfloop_edges(G))        # No self-loops in networks we consider here

    # Generating baseline covariate W
    w = rng.binomial(n=1, p=pr_w, size=n)            # Generate W
    for node in G.nodes():                           # Adding W to the network node attributes
        G.nodes[node]['W'] = w[node]                 # ... via simple indexing

    # Returning the completed graph
    return G
def clustered_power_law_network(n_cluster, edges=3, pr_cluster=0.75, pr_between=0.0007, pr_w=0.35, seed=None):
    """Generate a graph with the following features: follows a power-law degree distribution, high(er) clustering
    coefficient, and an underlying community structure. This graph is created by generating a number of subgraphs with
    power-law distributions and clustering. The subgraphs are generated using
    ``networkx.powerlaw_cluster_graph(n=n_cluster[...], m=edges, p=p_cluster)``. This process is repeated for each
    element in the ``n_cluster`` argument. Then the subgraphs are then randomly connected by creating random edges
    between nodes of the subgraphs.

    Parameters
    ----------
    n_cluster : list, set, array, ndarray
        Specify the N for each subgraph in the clustered power-law network via a list. List should be positive integers
        that correspond to the N for each subgraph.
    edges : int, optional
        Number of edges to generate within each cluster. Equivalent to the ``m`` argument in
        ``networkx.powerlaw_cluster_graph``.
    pr_cluster : float, optional
        Probability of a new node forming a triad with neighbors of connected nodes
    pr_between : float, optional
        Probability of an edge between nodes of each cluster. Evaluated for all node pairs, so should be relatively
        low to keep a high community structure. Default is 0.0007.
    pr_w : float, optional
        Probability of the binary baseline covariate W for the network. Default is 0.35.
    seed : int, None, optional
        Random seed. Default is None.

    Returns
    -------
    networkx.Graph

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import clustered_power_law_network

    Generating the clustered power-law network

    >>> G = clustered_power_law_network(n_cluster=[50, 50, 50, 50])
    """
    # Prep environment
    rng = np.random.default_rng(seed)
    N = nx.Graph()
    for i in range(len(n_cluster)):
        # Generate the component / subgraph with its own derived seed.
        G = nx.powerlaw_cluster_graph(int(n_cluster[i]),
                                      m=edges,
                                      p=pr_cluster,
                                      seed=int(rng.integers(10000, 500000, size=1)[0]))
        # Re-label nodes so no corresponding overlaps between node labels:
        # cluster i's nodes become sum(n_cluster[:i]) .. sum(n_cluster[:i+1])-1.
        if i == 0:
            start_label = 0
        else:
            start_label = np.sum(n_cluster[:i])
        mapping = {}
        for j in range(n_cluster[i]):
            mapping[j] = start_label + j
        H = nx.relabel_nodes(G, mapping)
        # Adding component / subgraph to overall network
        N.add_nodes_from(H.nodes)
        N.add_edges_from(H.edges)
    # Creating some random connections across groups
    for i in range(len(n_cluster)):
        # Gettings IDs for the subgraph
        first_id = int(np.sum(n_cluster[:i]))
        last_id = int(np.sum(n_cluster[:i + 1]))
        # Only adding edges to > last_id
        # NOTE(review): the j range (first_id+1 .. last_id) is offset by one
        # relative to this cluster's node ids (first_id .. last_id-1), and
        # targets with n == last_id are excluded -- confirm this offset is
        # intended before relying on exact between-cluster edge placement.
        for j in range(first_id + 1, last_id + 1):
            for n in list(N.nodes()):
                if n > last_id:
                    if rng.uniform(0, 1) < pr_between:
                        N.add_edge(j, n)
    # Generating baseline covariate W
    w = rng.binomial(n=1, p=pr_w, size=np.sum(n_cluster))  # Generate W
    for node in N.nodes():                                 # Adding W to the network node attributes
        N.nodes[node]['W'] = w[node]                       # ... via simple indexing
    # Returning the generated network
    return N
def generate_observed(graph, seed=None):
    r"""Draw exposure ``A`` and outcome ``Y`` for every node of a network.

    The generating mechanisms follow Sofrygin & van der Laan (2017):

    .. math::

        A = \text{Bernoulli}(\text{expit}(-1.2 + 1.5 W + 0.6 W^s)) \\
        Y = \text{Bernoulli}(\text{expit}(-2.5 + 0.5 A + 1.5 A^s + 1.5 W + 1.5 W^s))

    where :math:`W^s` and :math:`A^s` are the summary measures (here the sum
    of ``W`` / ``A`` over each node's direct neighbors). Both draws are stored
    on the graph as node attributes.

    Parameters
    ----------
    graph : Graph
        Network whose nodes carry the binary attribute ``'W'`` (e.g. the
        output of the `uniform_network` function).
    seed : int, None, optional
        Random seed to use. Default is None.

    Returns
    -------
    Network object with ``'A'`` and ``'Y'`` added to the node attributes.

    References
    ----------
    Sofrygin O, & van der Laan MJ. (2017). Semi-parametric estimation and
    inference for the mean outcome of the single time-point intervention in a
    causally connected population. *Journal of Causal Inference*, 5(1).
    """
    rng = np.random.default_rng(seed)
    node_count = len(graph.nodes())
    w = np.array([attrs['W'] for _, attrs in graph.nodes(data=True)])
    adj_mat = nx.adjacency_matrix(graph)

    # Exposure model: own W plus the neighborhood sum of W.
    w_s = fast_exp_map(adj_mat, w, measure='sum')
    a = rng.binomial(n=1, p=logistic.cdf(-1.2 + 1.5*w + 0.6*w_s), size=node_count)
    for node in graph.nodes():
        graph.nodes[node]['A'] = a[node]

    # Outcome model: W, A, and their neighborhood sums.
    a_s = fast_exp_map(adj_mat, a, measure='sum')
    y = rng.binomial(n=1, p=logistic.cdf(-2.5 + 1.5*w + 0.5*a + 1.5*a_s + 1.5*w_s), size=node_count)
    for node in graph.nodes():
        graph.nodes[node]['Y'] = y[node]

    return graph
def generate_truth(graph, p, seed=None):
    r"""Simulate the true conditional mean outcome for a network under a policy.

    Exposures are drawn independently under the stochastic policy and the
    outcome follows the same mechanism as ``generate_observed``:

    .. math::

        A = \text{Bernoulli}(p) \\
        Y = \text{Bernoulli}(\text{expit}(-2.5 + 1.5 W + 0.5 A + 1.5 A^s + 1.5 W^s))

    where :math:`A^s` / :math:`W^s` are the neighborhood sums of ``A`` / ``W``.

    Parameters
    ----------
    graph : Graph
        Network with a binary node attribute ``'W'`` (e.g. from
        `uniform_network`).
    p : float
        Exposure probability under the stochastic policy of interest.
    seed : int, None, optional
        Random seed. Default is None (non-deterministic), which matches the
        previous behavior; accepting a seed makes this consistent with
        `generate_observed` and allows reproducible truth calculations.

    Returns
    -------
    float
        Mean simulated outcome for this single replicate.

    Examples
    --------
    Loading the necessary functions

    >>> from mossspider.dgm import uniform_network, generate_truth

    Generating the uniform network

    >>> G = uniform_network(n=500, degree=[0, 2])

    Calculating truth for a policy via a large number of replicates

    >>> true_p = []
    >>> for i in range(1000):
    >>>     y_mean = generate_truth(graph=G, p=0.5)
    >>>     true_p.append(y_mean)
    >>> np.mean(true_p)  # 'true' value for the stochastic policy

    To reduce random error, a large number of replicates should be used.
    """
    rng = np.random.default_rng(seed)
    n = len(graph.nodes())
    w = np.array([d['W'] for _, d in graph.nodes(data=True)])
    # Draw exposures under the policy and attach them to the network
    a = rng.binomial(n=1, p=p, size=n)
    for node in graph.nodes():
        graph.nodes[node]['A'] = a[node]
    # Summary measures: neighborhood sums of W and A
    adj_mat = nx.adjacency_matrix(graph)
    w_s = fast_exp_map(adj_mat,
                       w,
                       measure='sum')
    a_s = fast_exp_map(adj_mat,
                       a,
                       measure='sum')
    # Outcome mechanism; return the replicate's mean outcome
    y = rng.binomial(n=1, p=logistic.cdf(-2.5 + 1.5*w + 0.5*a + 1.5*a_s + 1.5*w_s), size=n)
    return np.mean(y)
| 36.854671 | 119 | 0.604356 |
acf9f6d7eb89b75e59d527bf6eb2a98e1726ae60 | 13,071 | py | Python | bucky/collectd.py | CollabNet/puppet-bucky | 6c12cdba59ef18dc8d1b8ae3fa2c3296f1b74cae | [
"Apache-2.0"
] | null | null | null | bucky/collectd.py | CollabNet/puppet-bucky | 6c12cdba59ef18dc8d1b8ae3fa2c3296f1b74cae | [
"Apache-2.0"
] | null | null | null | bucky/collectd.py | CollabNet/puppet-bucky | 6c12cdba59ef18dc8d1b8ae3fa2c3296f1b74cae | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import os
import six
import copy
import struct
import logging
from bucky.errors import ConfigError, ProtocolError
from bucky.udpserver import UDPServer
log = logging.getLogger(__name__)
class CPUConverter(object):
    """Builds the metric name parts for samples from collectd's ``cpu`` plugin."""

    # Lowest priority so any user-registered "cpu" converter replaces this one.
    PRIORITY = -1

    def __call__(self, sample):
        """Return ["cpu", <core instance>, <mode>] for the given sample."""
        core = sample["plugin_instance"]
        mode = sample["type_instance"]
        return ["cpu", core, mode]
class InterfaceConverter(object):
    """Builds the metric name parts for collectd's ``interface`` plugin."""

    # Lowest priority so user-registered converters take precedence.
    PRIORITY = -1

    def __call__(self, sample):
        """Return the non-empty name components for an interface sample.

        Missing plugin/type instances are tolerated; "type" and "value_name"
        are required keys. Empty components are dropped from the result.
        """
        candidates = [
            "interface",
            sample.get("plugin_instance", ""),
            sample.get("type_instance", ""),
            sample["type"],
            sample["value_name"],
        ]
        # A lazy filter is sufficient: the caller makes a single join pass.
        return filter(None, candidates)
class MemoryConverter(object):
    """Builds the metric name parts for collectd's ``memory`` plugin."""

    # Lowest priority so user-registered converters take precedence.
    PRIORITY = -1

    def __call__(self, sample):
        """Return ["memory", <type instance>] for the given sample."""
        parts = ["memory"]
        parts.append(sample["type_instance"])
        return parts
class DefaultConverter(object):
    """Fallback metric-name builder used for plugins with no dedicated converter.

    Joins plugin, plugin_instance, type, type_instance and value_name,
    skipping empty components and the redundant literal "value".
    """

    # Lowest priority so user-registered converters take precedence.
    PRIORITY = -1

    def __call__(self, sample):
        """Return the metric name parts for an arbitrary collectd sample."""
        name = [sample["plugin"].strip()]

        plugin_instance = sample.get("plugin_instance")
        if plugin_instance:
            name.append(plugin_instance.strip())

        kind = sample.get("type", "").strip()
        if kind not in ("", "value"):
            name.append(kind)

        kind_instance = sample.get("type_instance", "").strip()
        if kind_instance:
            name.append(kind_instance)

        value_name = sample.get("value_name").strip()
        if value_name not in ("", "value"):
            name.append(value_name)

        return name
# Built-in converters keyed by collectd plugin name. "_default" is used for
# any plugin without a dedicated converter. All ship with PRIORITY -1 so that
# converters registered from config or entry points (default priority 0)
# replace them.
DEFAULT_CONVERTERS = {
    "cpu": CPUConverter(),
    "interface": InterfaceConverter(),
    "memory": MemoryConverter(),
    "_default": DefaultConverter(),
}
class CollectDTypes(object):
    """Registry of collectd value specifications parsed from types.db files.

    Maps each collectd type name (e.g. ``if_octets``) to the ordered list of
    ``(value_name, value_type_code)`` pairs it carries, and records the
    declared min/max range for each value.
    """

    def __init__(self, types_dbs=[]):
        """Load the given types.db files, or probe the standard locations.

        Raises ConfigError when no types.db can be found.
        """
        self.types = {}
        self.type_ranges = {}
        if not types_dbs:
            # Bug fix: this previously used filter(), which in Python 3
            # returns a lazy iterator that is always truthy — the "not found"
            # check below could never fire, and the iterator was single-use.
            # Materializing the list restores the intended behavior.
            types_dbs = [path for path in (
                "/usr/share/collectd/types.db",
                "/usr/local/share/collectd/types.db",
            ) if os.path.exists(path)]
            if not types_dbs:
                raise ConfigError("Unable to locate types.db")
        self.types_dbs = types_dbs
        self._load_types()

    def get(self, name):
        """Return the value spec for `name`; raise ProtocolError if unknown."""
        t = self.types.get(name)
        if t is None:
            raise ProtocolError("Invalid type name: %s" % name)
        return t

    def _load_types(self):
        """Parse every configured types.db file into the registry."""
        for types_db in self.types_dbs:
            with open(types_db) as handle:
                for line in handle:
                    if line.lstrip()[:1] == "#":
                        continue  # comment line
                    if not line.strip():
                        continue  # blank line
                    self._add_type_line(line)
            log.info("Loaded collectd types from %s", types_db)

    def _add_type_line(self, line):
        """Parse one types.db line of the form ``name v:TYPE:min:max, ...``.

        "U" means an unbounded min/max and is stored as None.
        """
        types = {
            "COUNTER": 0,
            "GAUGE": 1,
            "DERIVE": 2,
            "ABSOLUTE": 3
        }
        name, spec = line.split(None, 1)
        self.types[name] = []
        self.type_ranges[name] = {}
        vals = spec.split(", ")
        for val in vals:
            vname, vtype, minv, maxv = val.strip().split(":")
            vtype = types.get(vtype)
            if vtype is None:
                raise ValueError("Invalid value type: %s" % vtype)
            minv = None if minv == "U" else float(minv)
            maxv = None if maxv == "U" else float(maxv)
            self.types[name].append((vname, vtype))
            self.type_ranges[name][vname] = (minv, maxv)
class CollectDParser(object):
    """Parses collectd's binary network protocol into sample dicts.

    A packet is a sequence of typed "parts"; metadata parts (host, time,
    plugin, type, ...) update a running sample state, and each VALUES part
    (0x0006) yields one sample per value it contains.
    """

    def __init__(self, types_dbs=[]):
        # The types registry is needed to decode VALUES parts.
        self.types = CollectDTypes(types_dbs=types_dbs)

    def parse(self, data):
        """Yield every sample contained in one raw packet."""
        for sample in self.parse_samples(data):
            yield sample

    def parse_samples(self, data):
        """Walk the packet's parts, accumulating state and yielding samples."""
        # Part-type -> handler; each handler mutates the running sample dict.
        types = {
            0x0000: self._parse_string("host"),
            0x0001: self._parse_time("time"),
            0x0008: self._parse_time_hires("time"),
            0x0002: self._parse_string("plugin"),
            0x0003: self._parse_string("plugin_instance"),
            0x0004: self._parse_string("type"),
            0x0005: self._parse_string("type_instance"),
            0x0006: None,  # VALUES part, handled specially below
            0x0007: self._parse_time("interval"),
            0x0009: self._parse_time_hires("interval")
        }
        sample = {}
        for (ptype, data) in self.parse_data(data):
            if ptype not in types:
                # Unknown-but-valid part types (e.g. signatures) are skipped.
                log.debug("Ignoring part type: 0x%02x", ptype)
                continue
            if ptype != 0x0006:
                types[ptype](sample, data)
                continue
            # VALUES part: emit one sample per decoded value. A deep copy is
            # yielded because the running `sample` dict keeps being mutated.
            for vname, vtype, val in self.parse_values(sample["type"], data):
                sample["value_name"] = vname
                sample["value_type"] = vtype
                sample["value"] = val
                yield copy.deepcopy(sample)

    def parse_data(self, data):
        """Split a packet into (part_type, part_payload) tuples.

        Each part starts with a 4-byte big-endian header: u16 type, u16
        total length (header included). Raises ProtocolError on truncation
        or unknown part types.
        """
        types = set([
            0x0000, 0x0001, 0x0002, 0x0003, 0x0004,
            0x0005, 0x0006, 0x0007, 0x0008, 0x0009,
            0x0100, 0x0101, 0x0200, 0x0210
        ])
        while len(data) > 0:
            if len(data) < 4:
                raise ProtocolError("Truncated header.")
            (part_type, part_len) = struct.unpack("!HH", data[:4])
            data = data[4:]
            if part_type not in types:
                raise ProtocolError("Invalid part type: 0x%02x" % part_type)
            part_len -= 4  # includes four header bytes we just parsed
            if len(data) < part_len:
                raise ProtocolError("Truncated value.")
            part_data, data = data[:part_len], data[part_len:]
            yield (part_type, part_data)

    def parse_values(self, stype, data):
        """Decode a VALUES payload, yielding (name, type_code, value).

        Layout: u16 count, then `count` one-byte type codes, then `count`
        8-byte values. GAUGE (1) is a little-endian double; COUNTER (0),
        DERIVE (2) and ABSOLUTE (3) are big-endian 64-bit integers.
        The declared types must match the types.db spec for `stype`.
        """
        types = {0: "!Q", 1: "<d", 2: "!q", 3: "!Q"}
        (nvals,) = struct.unpack("!H", data[:2])
        data = data[2:]
        if len(data) != 9 * nvals:
            raise ProtocolError("Invalid value structure length.")
        vtypes = self.types.get(stype)
        if nvals != len(vtypes):
            raise ProtocolError("Values different than types.db info.")
        for i in range(nvals):
            # In py3 indexing bytes gives an int directly; py2 needs unpack.
            if six.PY3:
                vtype = data[i]
            else:
                (vtype,) = struct.unpack("B", data[i])
            if vtype != vtypes[i][1]:
                raise ProtocolError("Type mismatch with types.db")
        data = data[nvals:]
        for i in range(nvals):
            vdata, data = data[:8], data[8:]
            (val,) = struct.unpack(types[vtypes[i][1]], vdata)
            yield vtypes[i][0], vtypes[i][1], val

    def _parse_string(self, name):
        """Build a handler storing a NUL-terminated string part as `name`."""
        def _parser(sample, data):
            if six.PY3:
                data = data.decode()
            if data[-1] != '\0':
                raise ProtocolError("Invalid string detected.")
            sample[name] = data[:-1]
        return _parser

    def _parse_time(self, name):
        """Build a handler storing a u64 seconds timestamp as `name`."""
        def _parser(sample, data):
            if len(data) != 8:
                raise ProtocolError("Invalid time data length.")
            (val,) = struct.unpack("!Q", data)
            sample[name] = float(val)
        return _parser

    def _parse_time_hires(self, name):
        """Build a handler for high-resolution time (2^-30 second units)."""
        def _parser(sample, data):
            if len(data) != 8:
                raise ProtocolError("Invalid hires time data length.")
            (val,) = struct.unpack("!Q", data)
            sample[name] = val * (2 ** -30)
        return _parser
class CollectDConverter(object):
    """Turns parsed collectd samples into (host, name, type, value, time) tuples.

    Metric names are produced by per-plugin converter callables. Converters
    come from three sources: the built-in DEFAULT_CONVERTERS, the
    configuration, and (optionally) setuptools entry points; name conflicts
    are resolved by each converter's PRIORITY attribute.
    """

    def __init__(self, cfg):
        self.converters = dict(DEFAULT_CONVERTERS)
        self._load_converters(cfg)

    def convert(self, sample):
        """Convert one sample dict, or return None to drop the sample."""
        default = self.converters["_default"]
        handler = self.converters.get(sample["plugin"], default)
        try:
            parts = handler(sample)
            # Fix: the None check must happen *before* the join — previously
            # a handler returning None (meaning "ignore this sample") made
            # '.'.join raise instead of silently skipping.
            if parts is None:
                return  # treat None as "ignore sample"
            name = '.'.join(parts)
        except Exception:
            # Fix: was a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt.
            log.exception("Exception in sample handler %s (%s):", sample["plugin"], handler)
            return
        host = sample.get("host", "")
        return (
            host,
            name,
            sample["value_type"],
            sample["value"],
            int(sample["time"])
        )

    def _load_converters(self, cfg):
        """Register converters from config, then from entry points if enabled."""
        cfg_conv = cfg.collectd_converters
        for conv in cfg_conv:
            self._add_converter(conv, cfg_conv[conv], source="config")
        if not cfg.collectd_use_entry_points:
            return
        import pkg_resources
        group = 'bucky.collectd.converters'
        for ep in pkg_resources.iter_entry_points(group):
            name, klass = ep.name, ep.load()
            self._add_converter(name, klass, source=ep.module_name)

    def _add_converter(self, name, inst, source="unknown"):
        """Add or replace a converter; the higher PRIORITY wins on conflict."""
        if name not in self.converters:
            log.info("Converter: %s from %s", name, source)
            self.converters[name] = inst
            return
        kpriority = getattr(inst, "PRIORITY", 0)
        ipriority = getattr(self.converters[name], "PRIORITY", 0)
        if kpriority > ipriority:
            log.info("Replacing: %s", name)
            log.info("Converter: %s from %s", name, source)
            self.converters[name] = inst
            return
        log.info("Ignoring: %s (%s) from %s (priority: %s vs %s)",
                 name, inst, source, kpriority, ipriority)
class CollectDServer(UDPServer):
    """UDP server that parses collectd packets and feeds metrics to a queue.

    COUNTER/DERIVE/ABSOLUTE values are converted to per-second rates using
    the previously seen (value, time) pair per (host, name); GAUGE values
    pass through unchanged.
    """

    def __init__(self, queue, cfg):
        super(CollectDServer, self).__init__(cfg.collectd_ip, cfg.collectd_port)
        self.queue = queue
        self.parser = CollectDParser(cfg.collectd_types)
        self.converter = CollectDConverter(cfg)
        # (host, name) -> (last value, last timestamp), used for rate math.
        self.prev_samples = {}
        # Last raw sample seen; logged for context on protocol errors.
        self.last_sample = None

    def handle(self, data, addr):
        """Process one datagram; always returns True to keep the server alive."""
        try:
            for sample in self.parser.parse(data):
                self.last_sample = sample
                sample = self.converter.convert(sample)
                if sample is None:
                    continue  # converter dropped this sample
                host, name, vtype, val, time = sample
                if not name.strip():
                    continue  # refuse to emit metrics with empty names
                val = self.calculate(host, name, vtype, val, time)
                if val is not None:
                    self.queue.put((host, name, val, time))
        except ProtocolError as e:
            log.error("Protocol error: %s", e)
            if self.last_sample is not None:
                log.info("Last sample: %s", self.last_sample)
        return True

    def calculate(self, host, name, vtype, val, time):
        """Dispatch on collectd value type; returns None when not computable."""
        handlers = {
            0: self._calc_counter,  # counter
            1: lambda _host, _name, v, _time: v,  # gauge passes through
            2: self._calc_derive,  # derive
            3: self._calc_absolute  # absolute
        }
        if vtype not in handlers:
            log.error("Invalid value type %s for %s", vtype, name)
            log.info("Last sample: %s", self.last_sample)
            return
        return handlers[vtype](host, name, val, time)

    def _calc_counter(self, host, name, val, time):
        """Rate for a monotonically increasing counter.

        Returns None for the first observation and for suspect updates
        (decreasing value or non-advancing time).
        """
        # I need to figure out how to handle wrapping
        # Read: http://oss.oetiker.ch/rrdtool/tut/rrdtutorial.en.html
        # and then fix later
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if val < pval or time <= ptime:
            log.error("Invalid COUNTER update for: %s:%s" % key)
            log.info("Last sample: %s", self.last_sample)
            return
        return float(val - pval) / (time - ptime)

    def _calc_derive(self, host, name, val, time):
        """Rate for a DERIVE value (may legitimately decrease)."""
        # Like counter, I need to figure out wrapping
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if time <= ptime:
            log.debug("Invalid DERIVE update for: %s:%s" % key)
            log.debug("Last sample: %s", self.last_sample)
            return
        return float(val - pval) / (time - ptime)

    def _calc_absolute(self, host, name, val, time):
        """Rate for an ABSOLUTE value (counter reset on every read)."""
        key = (host, name)
        if key not in self.prev_samples:
            self.prev_samples[key] = (val, time)
            return
        _pval, ptime = self.prev_samples[key]
        self.prev_samples[key] = (val, time)
        if time <= ptime:
            log.error("Invalid ABSOLUTE update for: %s:%s" % key)
            log.info("Last sample: %s", self.last_sample)
            return
        return float(val) / (time - ptime)
| 35.137097 | 93 | 0.553056 |
acf9f7235b49cb7e9d415f782152598806d2202f | 391 | py | Python | setup.py | errantlinguist/tangrams-analysis | 4ca8e2e72210c9953154f0555f7c88337febf21a | [
"Apache-2.0"
] | null | null | null | setup.py | errantlinguist/tangrams-analysis | 4ca8e2e72210c9953154f0555f7c88337febf21a | [
"Apache-2.0"
] | null | null | null | setup.py | errantlinguist/tangrams-analysis | 4ca8e2e72210c9953154f0555f7c88337febf21a | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# Package metadata for the tangrams_analysis distribution.
setup(name='tangrams_analysis',
      version='0.1',
      description='ML and analysis tools for the game \"tangrams-restricted\"',
      url='https://github.com/errantlinguist/tangrams-analysis',
      author='Todd Shore',
      author_email='errantlinguist+github@gmail.com',
      license='Apache License, Version 2.0',
      packages=['tangrams_analysis'])
acf9f7bbb4ef3aef69b2ebfa1a608500130475fc | 5,683 | py | Python | service/image_trace.py | BrightHai/vision-ui | a4fa8d1adea88cbd3b684ef977ce86e834ab3d4e | [
"MIT"
] | null | null | null | service/image_trace.py | BrightHai/vision-ui | a4fa8d1adea88cbd3b684ef977ce86e834ab3d4e | [
"MIT"
] | null | null | null | service/image_trace.py | BrightHai/vision-ui | a4fa8d1adea88cbd3b684ef977ce86e834ab3d4e | [
"MIT"
] | null | null | null | import cv2
import os
import torch
import clip
import numpy as np
from PIL import Image
from scipy import spatial
from service.image_infer import get_ui_infer
from service.image_utils import get_roi_image, img_show
def cosine_similar(l1, l2):
    """Return the cosine similarity of two vectors.

    Computed as ``1 - cosine_distance``: 1.0 for identical direction,
    0.0 for orthogonal vectors.
    """
    distance = spatial.distance.cosine(l1, l2)
    return 1 - distance
class ImageTrace(object):
    """Finds a target UI element in screenshots/videos using CLIP.

    Candidate regions come from the UI detector (`get_ui_infer`); each region
    is scored by a weighted sum of CLIP image similarity to a target image
    and CLIP text similarity to a target description.
    """

    def __init__(self):
        # CLIP runs on GPU when available, otherwise CPU.
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        print(f"Using {self.device}.")
        print("Downloading model will take a while for the first time.")
        # Plain gray placeholder used when the caller supplies no target image.
        self.template_target_image = np.zeros([100, 100, 3], dtype=np.uint8)+100
        self.model, self.preprocess = clip.load("ViT-B/32", device=self.device)

    def search_image(self, target_image_info: dict, source_image_path, top_k, image_alpha, text_alpha):
        """Score all detected regions against the target; return the top-k.

        Returns (top_k_ids, normalized_scores, detector_results); ids index
        into the detector results.
        """
        top_k = top_k  # maximum number of matches to return
        image_alpha = image_alpha  # weight of the image-similarity term
        text_alpha = text_alpha  # weight of the text-similarity term
        roi_list = []
        img_text_score = []
        target_image = target_image_info.get('img', self.template_target_image)
        target_image_desc = target_image_info.get('desc', '')
        source_image = cv2.imread(source_image_path)
        image_infer_result = get_ui_infer(source_image_path)
        text = clip.tokenize([target_image_desc]).to(self.device)
        # Crop each detected region and preprocess it for CLIP.
        # NOTE(review): assumes 'elem_det_region' is (x1, y1, x2, y2) —
        # consistent with how the corners are expanded below.
        for roi in image_infer_result:
            x1, y1, x2, y2 = list(map(int, roi['elem_det_region']))
            roi = get_roi_image(source_image, [[x1, y1], [x2, y1], [x2, y2], [x1, y2]])
            img_pil = Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB))
            roi_list.append(self.preprocess(img_pil).to(self.device))
        # Encode the target image, all region crops, and the description text.
        with torch.no_grad():
            img_pil = Image.fromarray(cv2.cvtColor(target_image, cv2.COLOR_BGR2RGB))
            target_image_input = self.preprocess(img_pil).unsqueeze(0).to(self.device).clone().detach()
            target_image_features = self.model.encode_image(target_image_input)
            source_image_input = torch.tensor(np.stack(roi_list))
            source_image_features = self.model.encode_image(source_image_input)
            logits_per_image, logits_per_text = self.model(source_image_input, text)
            probs = logits_per_text.softmax(dim=-1).cpu().numpy()
        # Combine image similarity and text probability per region.
        for i, source_image_feature in enumerate(source_image_features):
            score = cosine_similar(target_image_features[0], source_image_feature)
            img_text_score.append(score*image_alpha + probs[0][i]*text_alpha)
        # Min-max normalize the combined scores, then take the k best.
        score_norm = (img_text_score - np.min(img_text_score)) / (np.max(img_text_score) - np.min(img_text_score))
        top_k_ids = np.argsort(score_norm)[-top_k:]
        return top_k_ids, score_norm, image_infer_result

    def get_trace_result(self, target_image_info, source_image_path, top_k=3, image_alpha=1.0, text_alpha=0.6):
        """Return the source image annotated with the top-k matched boxes."""
        top_k_ids, scores, infer_result = self.search_image(target_image_info, source_image_path,
                                                            top_k, image_alpha, text_alpha)
        # All boxes share a single class ('T' = target) for rendering.
        cls_ids = np.zeros(len(top_k_ids), dtype=int)
        boxes = [infer_result[i]['elem_det_region'] for i in top_k_ids]
        scores = [float(scores[i]) for i in top_k_ids]
        image_show = img_show(cv2.imread(source_image_path), boxes, scores, cls_ids, conf=0.5, class_names=['T'])
        return image_show

    def video_target_track(self, video_path, target_image_info, work_path):
        """Annotate the best match per frame and write the result video.

        Every other frame is skipped to halve the processing cost; frames are
        round-tripped through a temp PNG because get_trace_result takes a path.
        """
        video_cap = cv2.VideoCapture(video_path)
        _, im = video_cap.read()
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        im_save_path = os.path.join(work_path, 'im_temp.png')
        video_out_path = os.path.join(work_path, 'video_out.mp4')
        out = cv2.VideoWriter(video_out_path, fourcc, 20, (im.shape[1], im.shape[0]))
        i = 0
        while 1:
            i = i + 1
            if i % 2 == 0:
                continue  # process every other frame only
            print(f"video parsing {i}")
            ret, im = video_cap.read()
            if ret:
                cv2.imwrite(im_save_path, im)
                trace_result = self.get_trace_result(target_image_info, im_save_path, top_k=1)
                out.write(trace_result)
            else:
                print("finish.")
                out.release()
                break
def trace_target_video():
    """Demo: track a play-button icon through a local video file.

    Reads the target image and video from ./capture/local_images and writes
    video_out.mp4 next to them (see ImageTrace.video_target_track).
    """
    target_image_info = {
        'path': "./capture/local_images/img_play_icon.png",
        'desc': "picture with play button"
    }
    target_image_info['img'] = cv2.imread(target_image_info['path'])
    video_path = "./capture/local_images/video.mp4"
    work_path = './capture/local_images'
    image_trace = ImageTrace()
    image_trace.video_target_track(video_path, target_image_info, work_path)
def search_target_image():
    """
    # robust target image search
    """
    # weight of the image-similarity term
    image_alpha = 1.0
    # weight of the text-description term
    text_alpha = 0.6
    # maximum number of matches to return
    top_k = 3
    # Build a synthetic target image: a black 'Q' on a white square.
    target_img = np.zeros([100, 100, 3], dtype=np.uint8)+255
    cv2.putText(target_img, 'Q', (30, 60), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), thickness=3)
    # Natural-language description of the target
    desc = "shape of magnifier with blue background"
    target_image_info = {'img': target_img, 'desc': desc}
    source_image_path = "./capture/image_1.png"
    trace_result_path = "./capture/local_images/trace_result.png"
    # Run the search and save the annotated result image.
    image_trace = ImageTrace()
    image_trace_show = image_trace.get_trace_result(target_image_info, source_image_path, top_k=top_k,
                                                    image_alpha=image_alpha, text_alpha=text_alpha)
    cv2.imwrite(trace_result_path, image_trace_show)
    print(f"Result saved {trace_result_path}")
search_target_image()
| 43.381679 | 114 | 0.650713 |
acf9f7f903692b021a98f36e0d96d401d8d9241c | 1,469 | py | Python | per/agents/heads.py | lucaslingle/pytorch_per | ca69d7cd65711db48c0c11d84fb181606c251794 | [
"MIT"
] | null | null | null | per/agents/heads.py | lucaslingle/pytorch_per | ca69d7cd65711db48c0c11d84fb181606c251794 | [
"MIT"
] | null | null | null | per/agents/heads.py | lucaslingle/pytorch_per | ca69d7cd65711db48c0c11d84fb181606c251794 | [
"MIT"
] | null | null | null | import torch as tc
class LinearActionValueHead(tc.nn.Module):
    """Action-value head: one affine map from features to per-action Q-values."""

    def __init__(self, num_features, num_actions):
        """Create the head for `num_features` inputs and `num_actions` outputs."""
        super().__init__()
        self._num_features = num_features
        self._num_actions = num_actions
        # Single linear layer producing one Q-value per action.
        self._linear = tc.nn.Linear(num_features, num_actions, bias=True)

    @property
    def num_actions(self):
        """Number of discrete actions (output dimensionality)."""
        return self._num_actions

    def forward(self, x):
        """Map a feature batch to predicted action values."""
        return self._linear(x)
class DuelingActionValueHead(tc.nn.Module):
    """Dueling action-value head (Wang et al.): Q = V + (A - mean(A)).

    State value and per-action advantages are predicted by separate MLPs;
    the advantages are centered so the decomposition is identifiable.
    """

    def __init__(self, num_features, num_actions):
        """Create value and advantage streams over `num_features` inputs."""
        super().__init__()
        self._num_features = num_features
        self._num_actions = num_actions
        # Scalar state-value stream.
        self._value_head = tc.nn.Sequential(
            tc.nn.Linear(num_features, num_features, bias=True),
            tc.nn.ReLU(),
            tc.nn.Linear(num_features, 1, bias=True),
        )
        # Per-action advantage stream; no final bias since advantages
        # are centered anyway.
        self._advantage_head = tc.nn.Sequential(
            tc.nn.Linear(num_features, num_features, bias=True),
            tc.nn.ReLU(),
            tc.nn.Linear(num_features, num_actions, bias=False),
        )

    @property
    def num_actions(self):
        """Number of discrete actions (output dimensionality)."""
        return self._num_actions

    def forward(self, x):
        """Combine value and mean-centered advantages into Q-values."""
        value = self._value_head(x)
        advantage = self._advantage_head(x)
        centered = advantage - advantage.mean(dim=-1).unsqueeze(-1)
        return value + centered
| 28.803922 | 76 | 0.62015 |
acf9f8c8f29b59cb7bb68e1b446764dac00f12bc | 2,956 | py | Python | demo_splat.py | ranjian0/python-compute-shaders | d4b292ca0d2a3af687d1d7f146e8e936352ebfaf | [
"MIT"
] | null | null | null | demo_splat.py | ranjian0/python-compute-shaders | d4b292ca0d2a3af687d1d7f146e8e936352ebfaf | [
"MIT"
] | null | null | null | demo_splat.py | ranjian0/python-compute-shaders | d4b292ca0d2a3af687d1d7f146e8e936352ebfaf | [
"MIT"
] | null | null | null | import glfw
import shader
import OpenGL.GL as gl
SCR_WIDTH = 1024
SCR_HEIGHT = 768
def RES(filename):
    """Resolve a shader source file name to its path in the splat directory.

    Fix: the previous lambda ignored its argument and returned a corrupted
    literal path ("shaders/splat/(unknown)"), so every shader load pointed
    at a nonexistent file.
    """
    return f"shaders/splat/{filename}"
def main():
    """Create a GL 4.2 window and run the compute-shader splat demo loop."""
    if not glfw.init():
        raise ValueError("Failed to initialize glfw")

    # Request a core-profile OpenGL 4.2 context (compute shaders need 4.2+
    # via extension / 4.3 core; 4.2 is what this demo targets).
    glfw.window_hint(glfw.CONTEXT_CREATION_API, glfw.NATIVE_CONTEXT_API)
    glfw.window_hint(glfw.CLIENT_API, glfw.OPENGL_API)
    glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 4)
    glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 2)
    glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)
    glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, True)
    glfw.window_hint(glfw.RESIZABLE, True)
    glfw.window_hint(glfw.DOUBLEBUFFER, True)
    glfw.window_hint(glfw.DEPTH_BITS, 24)
    glfw.window_hint(glfw.SAMPLES, 4)

    window = glfw.create_window(SCR_WIDTH, SCR_HEIGHT, "Python Compute Shader Demo", None, None)
    if not window:
        glfw.terminate()
        raise ValueError("Failed to create window")

    glfw.make_context_current(window)
    glfw.set_key_callback(window, key_event_callback)
    glfw.set_cursor_pos_callback(window, mouse_event_callback)
    glfw.set_mouse_button_callback(window, mouse_button_callback)
    glfw.set_window_size_callback(window, window_resize_callback)

    # Compute shader writes into an image; the vertex/fragment pair samples it.
    CSG = shader.ComputeShader(RES("splat.glsl"))
    SplatProgram = shader.Shader(RES("splat.vert"), RES("splat.frag"))

    # Screen-sized RGBA32F texture bound as image unit 0 (the UAV the
    # compute shader writes to and the splat program reads from).
    some_UAV = gl.glGenTextures(1)
    gl.glActiveTexture(gl.GL_TEXTURE0)
    gl.glBindTexture(gl.GL_TEXTURE_2D, some_UAV)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_EDGE)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_EDGE)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)
    gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)
    gl.glTexImage2D(gl.GL_TEXTURE_2D, 0, gl.GL_RGBA32F, SCR_WIDTH, SCR_HEIGHT, 0, gl.GL_RGBA, gl.GL_FLOAT, None)
    gl.glBindImageTexture(0, some_UAV, 0, gl.GL_FALSE, 0, gl.GL_READ_WRITE, gl.GL_RGBA32F)

    # A VAO must be bound to draw, even with no vertex buffers (the
    # fullscreen triangle is generated in the vertex shader).
    vao = gl.glGenVertexArrays(1)
    gl.glBindVertexArray(vao)
    gl.glDisable(gl.GL_CULL_FACE)
    gl.glDisable(gl.GL_DEPTH_TEST)

    while not glfw.window_should_close(window):
        # Dispatch one 8x8 workgroup per screen tile, then fence image
        # writes before sampling the texture in the draw pass.
        CSG.use()
        gl.glDispatchCompute(SCR_WIDTH // 8, SCR_HEIGHT // 8, 1)
        gl.glMemoryBarrier(gl.GL_SHADER_IMAGE_ACCESS_BARRIER_BIT)
        SplatProgram.use()
        SplatProgram.set_int("Whatever", 0)
        gl.glDrawArrays(gl.GL_TRIANGLES, 0, 3)
        glfw.swap_buffers(window)
        glfw.poll_events()
    glfw.terminate()
def key_event_callback(window, key, scancode, action, mods):
    """Request window close when Escape is pressed.

    Fix: guard on the PRESS action — GLFW invokes this callback for press,
    repeat and release, so the unguarded version fired on key release too.
    """
    if key == glfw.KEY_ESCAPE and action == glfw.PRESS:
        glfw.set_window_should_close(window, True)
def mouse_event_callback(window, xpos, ypos):
    """Cursor-position hook; intentionally a no-op in this demo."""
    pass


def mouse_button_callback(window, button, action, mods):
    """Mouse-button hook; intentionally a no-op in this demo."""
    pass
def window_resize_callback(window, width, height):
    """Keep the GL viewport in sync with the window size.

    Fix: glViewport takes (x, y, width, height); the previous call passed
    (0, width, 0, height), placing the viewport origin at x=width with
    zero width.
    """
    gl.glViewport(0, 0, width, height)
if __name__ == '__main__':
main()
| 32.844444 | 112 | 0.73816 |
acf9f9cc05c00ee758338af7d9fcf5bde2f3302f | 2,659 | py | Python | server/regression/tensor_learn.py | heena34/APM | 1eef3800ee14bf3c52dac017e37377b3f8eacc11 | [
"RSA-MD"
] | null | null | null | server/regression/tensor_learn.py | heena34/APM | 1eef3800ee14bf3c52dac017e37377b3f8eacc11 | [
"RSA-MD"
] | null | null | null | server/regression/tensor_learn.py | heena34/APM | 1eef3800ee14bf3c52dac017e37377b3f8eacc11 | [
"RSA-MD"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue May 22 14:18:38 2018
@author: GUR44996
"""
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 200
pd.options.display.float_format = '{:.1f}'.format
# Load the California housing data and shuffle the rows so ordered data does
# not bias SGD; the target is rescaled to units of $1000.
data_frame = pd.read_csv("https://storage.googleapis.com/mledu-datasets/california_housing_train.csv", sep=",")
data_frame = data_frame.reindex(np.random.permutation(data_frame.index))
data_frame["median_house_value"] /= 1000.0

# total_rooms as a one-column DataFrame (the model input)
total_room = data_frame[["total_rooms"]]
# TF feature column describing that single numeric input
feature_column = [tf.feature_column.numeric_column("total_rooms")]
# median_house_value as a Series (the regression target)
targets = data_frame["median_house_value"]

# Plain SGD with gradient clipping (TF 1.x estimator API).
tf_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.0000001)
tf_optimizer = tf.contrib.estimator.clip_gradients_by_norm(tf_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(feature_columns=feature_column, optimizer=tf_optimizer)


def input_func(features, targets, batch_size=1, shuffle=True, num_of_epochs=None):
    """Build a TF Dataset input_fn feeding (features, label) batches.

    features: DataFrame of input columns; targets: label Series.
    num_of_epochs=None repeats indefinitely (training); shuffle uses a
    100k-row buffer. Returns the (features, labels) tensors of a
    one-shot iterator.
    """
    features = {key: np.array(value) for key, value in dict(features).items()}
    ds = Dataset.from_tensor_slices((features, targets))
    ds = ds.batch(batch_size).repeat(num_of_epochs)
    if shuffle:
        ds = ds.shuffle(buffer_size=100000)
    features, labels = ds.make_one_shot_iterator().get_next()
    return features, labels


# Train for 100 steps, then predict over the full dataset (single epoch,
# no shuffling, so predictions align with `targets`).
linear_regressor.train(input_fn=lambda: input_func(total_room, targets), steps=100)
prediction_input_fn = lambda: input_func(total_room, targets, num_of_epochs=1, shuffle=False)
predictions = linear_regressor.predict(input_fn=prediction_input_fn)
# Format predictions as a NumPy array, so we can calculate error metrics.
predictions = np.array([item['predictions'][0] for item in predictions])

mean_squared_error = metrics.mean_squared_error(predictions, targets)
root_mean_squared_error = math.sqrt(mean_squared_error)
# Target spread, for judging whether the RMSE is small relative to the data.
min_val = data_frame["median_house_value"].min()
max_val = data_frame["median_house_value"].max()
difference = max_val - min_val
print("Mean Squared Error: %0.5f" % mean_squared_error)
print("Root Mean Squared Error: %0.5f" % root_mean_squared_error)
print("Difference between min and max: %0.5f" % difference)

# Side-by-side table of predictions vs. targets for a quick calibration look.
calibration_data = pd.DataFrame()
calibration_data["predictions"] = pd.Series(predictions)
calibration_data["targets"] = pd.Series(targets)
print(calibration_data)
| 32.426829 | 112 | 0.782625 |
acf9f9eefe37e0406db189f6f16a339831b60b57 | 2,520 | py | Python | TrajectoryClass.py | melkisedeath/Harmonic_Analysis_and_Trajectory | a5a2819c053ddd287dcb668fac2f1be7e44f6c59 | [
"MIT"
] | 1 | 2021-01-16T06:05:50.000Z | 2021-01-16T06:05:50.000Z | TrajectoryClass.py | melkisedeath/Harmonic_Analysis_and_Trajectory | a5a2819c053ddd287dcb668fac2f1be7e44f6c59 | [
"MIT"
] | null | null | null | TrajectoryClass.py | melkisedeath/Harmonic_Analysis_and_Trajectory | a5a2819c053ddd287dcb668fac2f1be7e44f6c59 | [
"MIT"
] | 1 | 2021-01-16T06:06:02.000Z | 2021-01-16T06:06:02.000Z | class TrajectoryClass:
"""A class containing Trajectory Information."""
def __init__(self, initialChordPosition, listOfChords, Tonnetz):
"""Initialize function of Trajectory."""
self.chordPositions = [initialChordPosition]
self.connectingEdges = []
self.index = 1 # Redundant: should always be len(chordPositions)
self.listOfChords = listOfChords
self.Tonnetz = Tonnetz
def addChord(self, chordPosition, connectingEdge):
"""Add a new chord in Trajectory."""
self.chordPositions.append(chordPosition)
self.connectingEdges.append(connectingEdge)
self.index += 1
def getLastPosition(self, offset=1):
"""Get the last chord coordinates, or change offset."""
if offset > self.index:
raise IndexError()
return self.chordPositions[-offset]
def getThisChord(self):
"""Get the PC values of the currect chord."""
return self.listOfChords[self.index]
def getNextChord(self, offset=1):
"""Get the PC values of the next chord, or change offest."""
return self.listOfChords[self.index + offset]
def addType(self, trajType):
"""Precise the type of the Trajectory, recursive, with future, etc."""
self.type = trajType
def chordsRemaining(self):
"""Return the number of remaining chords to place."""
return len(self.listOfChords) - len(self.chordPositions)
# ADD MIDI FILE PROPERTIES
def addNumberOfInstruments(self, numberOfInstruments):
"""How many instruments in midi file.
The number of instruments typically is provides by program changes.
"""
self.numOfInstr = numberOfInstruments
def addInstruments(self, listOfInstruments):
"""A list with all the instruments, no duplicates."""
self.instruments = list(set(listOfInstruments))
self.addNumberOfInstruments(len(set(listOfInstruments)))
def addTempo(self, tempo):
"""Tempo Estimation."""
self.tempo = tempo
def addNumber_of_signature_changes(self, number):
"""Number of time signature changes."""
self.number_of_signature_changes = number
def addTime_signatures(self, signature_changes):
"""Add the time signatures of the piece.
The default value if the time signature is not precised is 4/4
"""
self.time_signatures = list(set(signature_changes))
self.addNumber_of_signature_changes(len(signature_changes))
| 36.521739 | 78 | 0.669048 |
acf9fa559089448679b42e5bb2570811e761530f | 845 | py | Python | bettadb/client.py | jackatbancast/bettaDB | 1156bd1ad0bd4c4537bee2406f65c4e5d37ce96f | [
"MIT"
] | 1 | 2020-08-29T18:18:17.000Z | 2020-08-29T18:18:17.000Z | bettadb/client.py | jackatbancast/bettaDB | 1156bd1ad0bd4c4537bee2406f65c4e5d37ce96f | [
"MIT"
] | null | null | null | bettadb/client.py | jackatbancast/bettaDB | 1156bd1ad0bd4c4537bee2406f65c4e5d37ce96f | [
"MIT"
] | null | null | null | import eventedpy as e
from bettadb.db import DataStore
class Client:
    """Facade over the event loop and datastore of a bettadb instance.

    All operations are fire-and-forget events posted to the event loop;
    results are delivered through the optional callback arguments.
    """

    def __init__(self, datastore=None, filename=None, debug=True):
        # Each client owns its own event loop; a DataStore is created from
        # `filename` unless one is injected explicitly.
        self.evt_loop = e.EventLoop()
        self.datastore = datastore or DataStore(filename, self.evt_loop, debug)
        self.evt_loop.start()
        self.evt_loop.event('log debug', "DEBUG :: Client started")

    def insert(self, collection, document, callback=None):
        """Queue an insert of `document` into `collection`."""
        self.evt_loop.event('insert', collection, document, callback)

    def delete(self, collection, query, callback=None):
        """Queue a delete of documents matching `query`."""
        self.evt_loop.event('delete', collection, query, callback)

    def find(self, collection, query, callback):
        """Queue a query; matching documents are passed to `callback`."""
        self.evt_loop.event('find', collection, query, callback)

    def find_one(self, collection, query, callback):
        """Queue a single-document query; the result goes to `callback`."""
        self.evt_loop.event('find_one', collection, query, callback)
acf9fa85d48f27a3b386a2611627bae4b8796268 | 675 | py | Python | logiciel_s6/logiciel_s6_project/manage.py | matthew73210/logiciel_s6 | 57a16f7210a21fe3e2733839598d84796d1257a9 | [
"MIT"
] | null | null | null | logiciel_s6/logiciel_s6_project/manage.py | matthew73210/logiciel_s6 | 57a16f7210a21fe3e2733839598d84796d1257a9 | [
"MIT"
] | null | null | null | logiciel_s6/logiciel_s6_project/manage.py | matthew73210/logiciel_s6 | 57a16f7210a21fe3e2733839598d84796d1257a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Configure the settings module and hand control to Django's CLI."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'logiciel_s6_project.settings')
    try:
        from django.core.management import execute_from_command_line as run_cli
    except ImportError as exc:
        # Re-raise with a hint: the usual cause is a missing virtualenv.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    run_cli(sys.argv)


if __name__ == '__main__':
    main()
acf9fdd2cb567759f633d86ee34e93a63a557c90 | 1,031 | py | Python | _unittests/ut_binaries/test_common_csdeps.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | 4 | 2018-06-07T06:34:32.000Z | 2020-02-12T17:39:58.000Z | _unittests/ut_binaries/test_common_csdeps.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | 13 | 2018-05-21T23:06:58.000Z | 2018-12-30T17:57:11.000Z | _unittests/ut_binaries/test_common_csdeps.py | sdpython/csharpyml | f814af89c5b988924a7f31fe71ec6eb515292070 | [
"MIT"
] | null | null | null | """
@brief test log(time=3s)
You should indicate a time in seconds. The program ``run_unittests.py``
will sort all test files by increasing time and run them.
"""
import sys
import os
import unittest
from sklearn import datasets
from pyquickhelper.pycode import ExtTestCase
# Make the local "src" package importable when the test is launched from a
# checkout rather than an installed distribution: if the plain import fails,
# add the repository root (two levels above this file) to sys.path and retry.
try:
    import src
except ImportError:
    # __file__ lives in <repo>/_unittests/ut_binaries; "../.." is the repo root.
    path = os.path.normpath(
        os.path.abspath(
            os.path.join(
                os.path.split(__file__)[0],
                "..",
                "..")))
    if path not in sys.path:
        sys.path.append(path)
    import src
from src.csharpyml.binaries import get_mlnet_assemblies
class TestCsCommonDependencies(ExtTestCase):
    """Checks that the common ML.NET assembly dependencies resolve."""

    def test_src(self):
        "skip pylint"
        # Touch the imported modules so pylint does not flag them as unused.
        self.assertTrue(src is not None)
        self.assertTrue(datasets is not None)

    def test_common_dependencies(self):
        assemblies, usings = get_mlnet_assemblies()
        self.assertNotEmpty(assemblies)
        self.assertNotEmpty(usings)


if __name__ == "__main__":
    unittest.main()
| 22.911111 | 71 | 0.647915 |
acf9fe14321252f546c9a0c37e64004ef22e8c9c | 64,058 | py | Python | vkwave/api/methods/groups.py | XIDY-Dex/vkwave | a4fb471652be467853ca6a7d1c645547e66f5679 | [
"MIT"
] | 3 | 2020-12-03T12:32:15.000Z | 2022-02-02T06:41:39.000Z | vkwave/api/methods/groups.py | XIDY-Dex/vkwave | a4fb471652be467853ca6a7d1c645547e66f5679 | [
"MIT"
] | null | null | null | vkwave/api/methods/groups.py | XIDY-Dex/vkwave | a4fb471652be467853ca6a7d1c645547e66f5679 | [
"MIT"
] | null | null | null | from vkwave.types.responses import *
from ._category import Category
from ._utils import get_params
class Groups(Category):
async def add_address(
self,
group_id: int,
title: str,
address: str,
country_id: int,
city_id: int,
latitude: int,
longitude: int,
return_raw_response: bool = False,
additional_address: typing.Optional[str] = None,
metro_id: typing.Optional[int] = None,
phone: typing.Optional[str] = None,
work_info_status: typing.Optional[str] = None,
timetable: typing.Optional[str] = None,
is_main_address: typing.Optional[bool] = None,
) -> typing.Union[dict, GroupsAddAddressResponse]:
"""
:param group_id:
:param title:
:param address:
:param additional_address:
:param country_id:
:param city_id:
:param metro_id:
:param latitude:
:param longitude:
:param phone:
:param work_info_status:
:param timetable:
:param is_main_address:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("addAddress", params)
if return_raw_response:
return raw_result
result = GroupsAddAddressResponse(**raw_result)
return result
async def add_callback_server(
self,
group_id: int,
url: str,
title: str,
return_raw_response: bool = False,
secret_key: typing.Optional[str] = None,
) -> typing.Union[dict, GroupsAddCallbackServerResponse]:
"""
:param group_id:
:param url:
:param title:
:param secret_key:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("addCallbackServer", params)
if return_raw_response:
return raw_result
result = GroupsAddCallbackServerResponse(**raw_result)
return result
async def add_link(
self,
group_id: int,
link: str,
return_raw_response: bool = False,
text: typing.Optional[str] = None,
) -> typing.Union[dict, GroupsAddLinkResponse]:
"""
:param group_id: - Community ID.
:param link: - Link URL.
:param text: - Description text for the link.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("addLink", params)
if return_raw_response:
return raw_result
result = GroupsAddLinkResponse(**raw_result)
return result
async def approve_request(
self,
group_id: int,
user_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param user_id: - User ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("approveRequest", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def ban(
self,
group_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
end_date: typing.Optional[int] = None,
reason: typing.Optional[int] = None,
comment: typing.Optional[str] = None,
comment_visible: typing.Optional[bool] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param owner_id:
:param end_date:
:param reason:
:param comment:
:param comment_visible:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("ban", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def create(
self,
title: str,
return_raw_response: bool = False,
description: typing.Optional[str] = None,
type: typing.Optional[str] = None,
public_category: typing.Optional[int] = None,
subtype: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsCreateResponse]:
"""
:param title: - Community title.
:param description: - Community description (ignored for 'type' = 'public').
:param type: - Community type. Possible values: *'group' – group,, *'event' – event,, *'public' – public page
:param public_category: - Category ID (for 'type' = 'public' only).
:param subtype: - Public page subtype. Possible values: *'1' – place or small business,, *'2' – company, organization or website,, *'3' – famous person or group of people,, *'4' – product or work of art.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("create", params)
if return_raw_response:
return raw_result
result = GroupsCreateResponse(**raw_result)
return result
async def delete_address(
self,
group_id: int,
address_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param address_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("deleteAddress", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def delete_callback_server(
self,
group_id: int,
server_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param server_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("deleteCallbackServer", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def delete_link(
self,
group_id: int,
link_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param link_id: - Link ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("deleteLink", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def disable_online(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("disableOnline", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit(
self,
group_id: int,
return_raw_response: bool = False,
title: typing.Optional[str] = None,
description: typing.Optional[str] = None,
screen_name: typing.Optional[str] = None,
access: typing.Optional[int] = None,
website: typing.Optional[str] = None,
subject: typing.Optional[str] = None,
email: typing.Optional[str] = None,
phone: typing.Optional[str] = None,
rss: typing.Optional[str] = None,
event_start_date: typing.Optional[int] = None,
event_finish_date: typing.Optional[int] = None,
event_group_id: typing.Optional[int] = None,
public_category: typing.Optional[int] = None,
public_subcategory: typing.Optional[int] = None,
public_date: typing.Optional[str] = None,
wall: typing.Optional[int] = None,
topics: typing.Optional[int] = None,
photos: typing.Optional[int] = None,
video: typing.Optional[int] = None,
audio: typing.Optional[int] = None,
links: typing.Optional[BaseBoolInt] = None,
events: typing.Optional[BaseBoolInt] = None,
places: typing.Optional[BaseBoolInt] = None,
contacts: typing.Optional[BaseBoolInt] = None,
docs: typing.Optional[int] = None,
wiki: typing.Optional[int] = None,
messages: typing.Optional[BaseBoolInt] = None,
articles: typing.Optional[bool] = None,
addresses: typing.Optional[bool] = None,
age_limits: typing.Optional[int] = None,
market: typing.Optional[BaseBoolInt] = None,
market_comments: typing.Optional[BaseBoolInt] = None,
market_country: typing.Optional[typing.List[int]] = None,
market_city: typing.Optional[typing.List[int]] = None,
market_currency: typing.Optional[int] = None,
market_contact: typing.Optional[int] = None,
market_wiki: typing.Optional[int] = None,
obscene_filter: typing.Optional[BaseBoolInt] = None,
obscene_stopwords: typing.Optional[BaseBoolInt] = None,
obscene_words: typing.Optional[typing.List[str]] = None,
main_section: typing.Optional[int] = None,
secondary_section: typing.Optional[int] = None,
country: typing.Optional[int] = None,
city: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param title: - Community title.
:param description: - Community description.
:param screen_name: - Community screen name.
:param access: - Community type. Possible values: *'0' – open,, *'1' – closed,, *'2' – private.
:param website: - Website that will be displayed in the community information field.
:param subject: - Community subject. Possible values: , *'1' – auto/moto,, *'2' – activity holidays,, *'3' – business,, *'4' – pets,, *'5' – health,, *'6' – dating and communication, , *'7' – games,, *'8' – IT (computers and software),, *'9' – cinema,, *'10' – beauty and fashion,, *'11' – cooking,, *'12' – art and culture,, *'13' – literature,, *'14' – mobile services and internet,, *'15' – music,, *'16' – science and technology,, *'17' – real estate,, *'18' – news and media,, *'19' – security,, *'20' – education,, *'21' – home and renovations,, *'22' – politics,, *'23' – food,, *'24' – industry,, *'25' – travel,, *'26' – work,, *'27' – entertainment,, *'28' – religion,, *'29' – family,, *'30' – sports,, *'31' – insurance,, *'32' – television,, *'33' – goods and services,, *'34' – hobbies,, *'35' – finance,, *'36' – photo,, *'37' – esoterics,, *'38' – electronics and appliances,, *'39' – erotic,, *'40' – humor,, *'41' – society, humanities,, *'42' – design and graphics.
:param email: - Organizer email (for events).
:param phone: - Organizer phone number (for events).
:param rss: - RSS feed address for import (available only to communities with special permission. Contact vk.com/support to get it.
:param event_start_date: - Event start date in Unixtime format.
:param event_finish_date: - Event finish date in Unixtime format.
:param event_group_id: - Organizer community ID (for events only).
:param public_category: - Public page category ID.
:param public_subcategory: - Public page subcategory ID.
:param public_date: - Founding date of a company or organization owning the community in "dd.mm.YYYY" format.
:param wall: - Wall settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (groups and events only),, *'3' – closed (groups and events only).
:param topics: - Board topics settings. Possbile values: , *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param photos: - Photos settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param video: - Video settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param audio: - Audio settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param links: - Links settings (for public pages only). Possible values: *'0' – disabled,, *'1' – enabled.
:param events: - Events settings (for public pages only). Possible values: *'0' – disabled,, *'1' – enabled.
:param places: - Places settings (for public pages only). Possible values: *'0' – disabled,, *'1' – enabled.
:param contacts: - Contacts settings (for public pages only). Possible values: *'0' – disabled,, *'1' – enabled.
:param docs: - Documents settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param wiki: - Wiki pages settings. Possible values: *'0' – disabled,, *'1' – open,, *'2' – limited (for groups and events only).
:param messages: - Community messages. Possible values: *'0' — disabled,, *'1' — enabled.
:param articles:
:param addresses:
:param age_limits: - Community age limits. Possible values: *'1' — no limits,, *'2' — 16+,, *'3' — 18+.
:param market: - Market settings. Possible values: *'0' – disabled,, *'1' – enabled.
:param market_comments: - market comments settings. Possible values: *'0' – disabled,, *'1' – enabled.
:param market_country: - Market delivery countries.
:param market_city: - Market delivery cities (if only one country is specified).
:param market_currency: - Market currency settings. Possbile values: , *'643' – Russian rubles,, *'980' – Ukrainian hryvnia,, *'398' – Kazakh tenge,, *'978' – Euro,, *'840' – US dollars
:param market_contact: - Seller contact for market. Set '0' for community messages.
:param market_wiki: - ID of a wiki page with market description.
:param obscene_filter: - Obscene expressions filter in comments. Possible values: , *'0' – disabled,, *'1' – enabled.
:param obscene_stopwords: - Stopwords filter in comments. Possible values: , *'0' – disabled,, *'1' – enabled.
:param obscene_words: - Keywords for stopwords filter.
:param main_section:
:param secondary_section:
:param country: - Country of the community.
:param city: - City of the community.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("edit", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit_address(
self,
group_id: int,
address_id: int,
return_raw_response: bool = False,
title: typing.Optional[str] = None,
address: typing.Optional[str] = None,
additional_address: typing.Optional[str] = None,
country_id: typing.Optional[int] = None,
city_id: typing.Optional[int] = None,
metro_id: typing.Optional[int] = None,
latitude: typing.Optional[int] = None,
longitude: typing.Optional[int] = None,
phone: typing.Optional[str] = None,
work_info_status: typing.Optional[str] = None,
timetable: typing.Optional[str] = None,
is_main_address: typing.Optional[bool] = None,
) -> typing.Union[dict, GroupsEditAddressResponse]:
"""
:param group_id:
:param address_id:
:param title:
:param address:
:param additional_address:
:param country_id:
:param city_id:
:param metro_id:
:param latitude:
:param longitude:
:param phone:
:param work_info_status:
:param timetable:
:param is_main_address:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editAddress", params)
if return_raw_response:
return raw_result
result = GroupsEditAddressResponse(**raw_result)
return result
async def edit_callback_server(
self,
group_id: int,
server_id: int,
url: str,
title: str,
return_raw_response: bool = False,
secret_key: typing.Optional[str] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param server_id:
:param url:
:param title:
:param secret_key:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editCallbackServer", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit_link(
self,
group_id: int,
link_id: int,
return_raw_response: bool = False,
text: typing.Optional[str] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param link_id: - Link ID.
:param text: - New description text for the link.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editLink", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def edit_manager(
self,
group_id: int,
user_id: int,
return_raw_response: bool = False,
role: typing.Optional[str] = None,
is_contact: typing.Optional[BaseBoolInt] = None,
contact_position: typing.Optional[str] = None,
contact_phone: typing.Optional[str] = None,
contact_email: typing.Optional[str] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param user_id: - User ID.
:param role: - Manager role. Possible values: *'moderator',, *'editor',, *'administrator',, *'advertiser'.
:param is_contact: - '1' — to show the manager in Contacts block of the community.
:param contact_position: - Position to show in Contacts block.
:param contact_phone: - Contact phone.
:param contact_email: - Contact e-mail.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("editManager", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def enable_online(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("enableOnline", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def get(
self,
return_raw_response: bool = False,
user_id: typing.Optional[int] = None,
extended: typing.Optional[BaseBoolInt] = None,
filter: typing.Optional[typing.List[GroupsFilter]] = None,
fields: typing.Optional[typing.List[GroupsFields]] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsGetResponse, GroupsGetExtendedResponse]:
"""
:param user_id: - User ID.
:param extended: - '1' — to return complete information about a user's communities, '0' — to return a list of community IDs without any additional fields (default),
:param filter: - Types of communities to return: 'admin' — to return communities administered by the user , 'editor' — to return communities where the user is an administrator or editor, 'moder' — to return communities where the user is an administrator, editor, or moderator, 'groups' — to return only groups, 'publics' — to return only public pages, 'events' — to return only events
:param fields: - Profile fields to return.
:param offset: - Offset needed to return a specific subset of communities.
:param count: - Number of communities to return.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("get", params)
if return_raw_response:
return raw_result
result = (
GroupsGetResponse(**raw_result)
if not extended
else GroupsGetExtendedResponse(**raw_result)
)
return result
async def get_addresses(
self,
group_id: int,
return_raw_response: bool = False,
address_ids: typing.Optional[typing.List[int]] = None,
latitude: typing.Optional[int] = None,
longitude: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
fields: typing.Optional[typing.List[AddressesFields]] = None,
) -> typing.Union[dict, GroupsGetAddressesResponse]:
"""
:param group_id: - ID or screen name of the community.
:param address_ids:
:param latitude: - Latitude of the user geo position.
:param longitude: - Longitude of the user geo position.
:param offset: - Offset needed to return a specific subset of community addresses.
:param count: - Number of community addresses to return.
:param fields: - Address fields
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getAddresses", params)
if return_raw_response:
return raw_result
result = GroupsGetAddressesResponse(**raw_result)
return result
async def get_banned(
self,
group_id: int,
return_raw_response: bool = False,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
fields: typing.Optional[typing.List[BaseUserGroupFields]] = None,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsGetBannedResponse]:
"""
:param group_id: - Community ID.
:param offset: - Offset needed to return a specific subset of users.
:param count: - Number of users to return.
:param fields:
:param owner_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getBanned", params)
if return_raw_response:
return raw_result
result = GroupsGetBannedResponse(**raw_result)
return result
async def get_by_id(
self,
return_raw_response: bool = False,
group_ids: typing.Optional[typing.List[str]] = None,
group_id: typing.Optional[str] = None,
fields: typing.Optional[typing.List[GroupsFields]] = None,
) -> typing.Union[dict, GroupsGetByIdLegacyResponse]:
"""
:param group_ids: - IDs or screen names of communities.
:param group_id: - ID or screen name of the community.
:param fields: - Group fields to return.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getById", params)
if return_raw_response:
return raw_result
result = GroupsGetByIdLegacyResponse(**raw_result)
return result
async def get_callback_confirmation_code(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetCallbackConfirmationCodeResponse]:
"""
:param group_id: - Community ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getCallbackConfirmationCode", params)
if return_raw_response:
return raw_result
result = GroupsGetCallbackConfirmationCodeResponse(**raw_result)
return result
async def get_callback_servers(
self,
group_id: int,
return_raw_response: bool = False,
server_ids: typing.Optional[typing.List[int]] = None,
) -> typing.Union[dict, GroupsGetCallbackServersResponse]:
"""
:param group_id:
:param server_ids:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getCallbackServers", params)
if return_raw_response:
return raw_result
result = GroupsGetCallbackServersResponse(**raw_result)
return result
async def get_callback_settings(
self,
group_id: int,
return_raw_response: bool = False,
server_id: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsGetCallbackSettingsResponse]:
"""
:param group_id: - Community ID.
:param server_id: - Server ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getCallbackSettings", params)
if return_raw_response:
return raw_result
result = GroupsGetCallbackSettingsResponse(**raw_result)
return result
async def get_catalog(
self,
return_raw_response: bool = False,
category_id: typing.Optional[int] = None,
subcategory_id: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsGetCatalogResponse]:
"""
:param category_id: - Category id received from [vk.com/dev/groups.getCatalogInfo|groups.getCatalogInfo].
:param subcategory_id: - Subcategory id received from [vk.com/dev/groups.getCatalogInfo|groups.getCatalogInfo].
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getCatalog", params)
if return_raw_response:
return raw_result
result = GroupsGetCatalogResponse(**raw_result)
return result
async def get_catalog_info(
self,
return_raw_response: bool = False,
extended: typing.Optional[BaseBoolInt] = None,
subcategories: typing.Optional[BaseBoolInt] = None,
) -> typing.Union[dict, GroupsGetCatalogInfoResponse, GroupsGetCatalogInfoExtendedResponse]:
"""
:param extended: - 1 – to return communities count and three communities for preview. By default: 0.
:param subcategories: - 1 – to return subcategories info. By default: 0.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getCatalogInfo", params)
if return_raw_response:
return raw_result
result = (
GroupsGetCatalogInfoResponse(**raw_result)
if not extended
else GroupsGetCatalogInfoExtendedResponse(**raw_result)
)
return result
async def get_invited_users(
self,
group_id: int,
return_raw_response: bool = False,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
fields: typing.Optional[typing.List[UsersFields]] = None,
name_case: typing.Optional[str] = None,
) -> typing.Union[dict, GroupsGetInvitedUsersResponse]:
"""
:param group_id: - Group ID to return invited users for.
:param offset: - Offset needed to return a specific subset of results.
:param count: - Number of results to return.
:param fields: - List of additional fields to be returned. Available values: 'sex, bdate, city, country, photo_50, photo_100, photo_200_orig, photo_200, photo_400_orig, photo_max, photo_max_orig, online, online_mobile, lists, domain, has_mobile, contacts, connections, site, education, universities, schools, can_post, can_see_all_posts, can_see_audio, can_write_private_message, status, last_seen, common_count, relation, relatives, counters'.
:param name_case: - Case for declension of user name and surname. Possible values: *'nom' — nominative (default),, *'gen' — genitive,, *'dat' — dative,, *'acc' — accusative, , *'ins' — instrumental,, *'abl' — prepositional.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getInvitedUsers", params)
if return_raw_response:
return raw_result
result = GroupsGetInvitedUsersResponse(**raw_result)
return result
async def get_invites(
self,
return_raw_response: bool = False,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
extended: typing.Optional[BaseBoolInt] = None,
) -> typing.Union[dict, GroupsGetInvitesResponse, GroupsGetInvitesExtendedResponse]:
"""
:param offset: - Offset needed to return a specific subset of invitations.
:param count: - Number of invitations to return.
:param extended: - '1' — to return additional [vk.com/dev/fields_groups|fields] for communities..
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getInvites", params)
if return_raw_response:
return raw_result
result = (
GroupsGetInvitesResponse(**raw_result)
if not extended
else GroupsGetInvitesExtendedResponse(**raw_result)
)
return result
async def get_long_poll_server(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetLongPollServerResponse]:
"""
:param group_id: - Community ID
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getLongPollServer", params)
if return_raw_response:
return raw_result
result = GroupsGetLongPollServerResponse(**raw_result)
return result
async def get_long_poll_settings(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetLongPollSettingsResponse]:
"""
:param group_id: - Community ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getLongPollSettings", params)
if return_raw_response:
return raw_result
result = GroupsGetLongPollSettingsResponse(**raw_result)
return result
async def get_members(
self,
return_raw_response: bool = False,
group_id: typing.Optional[str] = None,
sort: typing.Optional[str] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
fields: typing.Optional[typing.List[UsersFields]] = None,
filter: typing.Optional[str] = None,
) -> typing.Union[dict, GroupsGetMembersResponse, GroupsGetMembersFieldsResponse]:
"""
:param group_id: - ID or screen name of the community.
:param sort: - Sort order. Available values: 'id_asc', 'id_desc', 'time_asc', 'time_desc'. 'time_asc' and 'time_desc' are availavle only if the method is called by the group's 'moderator'.
:param offset: - Offset needed to return a specific subset of community members.
:param count: - Number of community members to return.
:param fields: - List of additional fields to be returned. Available values: 'sex, bdate, city, country, photo_50, photo_100, photo_200_orig, photo_200, photo_400_orig, photo_max, photo_max_orig, online, online_mobile, lists, domain, has_mobile, contacts, connections, site, education, universities, schools, can_post, can_see_all_posts, can_see_audio, can_write_private_message, status, last_seen, common_count, relation, relatives, counters'.
:param filter: - *'friends' – only friends in this community will be returned,, *'unsure' – only those who pressed 'I may attend' will be returned (if it's an event).
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getMembers", params)
if return_raw_response:
return raw_result
result = (
GroupsGetMembersResponse(**raw_result)
if not fields
else GroupsGetMembersFieldsResponse(**raw_result)
)
return result
async def get_requests(
self,
group_id: int,
return_raw_response: bool = False,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
fields: typing.Optional[typing.List[UsersFields]] = None,
) -> typing.Union[dict, GroupsGetRequestsResponse, GroupsGetRequestsFieldsResponse]:
"""
:param group_id: - Community ID.
:param offset: - Offset needed to return a specific subset of results.
:param count: - Number of results to return.
:param fields: - Profile fields to return.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getRequests", params)
if return_raw_response:
return raw_result
result = (
GroupsGetRequestsResponse(**raw_result)
if not fields
else GroupsGetRequestsFieldsResponse(**raw_result)
)
return result
async def get_settings(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetSettingsResponse]:
"""
:param group_id: - Community ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getSettings", params)
if return_raw_response:
return raw_result
result = GroupsGetSettingsResponse(**raw_result)
return result
async def get_tag_list(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetTagListResponse]:
"""
:param group_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getTagList", params)
if return_raw_response:
return raw_result
result = GroupsGetTagListResponse(**raw_result)
return result
async def get_token_permissions(
self,
return_raw_response: bool = False,
) -> typing.Union[dict, GroupsGetTokenPermissionsResponse]:
"""
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("getTokenPermissions", params)
if return_raw_response:
return raw_result
result = GroupsGetTokenPermissionsResponse(**raw_result)
return result
async def invite(
self,
group_id: int,
user_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param user_id: - User ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("invite", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def is_member(
self,
group_id: str,
return_raw_response: bool = False,
user_id: typing.Optional[int] = None,
user_ids: typing.Optional[typing.List[int]] = None,
extended: typing.Optional[BaseBoolInt] = None,
) -> typing.Union[dict, GroupsIsMemberResponse, GroupsIsMemberExtendedResponse]:
"""
:param group_id: - ID or screen name of the community.
:param user_id: - User ID.
:param user_ids: - User IDs.
:param extended: - '1' — to return an extended response with additional fields. By default: '0'.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("isMember", params)
if return_raw_response:
return raw_result
result = (
GroupsIsMemberResponse(**raw_result)
if not extended
else GroupsIsMemberExtendedResponse(**raw_result)
)
return result
async def join(
self,
return_raw_response: bool = False,
group_id: typing.Optional[int] = None,
not_sure: typing.Optional[str] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - ID or screen name of the community.
:param not_sure: - Optional parameter which is taken into account when 'gid' belongs to the event: '1' — Perhaps I will attend, '0' — I will be there for sure (default), ,
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("join", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def leave(
self,
group_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - ID or screen name of the community.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("leave", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def remove_user(
self,
group_id: int,
user_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param user_id: - User ID.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("removeUser", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def reorder_link(
self,
group_id: int,
link_id: int,
return_raw_response: bool = False,
after: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id: - Community ID.
:param link_id: - Link ID.
:param after: - ID of the link after which to place the link with 'link_id'.
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("reorderLink", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def search(
self,
q: str,
return_raw_response: bool = False,
type: typing.Optional[str] = None,
country_id: typing.Optional[int] = None,
city_id: typing.Optional[int] = None,
future: typing.Optional[BaseBoolInt] = None,
market: typing.Optional[BaseBoolInt] = None,
sort: typing.Optional[int] = None,
offset: typing.Optional[int] = None,
count: typing.Optional[int] = None,
) -> typing.Union[dict, GroupsSearchResponse]:
"""
:param q: - Search query string.
:param type: - Community type. Possible values: 'group, page, event.'
:param country_id: - Country ID.
:param city_id: - City ID. If this parameter is transmitted, country_id is ignored.
:param future: - '1' — to return only upcoming events. Works with the 'type' = 'event' only.
:param market: - '1' — to return communities with enabled market only.
:param sort: - Sort order. Possible values: *'0' — default sorting (similar the full version of the site),, *'1' — by growth speed,, *'2'— by the "day attendance/members number" ratio,, *'3' — by the "Likes number/members number" ratio,, *'4' — by the "comments number/members number" ratio,, *'5' — by the "boards entries number/members number" ratio.
:param offset: - Offset needed to return a specific subset of results.
:param count: - Number of communities to return. "Note that you can not receive more than first thousand of results, regardless of 'count' and 'offset' values."
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("search", params)
if return_raw_response:
return raw_result
result = GroupsSearchResponse(**raw_result)
return result
    async def set_callback_settings(
        self,
        group_id: int,
        return_raw_response: bool = False,
        server_id: typing.Optional[int] = None,
        api_version: typing.Optional[str] = None,
        message_new: typing.Optional[BaseBoolInt] = None,
        message_reply: typing.Optional[BaseBoolInt] = None,
        message_allow: typing.Optional[BaseBoolInt] = None,
        message_edit: typing.Optional[bool] = None,
        message_deny: typing.Optional[BaseBoolInt] = None,
        message_typing_state: typing.Optional[bool] = None,
        photo_new: typing.Optional[BaseBoolInt] = None,
        audio_new: typing.Optional[BaseBoolInt] = None,
        video_new: typing.Optional[BaseBoolInt] = None,
        wall_reply_new: typing.Optional[BaseBoolInt] = None,
        wall_reply_edit: typing.Optional[BaseBoolInt] = None,
        wall_reply_delete: typing.Optional[BaseBoolInt] = None,
        wall_reply_restore: typing.Optional[BaseBoolInt] = None,
        wall_post_new: typing.Optional[BaseBoolInt] = None,
        wall_repost: typing.Optional[BaseBoolInt] = None,
        board_post_new: typing.Optional[BaseBoolInt] = None,
        board_post_edit: typing.Optional[BaseBoolInt] = None,
        board_post_restore: typing.Optional[BaseBoolInt] = None,
        board_post_delete: typing.Optional[BaseBoolInt] = None,
        photo_comment_new: typing.Optional[BaseBoolInt] = None,
        photo_comment_edit: typing.Optional[BaseBoolInt] = None,
        photo_comment_delete: typing.Optional[BaseBoolInt] = None,
        photo_comment_restore: typing.Optional[BaseBoolInt] = None,
        video_comment_new: typing.Optional[BaseBoolInt] = None,
        video_comment_edit: typing.Optional[BaseBoolInt] = None,
        video_comment_delete: typing.Optional[BaseBoolInt] = None,
        video_comment_restore: typing.Optional[BaseBoolInt] = None,
        market_comment_new: typing.Optional[BaseBoolInt] = None,
        market_comment_edit: typing.Optional[BaseBoolInt] = None,
        market_comment_delete: typing.Optional[BaseBoolInt] = None,
        market_comment_restore: typing.Optional[BaseBoolInt] = None,
        market_order_new: typing.Optional[bool] = None,
        market_order_edit: typing.Optional[bool] = None,
        poll_vote_new: typing.Optional[BaseBoolInt] = None,
        group_join: typing.Optional[BaseBoolInt] = None,
        group_leave: typing.Optional[BaseBoolInt] = None,
        group_change_settings: typing.Optional[bool] = None,
        group_change_photo: typing.Optional[bool] = None,
        group_officers_edit: typing.Optional[bool] = None,
        user_block: typing.Optional[bool] = None,
        user_unblock: typing.Optional[bool] = None,
        lead_forms_new: typing.Optional[bool] = None,
        like_add: typing.Optional[bool] = None,
        like_remove: typing.Optional[bool] = None,
        message_event: typing.Optional[bool] = None,
        donut_subscription_create: typing.Optional[bool] = None,
        donut_subscription_prolonged: typing.Optional[bool] = None,
        donut_subscription_cancelled: typing.Optional[bool] = None,
        donut_subscription_price_changed: typing.Optional[bool] = None,
        donut_subscription_expired: typing.Optional[bool] = None,
        donut_money_withdraw: typing.Optional[bool] = None,
        donut_money_withdraw_error: typing.Optional[bool] = None,
    ) -> typing.Union[dict, BaseOkResponse]:
        """
        Call the ``groups.setCallbackSettings`` API method: choose which event
        notifications a community's Callback API server receives. Each event flag
        uses '0' — disabled, '1' — enabled.

        :param group_id: - Community ID.
        :param server_id: - Server ID.
        :param api_version:
        :param message_new: - A new incoming message has been received ('0' — disabled, '1' — enabled).
        :param message_reply: - A new outcoming message has been received ('0' — disabled, '1' — enabled).
        :param message_allow: - Allowed messages notifications ('0' — disabled, '1' — enabled).
        :param message_edit:
        :param message_deny: - Denied messages notifications ('0' — disabled, '1' — enabled).
        :param message_typing_state:
        :param photo_new: - New photos notifications ('0' — disabled, '1' — enabled).
        :param audio_new: - New audios notifications ('0' — disabled, '1' — enabled).
        :param video_new: - New videos notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_new: - New wall replies notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_edit: - Wall replies edited notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_delete: - A wall comment has been deleted ('0' — disabled, '1' — enabled).
        :param wall_reply_restore: - A wall comment has been restored ('0' — disabled, '1' — enabled).
        :param wall_post_new: - New wall posts notifications ('0' — disabled, '1' — enabled).
        :param wall_repost: - New wall posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_new: - New board posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_edit: - Board posts edited notifications ('0' — disabled, '1' — enabled).
        :param board_post_restore: - Board posts restored notifications ('0' — disabled, '1' — enabled).
        :param board_post_delete: - Board posts deleted notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_new: - New comment to photo notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_edit: - A photo comment has been edited ('0' — disabled, '1' — enabled).
        :param photo_comment_delete: - A photo comment has been deleted ('0' — disabled, '1' — enabled).
        :param photo_comment_restore: - A photo comment has been restored ('0' — disabled, '1' — enabled).
        :param video_comment_new: - New comment to video notifications ('0' — disabled, '1' — enabled).
        :param video_comment_edit: - A video comment has been edited ('0' — disabled, '1' — enabled).
        :param video_comment_delete: - A video comment has been deleted ('0' — disabled, '1' — enabled).
        :param video_comment_restore: - A video comment has been restored ('0' — disabled, '1' — enabled).
        :param market_comment_new: - New comment to market item notifications ('0' — disabled, '1' — enabled).
        :param market_comment_edit: - A market comment has been edited ('0' — disabled, '1' — enabled).
        :param market_comment_delete: - A market comment has been deleted ('0' — disabled, '1' — enabled).
        :param market_comment_restore: - A market comment has been restored ('0' — disabled, '1' — enabled).
        :param market_order_new:
        :param market_order_edit:
        :param poll_vote_new: - A vote in a public poll has been added ('0' — disabled, '1' — enabled).
        :param group_join: - Joined community notifications ('0' — disabled, '1' — enabled).
        :param group_leave: - Left community notifications ('0' — disabled, '1' — enabled).
        :param group_change_settings:
        :param group_change_photo:
        :param group_officers_edit:
        :param user_block: - User added to community blacklist
        :param user_unblock: - User removed from community blacklist
        :param lead_forms_new: - New form in lead forms
        :param like_add:
        :param like_remove:
        :param message_event:
        :param donut_subscription_create:
        :param donut_subscription_prolonged:
        :param donut_subscription_cancelled:
        :param donut_subscription_price_changed:
        :param donut_subscription_expired:
        :param donut_money_withdraw:
        :param donut_money_withdraw_error:
        :param return_raw_response: - return the raw API response as a dict
        :return: raw dict or BaseOkResponse
        """
        # get_params(locals()) captures every keyword argument above by name,
        # so no new local may be introduced before this call.
        params = get_params(locals())
        raw_result = await self.api_request("setCallbackSettings", params)
        if return_raw_response:
            return raw_result
        result = BaseOkResponse(**raw_result)
        return result
    async def set_long_poll_settings(
        self,
        group_id: int,
        return_raw_response: bool = False,
        enabled: typing.Optional[BaseBoolInt] = None,
        api_version: typing.Optional[str] = None,
        message_new: typing.Optional[BaseBoolInt] = None,
        message_reply: typing.Optional[BaseBoolInt] = None,
        message_allow: typing.Optional[BaseBoolInt] = None,
        message_deny: typing.Optional[BaseBoolInt] = None,
        message_edit: typing.Optional[BaseBoolInt] = None,
        message_typing_state: typing.Optional[bool] = None,
        photo_new: typing.Optional[BaseBoolInt] = None,
        audio_new: typing.Optional[BaseBoolInt] = None,
        video_new: typing.Optional[BaseBoolInt] = None,
        wall_reply_new: typing.Optional[BaseBoolInt] = None,
        wall_reply_edit: typing.Optional[BaseBoolInt] = None,
        wall_reply_delete: typing.Optional[BaseBoolInt] = None,
        wall_reply_restore: typing.Optional[BaseBoolInt] = None,
        wall_post_new: typing.Optional[BaseBoolInt] = None,
        wall_repost: typing.Optional[BaseBoolInt] = None,
        board_post_new: typing.Optional[BaseBoolInt] = None,
        board_post_edit: typing.Optional[BaseBoolInt] = None,
        board_post_restore: typing.Optional[BaseBoolInt] = None,
        board_post_delete: typing.Optional[BaseBoolInt] = None,
        photo_comment_new: typing.Optional[BaseBoolInt] = None,
        photo_comment_edit: typing.Optional[BaseBoolInt] = None,
        photo_comment_delete: typing.Optional[BaseBoolInt] = None,
        photo_comment_restore: typing.Optional[BaseBoolInt] = None,
        video_comment_new: typing.Optional[BaseBoolInt] = None,
        video_comment_edit: typing.Optional[BaseBoolInt] = None,
        video_comment_delete: typing.Optional[BaseBoolInt] = None,
        video_comment_restore: typing.Optional[BaseBoolInt] = None,
        market_comment_new: typing.Optional[BaseBoolInt] = None,
        market_comment_edit: typing.Optional[BaseBoolInt] = None,
        market_comment_delete: typing.Optional[BaseBoolInt] = None,
        market_comment_restore: typing.Optional[BaseBoolInt] = None,
        poll_vote_new: typing.Optional[BaseBoolInt] = None,
        group_join: typing.Optional[BaseBoolInt] = None,
        group_leave: typing.Optional[BaseBoolInt] = None,
        group_change_settings: typing.Optional[bool] = None,
        group_change_photo: typing.Optional[bool] = None,
        group_officers_edit: typing.Optional[bool] = None,
        user_block: typing.Optional[bool] = None,
        user_unblock: typing.Optional[bool] = None,
        like_add: typing.Optional[bool] = None,
        like_remove: typing.Optional[bool] = None,
        message_event: typing.Optional[bool] = None,
        donut_subscription_create: typing.Optional[bool] = None,
        donut_subscription_prolonged: typing.Optional[bool] = None,
        donut_subscription_cancelled: typing.Optional[bool] = None,
        donut_subscription_price_changed: typing.Optional[bool] = None,
        donut_subscription_expired: typing.Optional[bool] = None,
        donut_money_withdraw: typing.Optional[bool] = None,
        donut_money_withdraw_error: typing.Optional[bool] = None,
    ) -> typing.Union[dict, BaseOkResponse]:
        """
        Call the ``groups.setLongPollSettings`` API method: enable/disable the
        Bots Long Poll API for a community and choose which events it delivers.
        Each event flag uses '0' — disabled, '1' — enabled.

        :param group_id: - Community ID.
        :param enabled: - Sets whether Long Poll is enabled ('0' — disabled, '1' — enabled).
        :param api_version:
        :param message_new: - A new incoming message has been received ('0' — disabled, '1' — enabled).
        :param message_reply: - A new outcoming message has been received ('0' — disabled, '1' — enabled).
        :param message_allow: - Allowed messages notifications ('0' — disabled, '1' — enabled).
        :param message_deny: - Denied messages notifications ('0' — disabled, '1' — enabled).
        :param message_edit: - A message has been edited ('0' — disabled, '1' — enabled).
        :param message_typing_state:
        :param photo_new: - New photos notifications ('0' — disabled, '1' — enabled).
        :param audio_new: - New audios notifications ('0' — disabled, '1' — enabled).
        :param video_new: - New videos notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_new: - New wall replies notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_edit: - Wall replies edited notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_delete: - A wall comment has been deleted ('0' — disabled, '1' — enabled).
        :param wall_reply_restore: - A wall comment has been restored ('0' — disabled, '1' — enabled).
        :param wall_post_new: - New wall posts notifications ('0' — disabled, '1' — enabled).
        :param wall_repost: - New wall posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_new: - New board posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_edit: - Board posts edited notifications ('0' — disabled, '1' — enabled).
        :param board_post_restore: - Board posts restored notifications ('0' — disabled, '1' — enabled).
        :param board_post_delete: - Board posts deleted notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_new: - New comment to photo notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_edit: - A photo comment has been edited ('0' — disabled, '1' — enabled).
        :param photo_comment_delete: - A photo comment has been deleted ('0' — disabled, '1' — enabled).
        :param photo_comment_restore: - A photo comment has been restored ('0' — disabled, '1' — enabled).
        :param video_comment_new: - New comment to video notifications ('0' — disabled, '1' — enabled).
        :param video_comment_edit: - A video comment has been edited ('0' — disabled, '1' — enabled).
        :param video_comment_delete: - A video comment has been deleted ('0' — disabled, '1' — enabled).
        :param video_comment_restore: - A video comment has been restored ('0' — disabled, '1' — enabled).
        :param market_comment_new: - New comment to market item notifications ('0' — disabled, '1' — enabled).
        :param market_comment_edit: - A market comment has been edited ('0' — disabled, '1' — enabled).
        :param market_comment_delete: - A market comment has been deleted ('0' — disabled, '1' — enabled).
        :param market_comment_restore: - A market comment has been restored ('0' — disabled, '1' — enabled).
        :param poll_vote_new: - A vote in a public poll has been added ('0' — disabled, '1' — enabled).
        :param group_join: - Joined community notifications ('0' — disabled, '1' — enabled).
        :param group_leave: - Left community notifications ('0' — disabled, '1' — enabled).
        :param group_change_settings:
        :param group_change_photo:
        :param group_officers_edit:
        :param user_block: - User added to community blacklist
        :param user_unblock: - User removed from community blacklist
        :param like_add:
        :param like_remove:
        :param message_event:
        :param donut_subscription_create:
        :param donut_subscription_prolonged:
        :param donut_subscription_cancelled:
        :param donut_subscription_price_changed:
        :param donut_subscription_expired:
        :param donut_money_withdraw:
        :param donut_money_withdraw_error:
        :param return_raw_response: - return the raw API response as a dict
        :return: raw dict or BaseOkResponse
        """
        # get_params(locals()) captures every keyword argument above by name,
        # so no new local may be introduced before this call.
        params = get_params(locals())
        raw_result = await self.api_request("setLongPollSettings", params)
        if return_raw_response:
            return raw_result
        result = BaseOkResponse(**raw_result)
        return result
async def set_settings(
self,
group_id: int,
return_raw_response: bool = False,
messages: typing.Optional[bool] = None,
bots_capabilities: typing.Optional[bool] = None,
bots_start_button: typing.Optional[bool] = None,
bots_add_to_chat: typing.Optional[bool] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param messages:
:param bots_capabilities:
:param bots_start_button:
:param bots_add_to_chat:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("setSettings", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def set_user_note(
self,
group_id: int,
user_id: int,
return_raw_response: bool = False,
note: typing.Optional[str] = None,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param group_id:
:param user_id:
:param note: - Note body
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("setUserNote", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def tag_add(
self,
group_id: int,
tag_name: str,
return_raw_response: bool = False,
tag_color: typing.Optional[str] = None,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param group_id:
:param tag_name:
:param tag_color:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("tagAdd", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def tag_bind(
self,
group_id: int,
tag_id: int,
user_id: int,
act: str,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param group_id:
:param tag_id:
:param user_id:
:param act: - Describe the action
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("tagBind", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def tag_delete(
self,
group_id: int,
tag_id: int,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param group_id:
:param tag_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("tagDelete", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def tag_update(
self,
group_id: int,
tag_id: int,
tag_name: str,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseBoolResponse]:
"""
:param group_id:
:param tag_id:
:param tag_name:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("tagUpdate", params)
if return_raw_response:
return raw_result
result = BaseBoolResponse(**raw_result)
return result
async def toggle_market(
self,
group_id: int,
state: str,
return_raw_response: bool = False,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param state:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("toggleMarket", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
async def unban(
self,
group_id: int,
return_raw_response: bool = False,
owner_id: typing.Optional[int] = None,
) -> typing.Union[dict, BaseOkResponse]:
"""
:param group_id:
:param owner_id:
:param return_raw_response: - return result at dict
:return:
"""
params = get_params(locals())
raw_result = await self.api_request("unban", params)
if return_raw_response:
return raw_result
result = BaseOkResponse(**raw_result)
return result
| 40.28805 | 992 | 0.628181 |
acf9fe2458b739b4efb5ea91f32fb35f6c065558 | 8,840 | py | Python | gs_quant/datetime/rules.py | TopherD1992/gs-quant | 253ed75519abbbe407e17e39ca5ed7340fa010dc | [
"Apache-2.0"
] | 1 | 2021-01-06T06:25:40.000Z | 2021-01-06T06:25:40.000Z | gs_quant/datetime/rules.py | TopherD1992/gs-quant | 253ed75519abbbe407e17e39ca5ed7340fa010dc | [
"Apache-2.0"
] | null | null | null | gs_quant/datetime/rules.py | TopherD1992/gs-quant | 253ed75519abbbe407e17e39ca5ed7340fa010dc | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Goldman Sachs.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import calendar
import logging
from abc import ABC, abstractmethod
from datetime import date, timedelta
from typing import List, Union

from cachetools import TTLCache
from cachetools.keys import hashkey
from dateutil.relativedelta import relativedelta, FR, SA, SU, TH, TU, WE, MO
from numpy import busday_offset
from pandas import DataFrame, Series, concat, to_datetime

from gs_quant.api.gs.data import GsDataApi
from gs_quant.markets.securities import ExchangeCode
from gs_quant.target.common import Currency
DATE_LOW_LIMIT = date(1952, 1, 1)
DATE_HIGH_LIMIT = date(2052, 12, 31)
_cache = TTLCache(maxsize=128, ttl=600)
_logger = logging.getLogger(__name__)
class RDateRule(ABC):
    """Base class for relative-date ("RDate") rule handlers.

    A rule is constructed with the working date (``result``) plus optional
    parameters, and :meth:`handle` returns the transformed date.
    """

    result: date            # the date the rule operates on
    number: int             # numeric rule argument (offset, ordinal, year, ...)
    week_mask: str          # numpy busday week mask, e.g. '1111100'
    currencies: List[Union[Currency, str]] = None
    exchanges: List[Union[ExchangeCode, str]] = None
    holiday_calendar: List[date] = None

    def __init__(self, result: date, **params):
        """Store the working date and any optional rule parameters.

        :param result: date the rule is applied to
        :param params: optional keys: number, week_mask, currencies,
            exchanges, holiday_calendar
        """
        self.result = result
        self.number = params.get('number')
        self.week_mask = params.get('week_mask')
        self.currencies = params.get('currencies')
        self.exchanges = params.get('exchanges')
        self.holiday_calendar = params.get('holiday_calendar')
        super().__init__()

    @abstractmethod
    def handle(self) -> date:
        """
        Handle RDate Rule. Use any available class field to compute.
        :return: date
        """
        pass

    def _get_holidays(self, use_usd: bool = True) -> List[date]:
        """Return holiday dates for the configured currencies/exchanges.

        An explicit ``holiday_calendar`` takes precedence. Results are cached
        (module-level TTL cache); on any data-service error an empty list is
        returned (best effort), preserving previous behaviour.

        :param use_usd: when True, USD holidays are included implicitly
        """
        if self.holiday_calendar:
            return self.holiday_calendar
        try:
            # Copy before appending: the previous `self.currencies or []`
            # aliased the caller's list, so appending 'USD' mutated
            # self.currencies (and grew it on every call).
            currencies = list(self.currencies) if self.currencies else []
            if use_usd:
                currencies.append('USD')
            # Compute the cache key once so reads and writes agree. The cache
            # was previously only consulted when exchanges were supplied and
            # written with a differently-normalized key, so currency-only
            # queries never hit it.
            cache_key = hashkey(use_usd, str(currencies), str(self.exchanges))
            cached_data = _cache.get(cache_key)
            if cached_data:
                return cached_data
            holidays = Series(dtype=object)
            if self.exchanges:
                self.exchanges = [x.value if isinstance(x, ExchangeCode) else x.upper() for x in self.exchanges]
                exchange_query = GsDataApi.build_query(start=DATE_LOW_LIMIT, end=DATE_HIGH_LIMIT,
                                                       exchange=self.exchanges)
                data = GsDataApi.query_data(exchange_query, 'HOLIDAY')
                # concat replaces the deprecated Series.append
                holidays = concat([holidays, Series(to_datetime(DataFrame(data)['date']).dt.date)])
            if len(currencies):
                currencies = [x.value if isinstance(x, Currency) else x.upper() for x in currencies]
                currency_query = GsDataApi.build_query(start=DATE_LOW_LIMIT, end=DATE_HIGH_LIMIT,
                                                       currency=currencies)
                data = GsDataApi.query_data(currency_query, 'HOLIDAY_CURRENCY')
                holidays = concat([holidays, Series(to_datetime(DataFrame(data)['date']).dt.date)])
            holidays = holidays.unique().tolist()
            _cache[cache_key] = holidays
            return holidays
        except Exception as e:
            _logger.warning('Unable to fetch holiday calendar. Try passing your own when applying a rule.', e)
            return []

    def _apply_business_days_logic(self, holidays: List[date], offset: int = None, roll: str = 'preceding'):
        """Shift ``self.result`` by business days via numpy ``busday_offset``.

        :param holidays: dates to exclude from the business-day calendar
        :param offset: explicit offset; defaults to ``self.number`` (or 0)
        :param roll: numpy roll policy for a non-business start date
        """
        if offset is not None:
            offset_to_use = offset
        else:
            offset_to_use = self.number if self.number else 0
        return to_datetime(busday_offset(self.result, offset_to_use, roll,
                                         holidays=holidays, weekmask=self.week_mask)).date()

    def _get_nth_day_of_month(self, calendar_day):
        """Return the ``self.number``-th occurrence of ``calendar_day``
        (0=Monday .. 6=Sunday) within the month of ``self.result``."""
        temp = self.result.replace(day=1)
        # days from the 1st to the first occurrence of the wanted weekday
        adj = (calendar_day - temp.weekday()) % 7
        temp += relativedelta(days=adj)
        temp += relativedelta(weeks=self.number - 1)
        return temp

    def add_years(self, holidays: List[date]):
        """Add ``self.number`` years, nudging weekend results before a final
        business-day adjustment against ``holidays``."""
        self.result = (self.result + relativedelta(years=self.number))
        if self.result.isoweekday() in {6, 7}:
            # NOTE(review): isoweekday % 5 maps Sat->+1 (Sun) and Sun->+2
            # (Tue); the Sat case then rolls back to Friday below. Confirm
            # this asymmetry is intended before changing it.
            self.result += timedelta(days=self.result.isoweekday() % 5)
        return self._apply_business_days_logic(holidays, offset=0)

    @staticmethod
    def is_weekend(d: date):
        """Return True when ``d`` falls on Saturday or Sunday."""
        return False if d.weekday() < 5 else True  # 5 Sat, 6 Sun
class ARule(RDateRule):
    """Rule 'A': January 1st of the year given by ``number``."""

    def handle(self) -> date:
        jan_first = self.result.replace(month=1, day=1)
        # NOTE: 'year=' (singular) sets an absolute year, not an offset —
        # presumably intentional for this rule; confirm before changing.
        return jan_first + relativedelta(year=self.number)
class bRule(RDateRule):
    """Rule 'b': offset by business days using the configured holiday data."""

    def handle(self) -> date:
        return self._apply_business_days_logic(self._get_holidays())
class dRule(RDateRule):
    """Rule 'd': shift by ``number`` calendar days."""

    def handle(self) -> date:
        shift = relativedelta(days=self.number)
        return self.result + shift
class eRule(RDateRule):
    """Rule 'e': snap to the last calendar day of the month."""

    def handle(self) -> date:
        _, last_day = calendar.monthrange(self.result.year, self.result.month)
        return self.result.replace(day=last_day)
class FRule(RDateRule):
    """Rule 'F': the ``number``-th Friday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.FRIDAY
        return self._get_nth_day_of_month(target_weekday)
class gRule(RDateRule):
    """Rule 'g': shift by ``number`` weeks, then apply business-day logic
    with an empty holiday list (only the week mask constrains the result)."""

    def handle(self) -> date:
        self.result = self.result + relativedelta(weeks=self.number)
        # NOTE(review): the default offset re-applies self.number as business
        # days after the week shift — confirm this double use is intended.
        return self._apply_business_days_logic([])
class NRule(RDateRule):
    """Rule 'N': the ``number``-th Monday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=MO(self.number))
        return self.result + delta
class GRule(RDateRule):
    """Rule 'G': the ``number``-th Friday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=FR(self.number))
        return self.result + delta
class IRule(RDateRule):
    """Rule 'I': the ``number``-th Saturday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=SA(self.number))
        return self.result + delta
class kRule(RDateRule):
    """Rule 'k': add ``number`` years, adjusted against the holiday calendar."""

    def handle(self) -> date:
        holiday_dates = self._get_holidays()
        return self.add_years(holiday_dates)
class MRule(RDateRule):
    """Rule 'M': the ``number``-th Monday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.MONDAY
        return self._get_nth_day_of_month(target_weekday)
class PRule(RDateRule):
    """Rule 'P': the ``number``-th Sunday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=SU(self.number))
        return self.result + delta
class rRule(RDateRule):
    """Rule 'r': December 31st, shifted by ``number`` years."""

    def handle(self) -> date:
        year_end = self.result.replace(month=12, day=31)
        return year_end + relativedelta(years=self.number)
class RRule(RDateRule):
    """Rule 'R': the ``number``-th Thursday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.THURSDAY
        return self._get_nth_day_of_month(target_weekday)
class SRule(RDateRule):
    """Rule 'S': the ``number``-th Thursday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=TH(self.number))
        return self.result + delta
class TRule(RDateRule):
    """Rule 'T': the ``number``-th Tuesday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.TUESDAY
        return self._get_nth_day_of_month(target_weekday)
class uRule(RDateRule):
    """Rule 'u': business-day offset using configured calendars only
    (no implicit USD holidays)."""

    def handle(self) -> date:
        non_usd_holidays = self._get_holidays(use_usd=False)
        return self._apply_business_days_logic(non_usd_holidays)
class URule(RDateRule):
    """Rule 'U': the ``number``-th Tuesday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=TU(self.number))
        return self.result + delta
class vRule(RDateRule):
    """Rule 'v': last business day of the month ``number`` months away,
    rolled backward, without implicit USD holidays."""

    def handle(self) -> date:
        if self.number:
            self.result = self.result + relativedelta(months=self.number)
        _, last_day = calendar.monthrange(self.result.year, self.result.month)
        self.result = self.result.replace(day=last_day)
        holidays = self._get_holidays(use_usd=False)
        return self._apply_business_days_logic(holidays, offset=0, roll='backward')
class VRule(RDateRule):
    """Rule 'V': the ``number``-th Saturday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.SATURDAY
        return self._get_nth_day_of_month(target_weekday)
class WRule(RDateRule):
    """Rule 'W': the ``number``-th Wednesday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.WEDNESDAY
        return self._get_nth_day_of_month(target_weekday)
class wRule(RDateRule):
    """Rule 'w': shift by ``number`` weeks, then apply business-day logic
    against the configured holiday calendar."""

    def handle(self) -> date:
        self.result = self.result + relativedelta(weeks=self.number)
        # NOTE(review): the default offset re-applies self.number as business
        # days after the week shift — confirm this double use is intended.
        return self._apply_business_days_logic(self._get_holidays())
class xRule(RDateRule):
    """Rule 'x': last business day of the current month, rolled backward,
    with the full (USD-inclusive) holiday calendar."""

    def handle(self) -> date:
        _, last_day = calendar.monthrange(self.result.year, self.result.month)
        self.result = self.result.replace(day=last_day)
        return self._apply_business_days_logic(self._get_holidays(), offset=0, roll='backward')
class XRule(RDateRule):
    """Rule 'X': the ``number``-th Wednesday relative to the date."""

    def handle(self) -> date:
        delta = relativedelta(weekday=WE(self.number))
        return self.result + delta
class yRule(RDateRule):
    """Rule 'y': add ``number`` years with weekend adjustment, ignoring holidays."""

    def handle(self) -> date:
        no_holidays = []
        return self.add_years(no_holidays)
class ZRule(RDateRule):
    """Rule 'Z': the ``number``-th Sunday of the current month."""

    def handle(self) -> date:
        target_weekday = calendar.SUNDAY
        return self._get_nth_day_of_month(target_weekday)
| 33.358491 | 112 | 0.667421 |
acf9fed2a429c47f186d13b6bfa944e368f112ff | 15,457 | py | Python | tests/server/grid_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-01-23T17:12:41.000Z | 2019-01-14T13:38:17.000Z | tests/server/grid_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 242 | 2016-05-09T18:46:51.000Z | 2022-03-31T22:02:29.000Z | tests/server/grid_test.py | WIPACrepo/iceprod | 83615da9b0e764bc2498ac588cc2e2b3f5277235 | [
"MIT"
] | 2 | 2017-03-27T09:13:40.000Z | 2019-01-27T10:55:30.000Z | """
Test script for grid
"""
from __future__ import absolute_import, division, print_function
from tests.util import unittest_reporter, glob_tests, services_mock
import logging
logger = logging.getLogger('grid_test')
import os
import sys
import time
import random
from datetime import datetime,timedelta
from contextlib import contextmanager
import shutil
import socket
import tempfile
from multiprocessing import Queue,Pipe
try:
import cPickle as pickle
except:
import pickle
import unittest
from unittest.mock import patch, MagicMock
import tornado.gen
from tornado.concurrent import Future
from tornado.testing import AsyncTestCase
import iceprod.server
from iceprod.server import module
from iceprod.server.grid import BaseGrid
from iceprod.core.resources import Resources
from rest_tools.client import RestClient
from .module_test import module_test, TestExecutor
class grid_test(AsyncTestCase):
    """Unit tests for iceprod.server.grid.BaseGrid: construction,
    check_and_clean, task queueing, pilot setup, submit directories,
    config writing and resource extraction. REST calls are mocked via
    MagicMock(spec=RestClient)."""

    def setUp(self):
        super(grid_test,self).setUp()
        orig_dir = os.getcwd()
        # every test runs inside a throwaway working directory
        self.test_dir = tempfile.mkdtemp(dir=orig_dir)
        os.chdir(self.test_dir)
        def clean_dir():
            os.chdir(orig_dir)
            shutil.rmtree(self.test_dir)
        self.addCleanup(clean_dir)
        self.executor = TestExecutor()
        # override self.db_handle
        self.services = services_mock()

    @unittest_reporter
    def test_001_init(self):
        # constructor stores gridspec/cfg, and too few args must raise
        site = 'thesite'
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'site_id':site,
               'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'test':1}},
               'db':{'address':None,'ssl':False}}
        # call normal init
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     None)
        self.assertTrue(g)
        self.assertEqual(g.gridspec, gridspec)
        self.assertEqual(g.queue_cfg, cfg['queue'][name])
        self.assertEqual(g.cfg, cfg)
        # call init with too few args
        try:
            g = BaseGrid(gridspec, cfg['queue'][name], cfg)
        except:
            pass
        else:
            raise Exception('too few args did not raise exception')

    @patch('iceprod.server.grid.BaseGrid._delete_dirs')
    @patch('iceprod.server.grid.BaseGrid.remove')
    @patch('iceprod.server.grid.BaseGrid.get_grid_status')
    @unittest_reporter
    async def test_010_check_and_clean(self, get_grid_status, remove, delete_dirs):
        """Test check_and_clean"""
        site = 'thesite'
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'test':1,'monitor_address':'localhost'}},
               'db':{'address':None,'ssl':False}}
        # init
        client = MagicMock(spec=RestClient)
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     client)
        if not g:
            raise Exception('init did not return grid object')
        # call with empty queue
        f = Future()
        f.set_result({})
        client.request.return_value = f
        f = Future()
        f.set_result({})
        get_grid_status.return_value = f
        await g.check_and_clean()
        self.assertEqual(client.request.call_args_list[0][0][1], '/pilots')
        get_grid_status.assert_called()
        remove.assert_not_called()
        delete_dirs.assert_not_called()
        # call with one pilot in iceprod, nothing on queue
        # expectation: that pilot is deleted via a /pilots/123 request
        client.request.reset_mock()
        host = socket.getfqdn()
        f = Future()
        f.set_result({'123':{'pilot_id':'123','grid_queue_id':'foo','submit_dir':'bar','queue_host':host}})
        client.request.return_value = f
        f = Future()
        f.set_result({})
        get_grid_status.return_value = f
        f = Future()
        f.set_result(MagicMock())
        remove.return_value = f
        f = Future()
        f.set_result(MagicMock())
        delete_dirs.return_value = f
        await g.check_and_clean()
        self.assertEqual(client.request.call_args_list[0][0][1], '/pilots')
        get_grid_status.assert_called()
        remove.assert_not_called()
        delete_dirs.assert_not_called()
        self.assertEqual(client.request.call_args_list[1][0][1], '/pilots/123')

    @patch('iceprod.server.grid.BaseGrid.setup_pilots')
    @unittest_reporter
    async def test_011_queue(self, setup_pilots):
        # queue() must sort tasks by dataset priority before pilot setup
        f = Future()
        f.set_result(None)
        setup_pilots.return_value = f
        site = 's1'
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'site_id':site,
               'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'tasks_on_queue':[1,5,2],
                              'monitor_address':'localhost'}},
               'db':{'address':None,'ssl':False}}
        # init
        client = MagicMock(spec=RestClient)
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     client)
        if not g:
            raise Exception('init did not return grid object')
        tasks = [{'task_id':'1', 'dataset_id':'bar', 'status_changed':'2017', 'requirements':{}},
                 {'task_id':'2', 'dataset_id':'bar', 'status_changed':'2017', 'requirements':{}},
                 {'task_id':'3', 'dataset_id':'baz', 'status_changed':'2017', 'requirements':{}},]
        dataset = {'dataset_id':'bar', 'priority':1}
        dataset2 = {'dataset_id':'baz', 'priority':2}
        async def req(method, path, args=None):
            # fake REST backend: task list plus per-dataset metadata
            logger.info('req path=%r, args=%r', path, args)
            if 'task' in path:
                return {'tasks':tasks.copy()}
            if 'bar' in path:
                return dataset
            else:
                return dataset2
        client.request.side_effect = req
        # call normally
        g.tasks_queued = 0
        await g.queue()
        self.assertTrue(setup_pilots.called)
        # higher-priority dataset 'baz' must come first
        expected = [tasks[2], tasks[0], tasks[1]]
        self.assertEqual(setup_pilots.call_args[0][0], expected)

    @patch('iceprod.server.grid.BaseGrid.setup_submit_directory')
    @patch('iceprod.server.grid.BaseGrid.submit')
    @unittest_reporter
    async def test_020_setup_pilots(self, submit, setup_submit_directory):
        async def submit_func(task):
            task['grid_queue_id'] = ','.join('123' for _ in range(task['num']))
        submit.side_effect = submit_func
        f = Future()
        f.set_result(None)
        setup_submit_directory.return_value = f
        site = 'thesite'
        self.check_run_stop = False
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'site_id':site,
               'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'queueing_factor_priority':1,
                              'queueing_factor_dataset':1,
                              'queueing_factor_tasks':1,
                              'max_task_queued_time':1000,
                              'max_task_processing_time':1000,
                              'max_task_reset_time':300,
                              'pilots_on_queue': [5,10],
                              'ping_interval':60,
                              'monitor_address':'localhost'
                              }},
               'db':{'address':None,'ssl':False}}
        # init
        client = MagicMock(spec=RestClient)
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     client)
        if not g:
            raise Exception('init did not return grid object')
        pilot_ids = list(range(100))
        async def req(method, path, args=None):
            # fake REST backend: GET -> existing pilots, POST -> new pilot id,
            # PATCH -> count queued updates
            logger.info('req path=%r, args=%r', path, args)
            if method == 'GET':
                return {'foo':{'pilot_id':'foo','host':None,'resources':{'cpu':1,'gpu':0,'disk':10,'memory':3,'time':1}},
                        'bar':{'pilot_id':'bar','host':'baz','resources':{}},
                        }
            elif method == 'POST':
                return {'result':str(pilot_ids.pop(0))}
            else: # PATCH
                req.num_queued += 1
                return None
        req.num_queued = 0
        client.request.side_effect = req
        # call normally
        tasks = [{'task_id':'3', 'dataset_id':'baz', 'requirements':{'cpu':1,'memory':4}},
                 {'task_id':'1', 'dataset_id':'bar', 'requirements':{'cpu':1,'memory':2}},
                 {'task_id':'2', 'dataset_id':'bar', 'requirements':{'cpu':1,'memory':2}},]
        await g.setup_pilots(tasks)
        self.assertTrue(submit.called)
        self.assertTrue(setup_submit_directory.called)
        self.assertEqual(req.num_queued, 3)
        # NOTE(review): the next assertion is an exact duplicate of the
        # previous one and adds no coverage
        self.assertEqual(req.num_queued, 3)
        # test error
        # both failure modes must be swallowed by setup_pilots (no raise)
        setup_submit_directory.side_effect = Exception()
        await g.setup_pilots(tasks)
        f = Future()
        f.set_result(None)
        setup_submit_directory.return_value = f
        submit.side_effect = Exception()
        await g.setup_pilots(tasks)

    @patch('iceprod.server.grid.BaseGrid.generate_submit_file')
    @patch('iceprod.server.grid.BaseGrid.write_cfg')
    @unittest_reporter
    async def test_023_setup_submit_directory(self, write_cfg, generate_submit_file):
        site = 'thesite'
        self.check_run_stop = False
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'site_id':site,
               'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'tasks_on_queue':[1,5,2],
                              'max_task_queued_time':1000,
                              'max_task_processing_time':1000,
                              'max_task_reset_time':300,
                              'ping_interval':60,
                              'monitor_address':'localhost'
                             }
                        },
               }
        # init
        client = MagicMock(spec=RestClient)
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     client)
        if not g:
            raise Exception('init did not return grid object')
        # call normally
        tokens = list(range(100,200))
        async def req(method, path, args=None):
            logger.info('req path=%r, args=%r', path, args)
            return {'result':str(tokens.pop(0))}
        req.num_queued = 0
        client.request.side_effect = req
        f = Future()
        f.set_result(None)
        generate_submit_file.return_value = f
        write_cfg.return_value = (None, None)
        task = {'task_id':'1','name':'0','debug':0,'dataset_id':'d1',
                'job':0,'jobs_submitted':1}
        await g.setup_submit_directory(task)
        self.assertTrue(generate_submit_file.called)
        self.assertTrue(write_cfg.called)

    @unittest_reporter
    def test_026_write_cfg(self):
        site = 'thesite'
        self.check_run_stop = False
        name = 'grid1'
        gridspec = site+'.'+name
        submit_dir = os.path.join(self.test_dir,'submit_dir')
        cfg = {'site_id':site,
               'queue':{'max_resets':5,
                        'submit_dir':submit_dir,
                        name:{'tasks_on_queue':[1,5,2],
                              'max_task_queued_time':1000,
                              'max_task_processing_time':1000,
                              'max_task_reset_time':300,
                              'ping_interval':60,
                              'monitor_address':'localhost'
                             }
                        },
               }
        # init
        client = MagicMock(spec=RestClient)
        g = BaseGrid(gridspec, cfg['queue'][name], cfg, self.services,
                     self.io_loop, self.executor, module.FakeStatsClient(),
                     client)
        if not g:
            raise Exception('init did not return grid object')
        # call normally
        task = {'task_id':'1','name':'0','debug':0,'dataset_id':'d1',
                'job':0,'jobs_submitted':1,'submit_dir':submit_dir}
        config, filelist = g.write_cfg(task)
        self.assertEqual(filelist[0], os.path.join(submit_dir,'task.cfg'))
        self.assertTrue(os.path.exists(filelist[0]))
        # call with extra opts
        # adds site_temp, http credentials, a CA cert file and an x509 proxy;
        # the extra files must show up in the returned filelist
        task = {'task_id':'1','name':'0','debug':0,'submit_dir':submit_dir,
                'reqs':{'OS':'RHEL6'}}
        cfg['queue']['site_temp'] = 'tmp'
        cfg['download'] = {'http_username':'foo','http_password':'bar'}
        cfg['system'] = {'remote_cacert': 'baz'}
        with open('baz', 'w') as f:
            f.write('bazbaz')
        cfg['queue']['x509proxy'] = 'x509'
        with open('x509', 'w') as f:
            f.write('x509x509')
        config, filelist = g.write_cfg(task)
        self.assertEqual(filelist[0], os.path.join(submit_dir,'task.cfg'))
        self.assertTrue(os.path.exists(filelist[0]))
        self.assertEqual(len(filelist),3)
        self.assertIn('baz', filelist[1])
        self.assertIn('x509', filelist[2])

    @unittest_reporter
    def test_100_get_resources(self):
        # _get_resources must pass through known keys and drop unknown ones
        tasks = [
            {'reqs':{'cpu':1,'memory':4.6}},
        ]
        reqs = list(BaseGrid._get_resources(tasks))
        self.assertIn('cpu', reqs[0])
        self.assertEqual(reqs[0]['cpu'], tasks[0]['reqs']['cpu'])
        self.assertIn('memory', reqs[0])
        self.assertEqual(reqs[0]['memory'], tasks[0]['reqs']['memory'])
        tasks = [
            {'reqs':{'os':'RHEL_7_x86_64'}},
        ]
        reqs = list(BaseGrid._get_resources(tasks))
        self.assertIn('os', reqs[0])
        self.assertEqual(reqs[0]['os'], tasks[0]['reqs']['os'])
        tasks = [
            {'reqs':{'cpu':1,'memory':4.6,'foo':'bar'}},
        ]
        reqs = list(BaseGrid._get_resources(tasks))
        self.assertIn('cpu', reqs[0])
        self.assertEqual(reqs[0]['cpu'], tasks[0]['reqs']['cpu'])
        self.assertIn('memory', reqs[0])
        self.assertEqual(reqs[0]['memory'], tasks[0]['reqs']['memory'])
def load_tests(loader, tests, pattern):
    """unittest load_tests hook: build the suite from grid_test, filtered
    through the project's glob_tests() test-name selector."""
    selected = glob_tests(loader.getTestCaseNames(grid_test))
    suite = unittest.TestSuite()
    suite.addTests(loader.loadTestsFromNames(selected, grid_test))
    return suite
| 37.335749 | 122 | 0.544284 |
acf9ff45411cc08d0856bdcb3002fa4bb2aca971 | 12,004 | py | Python | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 6 | 2021-04-12T14:03:55.000Z | 2022-03-08T19:40:36.000Z | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 43 | 2021-04-19T09:46:22.000Z | 2022-03-29T15:13:29.000Z | vimms/Controller/misc.py | hechth/vimms | ce5922578cf225d46cb285da8e7af97b5321f5aa | [
"MIT"
] | 1 | 2021-12-07T08:17:01.000Z | 2021-12-07T08:17:01.000Z | import math
import copy
import itertools
import subprocess
from pathlib import Path
import numpy as np
from loguru import logger
from vimms.Controller.base import Controller
from vimms.Common import *
class FixedScansController(Controller):
    """
    A controller driven by a pre-computed schedule of scans.

    The schedule is turned into tasks for the mass spec queue; the
    controller itself never generates new tasks in response to scans.
    """

    def __init__(self, schedule=None, params=None):
        """
        Create a FixedScansController from a schedule of scan parameters.

        :param schedule: a list of ScanParameter objects
        :param params: mass spec advanced parameters, if any
        """
        super().__init__(params=params)
        self.tasks = None
        self.initial_task = None
        # install the schedule immediately when one is provided
        if schedule:
            self.set_tasks(schedule)

    def get_initial_tasks(self):
        """
        Return the remaining scan parameter objects to push onto the queue.

        :return: all tasks after the initial one
        """
        # set_tasks() must have been called first
        assert self.tasks is not None
        return self.tasks

    def get_initial_scan_params(self):
        """
        Return the scan parameter object sent when acquisition starts.

        :return: the first task of the schedule
        """
        # set_tasks() must have been called first
        assert self.initial_task is not None
        return self.initial_task

    def set_tasks(self, schedule):
        """
        Install a fixed schedule of tasks in this controller.

        :param schedule: a list of scan parameter objects
        :return: None
        """
        assert isinstance(schedule, list)
        # first entry starts acquisition; the rest are queued afterwards
        self.initial_task, self.tasks = schedule[0], schedule[1:]

    def handle_scan(self, scan, current_size, pending_size):
        # record each scan we receive by MS level; never emit new tasks
        logger.debug('Time %f Received %s' % (scan.rt, scan))
        self.scans[scan.ms_level].append(scan)
        return []

    def update_state_after_scan(self, last_scan):
        pass
class MS2PlannerController(FixedScansController):
    """Fixed-schedule controller whose schedules come from the external
    MS2Planner tool (run as a subprocess in from_fullscan)."""

    @staticmethod
    def mzmine2ms2planner(inpath, outpath):
        '''Transform an mzmine2 box CSV into MS2Planner's default input format.

        Reads the header to locate the m/z, RT, "Peak charge" and
        "Peak height" columns, converts RT from minutes to seconds, and
        writes one output row per (charge, intensity) column pair.
        '''
        records = []
        with open(inpath, "r") as f:
            fields = {}
            # map each header name to the list of column indices it occupies
            for i, name in enumerate(f.readline().split(",")):
                if(not name in fields): fields[name] = list()
                fields[name].append(i)
            mz = fields["row m/z"][0]
            rt = fields["row retention time"][0]
            charges = next(idxes for fd, idxes in fields.items() if fd.strip().endswith("Peak charge"))
            intensities = next(idxes for fd, idxes in fields.items() if fd.strip().endswith("Peak height"))
            for ln in f:
                sp = ln.split(",")
                for charge, intensity in zip(charges, intensities):
                    records.append([
                        sp[mz],
                        str(float(sp[rt]) * 60),
                        sp[charge],
                        "1",
                        sp[intensity]
                    ])
        out_headers = ["Mass [m/z]", "retention_time", "charge", "Blank", "Sample"]
        with open(outpath, "w+") as f:
            f.write(",".join(out_headers) + "\n")
            for r in records: f.write(",".join(r) + "\n")

    @staticmethod
    def minimise_single(x, target):
        # best single coefficient c >= 0 minimising |target - c*x|
        if(target < 0): return 0
        c = int(target // x)
        return min(c, c+1, key=lambda c: abs(target - c * x))

    @staticmethod
    def minimise_distance(target, *args):
        '''Solve argmin(a1, a2 ... an)(a1x1 + ... + anxn - t) for non-negative integer a1...an and non-negative reals x1...xn, t using backtracking search.
           i.e. Schedule tasks of different fixed lengths s.t. the last task ends as close to the target time as possible.
        '''
        best_coefficients = (float("inf"), [])
        stack = [MS2PlannerController.minimise_single(args[0], target)] if len(args) > 0 else []
        while(stack != []):
            # greedily extend the partial assignment to a full one
            remainder = target - sum(s * a for s, a in zip(stack, args))
            for i in range(len(stack), len(args)):
                c = MS2PlannerController.minimise_single(args[i], remainder)
                stack.append(c)
                remainder -= c * args[i]
            dist = abs(remainder)
            # ties (isclose) keep the earlier champion
            if(not math.isclose(dist, best_coefficients[0]) and dist < best_coefficients[0]): best_coefficients = (dist, copy.copy(stack))
            #if(dist < best_coefficients[0]): best_coefficients = (dist, copy.copy(stack))
            #if(dist < best_coefficients[0]):
            #    if(math.isclose(dist, best_coefficients[0])): print(f"IS CLOSE, DIST: {dist}, CHAMP DIST: {best_coefficients[0]}, STACK: {stack}, CHAMPION: {best_coefficients[1]}")
            #    best_coefficients = (dist, copy.copy(stack))
            # backtrack: drop exhausted positions, then decrement the last one
            stack.pop()
            while(stack != [] and stack[-1] <= 0): stack.pop()
            if(stack != []): stack[-1] -= 1
        return best_coefficients[1]

    @staticmethod
    def parse_ms2planner(fpath):
        # parse MS2Planner output: one tab-separated path per line, each scan
        # being a space-separated tuple in the order of `fields`
        schedules = []
        fields = ["mz_centre", "mz_isolation", "duration", "rt_start", "rt_end", "intensity", "apex_rt", "charge"]
        with open(fpath, "r") as f:
            for path in f:
                schedules.append([])
                for scan in path.strip().split("\t")[1:]:
                    schedules[-1].append(dict(zip(fields, map(float, scan.split(" ")))))
        return schedules

    @staticmethod
    def sched_dict2params(schedule, scan_duration_dict):
        '''Convert a parsed MS2Planner schedule into ViMMS scan parameters.

        Scan_duration_dict matches the format of MS scan_duration_dict with
        _fixed_ scan lengths; filler MS1/MS2 scans are inserted so each
        targeted MS2 scan starts as close to its rt_start as possible.
        '''
        time, new_sched = 0, []
        srted = sorted(schedule, key=lambda s: s["rt_start"])
        print("Schedule times: {}".format([s["rt_start"] for s in srted]))
        print(f"NUM SCANS IN SCHEDULE FILE: {len(schedule)}")
        #new_sched.append(get_default_scan_params())
        #scan_duration_dict = {1: 0.2, 2: 0.2}
        id_count = INITIAL_SCAN_ID
        for ms2 in srted:
            # how many filler MS1/MS2 scans fit in the gap before this scan
            filler = MS2PlannerController.minimise_distance(ms2["rt_start"] - time, scan_duration_dict[1], scan_duration_dict[2])
            print(f"filler_scans: {filler}")
            for i in range(filler[0]):
                sp = get_default_scan_params()
                new_sched.append(sp)
                id_count += 1
            for i in range(filler[1]):
                #print(f"sid: {id_count}")
                new_sched.append(get_dda_scan_param(0, 0.0, id_count, ms2["mz_isolation"] * 2, 0.0, 0.0))
                id_count += 1
            # the actual targeted MS2 scan
            new_sched.append(get_dda_scan_param(ms2["mz_centre"], 0.0, id_count, ms2["mz_isolation"] * 2, 0.0, 0.0))
            id_count += 1
            times = [time, scan_duration_dict[1] * filler[0], scan_duration_dict[2] * filler[1]]
            time += sum(c * scan_duration_dict[i+1] for i, c in enumerate(filler)) + scan_duration_dict[2]
            print(f"Start time: {times[0]}, MS1 duration: {times[1]}, MS2 duration: {times[2]}, End time: {time}")
        print(f"schedule_length: {len(new_sched)}")
        print(f"Durations: {scan_duration_dict}")
        return new_sched

    @staticmethod
    def from_fullscan(ms2planner_dir,
                      fullscan_file,
                      fullscan_mzmine_table,
                      out_file,
                      intensity_threshold,
                      intensity_ratio,
                      num_injections,
                      intensity_accu,
                      restriction,
                      isolation,
                      delay,
                      min_rt,
                      max_rt,
                      scan_duration_dict,
                      params=None,
                      cluster_method="kNN",
                      userpython="python"):
        """Run MS2Planner's path_finder on a fullscan + mzmine table and
        return one MS2PlannerController per planned injection. Also dumps
        the generated scan parameters next to out_file for inspection."""
        converted = os.path.join(os.path.dirname(out_file), "mzmine2ms2planner.txt")
        MS2PlannerController.mzmine2ms2planner(fullscan_mzmine_table, converted)
        # external tool invocation; argument order is fixed by path_finder.py
        subprocess.run(
            [
                userpython,
                os.path.join(ms2planner_dir, "path_finder.py"),
                "curve",
                converted,
                #os.path.join(ms2planner_dir, "test", "Blank_to_Sample_mrgd.csv"),
                out_file,
                str(intensity_threshold),
                str(intensity_ratio),
                str(num_injections),
                "-infile_raw", str(fullscan_file),
                "-intensity_accu", str(intensity_accu),
                "-restriction", str(restriction[0]), str(restriction[1]),
                "-isolation", str(isolation),
                "-delay", str(delay),
                "-min_scan", str(min_rt),
                "-max_scan", str(max_rt),
                "-cluster", str(cluster_method)
            ]
        )
        schedules = [MS2PlannerController.sched_dict2params(sch, scan_duration_dict) for sch in MS2PlannerController.parse_ms2planner(out_file)]
        with open(os.path.join(os.path.dirname(out_file), "scan_params.txt"), "w+") as f:
            for i, schedule in enumerate(schedules):
                f.write(f"SCHEDULE {i}\n\n")
                f.write("".join(f"SCAN {j}: {scan}\n\n" for j, scan in enumerate(schedule)))
        return [MS2PlannerController(schedule=schedule, params=params) for schedule in schedules]
class MatchingController(FixedScansController):
    # Thin wrapper: one fixed-schedule controller per schedule produced by a
    # matching object.
    @staticmethod
    def from_matching(matching, isolation_width, params=None):
        # `matching` is expected to expose make_schedules(isolation_width)
        # returning an iterable of scan-parameter lists
        return [MatchingController(schedule=schedule, params=params) for schedule in matching.make_schedules(isolation_width)]
class MultiIsolationController(Controller):
    """
    A controller that fragments the top-N precursors of each MS1 scan in
    every possible combination (multiplexed isolation): each non-empty
    subset of the N most intense peaks gets its own MS2 scan.
    """

    def __init__(self, N, isolation_width=DEFAULT_ISOLATION_WIDTH, params=None):
        """
        :param N: number of top-intensity precursors to consider (must be > 1)
        :param isolation_width: isolation window width passed to MS2 scans
        :param params: mass spec advanced parameters, if any
        """
        super().__init__(params=params)
        assert N > 1
        self.N = N
        self.isolation_width = isolation_width
        self.mz_tol = 10
        self.rt_tol = 15

    def _make_scan_order(self, N):
        """Return every non-empty subset of range(N), smallest first; each
        tuple lists which precursor indices (in intensity-sorted order)
        share one MS2 scan."""
        initial_idx = range(N)
        scan_order = []
        for L in range(1, len(initial_idx) + 1):
            for subset in itertools.combinations(initial_idx, L):
                scan_order.append(subset)
        return scan_order

    def _process_scan(self, scan):
        """Generate one MS2 scan per precursor subset of the pending MS1
        scan, followed by a fresh MS1 scan; returns [] when there is no
        MS1 scan to process."""
        new_tasks = []
        if self.scan_to_process is not None:
            mzs = self.scan_to_process.mzs
            intensities = self.scan_to_process.intensities
            # indices of peaks sorted by decreasing intensity
            idx = np.argsort(intensities)[::-1]
            precursor_scan_id = self.scan_to_process.scan_id
            scan_order = self._make_scan_order(min(self.N, len(mzs)))
            for subset in scan_order:
                mz = []
                intensity = []
                for s in subset:
                    mz.append(mzs[idx[s]])
                    # bug fix: previously appended mzs[idx[s]] here, so the
                    # intensity list silently contained m/z values
                    intensity.append(intensities[idx[s]])
                dda_scan_params = self.get_ms2_scan_params(mz, intensity, precursor_scan_id, self.isolation_width,
                                                           self.mz_tol, self.rt_tol)
                new_tasks.append(dda_scan_params)
                self.current_task_id += 1
            # schedule the next MS1 scan and remember its id so we know
            # which scan to process next
            ms1_scan_params = self.get_ms1_scan_params()
            self.current_task_id += 1
            self.next_processed_scan_id = self.current_task_id
            new_tasks.append(ms1_scan_params)
        return new_tasks

    def update_state_after_scan(self, scan):
        pass
| 43.02509 | 181 | 0.572809 |
acf9ff4a51644df628459790f3b657e38e247211 | 2,221 | py | Python | server.py | DianaKom/handmade-site | e47d48ace5fd6a8118abf719fbe24ea8acc4f13a | [
"MIT"
] | null | null | null | server.py | DianaKom/handmade-site | e47d48ace5fd6a8118abf719fbe24ea8acc4f13a | [
"MIT"
] | null | null | null | server.py | DianaKom/handmade-site | e47d48ace5fd6a8118abf719fbe24ea8acc4f13a | [
"MIT"
] | null | null | null | import smtplib
import os

from email.message import EmailMessage

from flask import Flask, send_from_directory, render_template, request
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView

from db import db_session, Category, Product
# Flask application; static_url_path='' serves static files without the
# /static prefix. flask-admin provides CRUD views for the shop models.
app = Flask(__name__, static_url_path='')
admin = Admin(app, name='shop', template_mode='bootstrap3')
admin.add_view(ModelView(Category, db_session))
admin.add_view(ModelView(Product, db_session))
@app.route("/category/<int:id>")
def category(id):
    """Render a category page listing the products belonging to it."""
    cat = Category.query.get(id)
    products_list = Product.query.filter(Product.category_id == cat.id).all()
    # (removed a leftover debug print of products_list to stdout)
    return render_template("categories.html", category=cat, products_list=products_list)
@app.route("/static/<path:path>")
def send_static(path):
    # serve files from the local 'static' directory
    return send_from_directory('static', path)
@app.route("/")
def index():
    # home page: list all categories
    category_list = Category.query.all()
    return render_template("index.html", category_list=category_list)
@app.route("/registration")
def register():
    # TODO: registration is not implemented yet; this stub returns None,
    # which Flask will treat as an invalid response if the route is hit
    pass
@app.route("/products/<int:id>")
def product(id):
    # single-product detail page
    pro = Product.query.get(id)
    return render_template("products.html", pro=pro)
@app.route("/buy", methods=['POST'])
def buy():
    """Handle a purchase: email a confirmation to the customer and an order
    notification to the shop owner.

    Expects form fields: email, product_id, quantity.
    """
    # SECURITY FIX: credentials were previously hard-coded in source; read
    # them from the environment instead (raises KeyError if unset, which is
    # preferable to shipping a password in the repository).
    smtp_user = os.environ['SHOP_SMTP_USER']
    smtp_password = os.environ['SHOP_SMTP_PASSWORD']
    server = smtplib.SMTP('smtp.gmail.com', 587)
    try:
        server.starttls()  # Puts connection to SMTP server in TLS mode
        server.ehlo()
        server.login(smtp_user, smtp_password)
        # confirmation mail to the customer
        msg = EmailMessage()
        msg['From'] = smtp_user
        msg['To'] = request.form['email']
        msg['Subject'] = 'Thanks!'
        msg.set_content("""\
Hello! Thanks for shopping! We send EMS all around the world. Please write back with the address! HandMade Dolls
    """)
        server.send_message(msg)
        # order notification to the shop owner
        msg = EmailMessage()
        msg['From'] = smtp_user
        msg['To'] = 'dianakom@hotmail.com'
        msg['Subject'] = 'New customer'
        msg.set_content("""\
User: {} Product: {} Quantity: {}
    """.format(request.form['email'], request.form['product_id'], request.form['quantity']))
        server.send_message(msg)
    finally:
        # FIX: the SMTP connection was previously never closed
        server.quit()
    return 'User: {} Product: {}'.format(request.form['email'], request.form['product_id'])
if __name__ == "__main__":
    # development server only; debug mode must not be enabled in production
    app.debug = True
    app.run(port=5000)
| 28.474359 | 116 | 0.692481 |
acf9ff68e443b7f60785f295f5b12059dfc896af | 14,508 | py | Python | lgr/tools/utils.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 7 | 2017-07-10T22:39:52.000Z | 2021-06-25T20:19:28.000Z | lgr/tools/utils.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 13 | 2016-10-26T19:42:00.000Z | 2021-12-13T19:43:42.000Z | lgr/tools/utils.py | icann/lgr-core | 482e1c2cc485eb666e8b3547644baf0e364ebc96 | [
"BSD-3-Clause"
] | 8 | 2016-11-07T15:40:27.000Z | 2020-09-22T13:48:52.000Z | # -*- coding: utf-8 -*-
"""
utils - List of utility functions for tools.
"""
from __future__ import unicode_literals
import argparse
import codecs
import io
import logging
import os
import sys
from io import BytesIO
from urllib.parse import urlparse
from urllib.request import urlopen
from lgr import text_type
from lgr.parser.xml_parser import XMLParser
from lgr.parser.xml_serializer import serialize_lgr_xml
from lgr.tools.merge_set import merge_lgr_set
from lgr.utils import cp_to_ulabel
from munidata import UnicodeDataVersionManager
logger = logging.getLogger(__name__)
def read_labels(input, unidb=None, do_raise=False, keep_commented=False, as_cp=False):
    """
    Read a label file and yield cleaned-up labels with their parse status.

    A line whose first character is '#' is a comment: it is skipped, or
    yielded as-is when keep_commented is set. An inline '#' truncates the
    label at that point. Empty lines are skipped.

    :param input: Input label list as an iterator of Unicode strings.
    :param unidb: The UnicodeDatabase
    :param do_raise: Whether the label parsing exceptions are raised or not
    :param keep_commented: Whether commented labels are returned (still commented) or not
    :param as_cp: If True, returns a list of code points per label. Otherwise, unicode string.
    :return: generator of (label, valid, error) tuples
    """
    stripped = [line.strip() for line in input]
    for label in stripped:
        if '#' in label:
            if label.startswith('#'):
                # whole-line comment
                if keep_commented:
                    yield label, True, ''
                continue
            # inline comment: keep only the part before '#'
            label = label.split('#', 1)[0].strip()
        if not label:
            continue
        valid, error = True, ''
        # decode U-labels / A-labels into unicode strings (or code points)
        try:
            if unidb:
                label = parse_label_input(label, idna_decoder=unidb.idna_decode_label, as_cp=as_cp)
            else:
                label = parse_label_input(label, as_cp=as_cp)
        except BaseException as ex:
            if do_raise:
                raise
            valid, error = False, text_type(ex)
        yield label, valid, error
def parse_single_cp_input(s):
    """
    Parse a single code point from user input.

    A one-character string is taken literally as that character; anything
    longer is interpreted as hexadecimal, with an optional 'U+' prefix.

    :param s: input
    :return: code point

    >>> parse_single_cp_input('z') == ord('z')  # treat single character as a character
    True
    >>> parse_single_cp_input('1') == ord('1')  # treat single character as a character, even if it is numeric
    True
    >>> parse_single_cp_input('a') == ord('a')  # treat single character as a character, even if it looks like hex
    True
    >>> parse_single_cp_input('U+1')  # U+ always treated as hex
    1
    >>> parse_single_cp_input('u+1')  # U+ can be lowercase
    1
    >>> parse_single_cp_input(' u+1')  # leading space ok
    1
    >>> parse_single_cp_input('u+1 ')  # trailing space ok
    1
    >>> parse_single_cp_input(' u+1 ')  # leading and trailing spaces ok
    1
    >>> parse_single_cp_input('U+10')
    16
    >>> parse_single_cp_input('U+10ffff') == 0x10FFFF
    True
    >>> parse_single_cp_input('U+10FFFF') == 0x10FFFF
    True
    >>> parse_single_cp_input('U+110000')  # overflow
    Traceback (most recent call last):
    ...
    ValueError: code point value must be in the range [0, U+10FFFF]
    >>> parse_single_cp_input('U+')  # short  # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 16: ''
    >>> parse_single_cp_input('0061 0062')  # too many values  # doctest: +IGNORE_EXCEPTION_DETAIL, +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: invalid literal for int() with base 16: '0061 0062'
    """
    stripped = s.strip()
    if len(stripped) == 1:
        # a lone character stands for itself, even if it looks like a digit
        return ord(stripped)
    hex_part = stripped[2:] if stripped[:2].upper() == 'U+' else stripped
    value = int(hex_part, 16)
    # enforce the Unicode code point range
    if not 0 <= value <= 0x10FFFF:
        raise ValueError("code point value must be in the range [0, U+10FFFF]")
    return value
def parse_codepoint_input(s):
    """
    Parse a code point or a space-separated sequence of code points.

    :param s: input
    :return: list of code points

    >>> parse_codepoint_input('0061')
    [97]
    >>> parse_codepoint_input('0061 0062')
    [97, 98]
    >>> parse_codepoint_input('0061    0062')
    [97, 98]
    >>> parse_codepoint_input('a')
    [97]
    >>> parse_codepoint_input('a b')
    [97, 98]
    >>> parse_codepoint_input('U+0061 U+0062')
    [97, 98]
    >>> parse_codepoint_input('U+0061 0062')
    [97, 98]
    """
    # each whitespace-separated token is parsed independently
    return list(map(parse_single_cp_input, s.split()))
def parse_label_input(s, idna_decoder=lambda x: x.encode('utf-8').decode('idna'), as_cp=True):
    """
    Parses a label from user input, applying a bit of auto-detection smarts.

    Detection order: an 'xn--' prefix means A-label (IDNA-decoded); a space
    or 'U+' means a sequence of hex code points; anything else is taken as
    a literal U-label.

    :param s: input string in A-label, U-label or space-separated hex sequences.
    :param idna_decoder: IDNA decode function.
    :param as_cp: If True, returns a list of code points. Otherwise, unicode string.
    :return: list of code points (or a unicode string when as_cp is False)

    >>> parse_label_input('0061')  # treated as U-label - probably the only confusing result
    [48, 48, 54, 49]
    >>> parse_label_input('U+0061')  # this is how to signal that you want hex
    [97]
    >>> parse_label_input('abc')
    [97, 98, 99]
    >>> parse_label_input('a b c')
    [97, 98, 99]
    >>> parse_label_input('xn--m-0ga')  # "öm"
    [246, 109]
    """
    if s.lower().startswith('xn--'):
        # A-label: decode through IDNA first
        if as_cp:
            return [ord(c) for c in idna_decoder(s.lower())]
        else:
            return idna_decoder(s.lower())
    elif ' ' in s or 'U+' in s.upper():
        try:
            label_cp = parse_codepoint_input(s)
        except:
            # a space with unparseable tokens gets a friendlier error;
            # other failures re-raise the original exception unchanged
            if ' ' in s:
                raise ValueError("Label '{}' contains spaces "
                                 "that are not PVALID for IDNA2008".format(s))
            raise
        if as_cp:
            return label_cp
        else:
            return cp_to_ulabel(label_cp)
    else:
        # treat as unicode (U-label)
        if as_cp:
            return [ord(c) for c in s]
        else:
            return s
def merge_lgrs(input_lgrs, name=None, rng=None, unidb=None):
    """
    Merge LGRs to create a LGR set.

    :param input_lgrs: The LGR files belonging to the set
    :param name: The merged LGR name (defaults to 'merged-lgr-set')
    :param rng: The RNG file to validate input LGRs
    :param unidb: The unicode database
    :return: The merged LGR and the LGRs in the set.
    """
    lgr_set = []
    for lgr_file in input_lgrs:
        lgr_parser = XMLParser(lgr_file)
        if unidb:
            lgr_parser.unicode_database = unidb
        if rng:
            # validation failures are logged but do not abort the merge
            validation_result = lgr_parser.validate_document(rng)
            if validation_result is not None:
                logger.error('Errors for RNG validation of LGR %s: %s',
                             lgr_file, validation_result)
        lgr = lgr_parser.parse_document()
        if lgr is None:
            logger.error("Error while parsing LGR file %s." % lgr_file)
            logger.error("Please check compliance with RNG.")
            # NOTE(review): a parse failure returns None implicitly instead
            # of the documented tuple; callers must handle a None result
            return
        lgr_set.append(lgr)
    if not name:
        name = 'merged-lgr-set'
    merged_lgr = merge_lgr_set(lgr_set, name)
    if unidb:
        merged_lgr.unicode_database = unidb
    return merged_lgr, lgr_set
# Helpers for CLI tools
def write_output(s, test=True):
    """Print *s* to stdout (UTF-8 encoded under Python 2); no-op when
    test is False."""
    if not test:
        return
    if sys.version_info.major > 2:
        print(s)
    else:
        # Python 2 needs explicit encoding for non-ASCII output
        print(s.encode('utf-8'))
def get_stdin():
    """Return stdin as a unicode text stream (wrapped in a UTF-8 reader
    on Python 2)."""
    if sys.version_info.major < 3:
        # Python 2 stdin yields bytes; wrap it so callers get unicode
        return codecs.getreader('utf8')(sys.stdin)
    # Python 3 stdin already decodes to unicode
    return sys.stdin
def download_file(source_url):
    """Fetch *source_url* and return (basename, BytesIO of the body)."""
    filename = os.path.basename(urlparse(source_url).path)
    with urlopen(source_url) as resp:
        logger.debug("Retrieve %s at URL %s", filename, source_url)
        # read fully into memory so the connection can be closed
        data = BytesIO(resp.read())
    return filename, data
def parse_lgr(xml, rng=None, unidb=None):
    """
    Parse an LGR document, optionally validating it against an RNG schema.

    :param xml: the LGR XML document to parse
    :param rng: optional RelaxNG schema used to validate the document
    :param unidb: optional Unicode database attached to the parser
    :return: the parsed LGR, or None if RNG validation failed
    """
    lgr_parser = XMLParser(xml)
    if unidb:
        lgr_parser.unicode_database = unidb
    if rng is not None:
        validation_result = lgr_parser.validate_document(rng)
        if validation_result is not None:
            logger.error('Errors for RNG validation of LGR file %s: %s', xml, validation_result)
            # NOTE(review): implicit None return here; callers must check
            return
    lgr = lgr_parser.parse_document()
    return lgr
class LgrToolArgParser(argparse.ArgumentParser):
    """Argument parser with helpers for the options shared by LGR CLI tools.

    Parsed arguments and the ICU Unicode database are cached on the
    instance, so the accessors below can be called repeatedly."""
    def __init__(self, *args, **kwargs):
        super(LgrToolArgParser, self).__init__(*args, **kwargs)
        self.args = None    # cached result of parse_args()
        self.unidb = None   # cached Unicode database (built from --libs)
    def add_common_args(self):
        # logging + ICU libraries + RNG schema: used by most tools
        self.add_logging_args()
        self.add_libs_arg()
        self.add_rng_arg()
    def add_logging_args(self):
        self.add_argument('-v', '--verbose', action='store_true',
                          help='be verbose')
        self.add_argument('-q', '--quiet', action='store_true',
                          help='Be quiet (no details, no log)')
    def add_libs_arg(self, required=True):
        # expects 'libpath#i18n_libpath#libver' (split in get_unidb)
        self.add_argument('-l', '--libs', metavar='LIBS',
                          help='ICU libraries', required=required)
    def add_unicode_arg(self):
        self.add_argument('-u', '--unicode', metavar='Unicode',
                          default='6.3.0', help='Unicode version', )
    def add_rng_arg(self):
        self.add_argument('-r', '--rng', metavar='RNG',
                          help='RelaxNG XML schema')
    def add_xml_meta(self):
        self.add_argument('xml', metavar='XML')
    def parse_args(self, *args, **kwargs):
        # parse once, then return the cached namespace
        if not self.args:
            self.args = super(LgrToolArgParser, self).parse_args(*args, **kwargs)
        return self.args
    def setup_logger(self):
        if not self.args:
            self.parse_args()
        # "Disable" logging in test mode except if we ask to be verbose
        # NOTE(review): assumes an 'args.test' attribute added by the
        # individual tools -- confirm before reusing this class elsewhere
        log_level = logging.DEBUG if self.args.verbose else logging.INFO
        if self.args.test and not self.args.verbose:
            log_level = logging.ERROR
        if self.args.quiet:
            log_level = logging.CRITICAL
        logging.basicConfig(stream=sys.stderr, level=log_level,
                            format="%(levelname)s:%(name)s [%(filename)s:%(lineno)s] %(message)s")
    def parse_lgr(self):
        # delegate to the module-level parse_lgr() with cached args
        if not self.args:
            self.parse_args()
        return parse_lgr(self.args.xml or self.args.lgr_xml, self.args.rng, self.get_unidb())
    def get_unidb(self):
        if not self.args:
            self.parse_args()
        if self.args.libs and not self.unidb:
            # --libs format: 'libpath#i18n_libpath#libver'
            libpath, i18n_libpath, libver = self.args.libs.split('#')
            manager = UnicodeDataVersionManager()
            self.unidb = manager.register(None, libpath, i18n_libpath, libver)
        return self.unidb
class LgrSetToolArgParser(LgrToolArgParser):
    """Argument parser for tools that accept a single LGR or an LGR set.

    After a successful :meth:`process_set`, either ``self.lgr`` (single
    document) or ``self.merged_lgr``/``self.script_lgr``/``self.set_labels``
    (LGR set) are populated for the caller.
    """

    def __init__(self, *args, **kwargs):
        super(LgrSetToolArgParser, self).__init__(*args, **kwargs)
        self.merged_lgr = None   # merged LGR when a set is provided
        self.script_lgr = None   # LGR of the set matching --lgr-script
        self.set_labels = None   # stream with the labels of the LGR set
        self.lgr = None          # parsed LGR when a single document is provided

    def add_xml_set_args(self):
        """Register the options used to specify an LGR or an LGR set."""
        self.add_argument('-x', '--lgr-xml', metavar='LGR_XML', action='append', required=True,
                          help='The LGR or LGR set if used multiple times')
        self.add_argument('-s', '--lgr-script', metavar='LGR_SCRIPT',
                          help='If LGR is a set, the script used to validate input labels')
        self.add_argument('-f', '--set-labels', metavar='SET_LABELS',
                          help='If LGR is a set, the file containing the label of the LGR set')

    def process_set(self, optional_set_labels):
        """Parse or merge the LGR document(s) given on the command line.

        :param optional_set_labels: When True, --set-labels may be omitted
                                    for an LGR set.
        :return: True on success, None on error (details already logged).
        """
        if len(self.args.lgr_xml) > 1:
            # LGR set: validate required options, merge and find script LGR.
            if not self.args.lgr_script:
                logger.error('For LGR set, lgr script is required')
                return
            if not optional_set_labels and not self.args.set_labels:
                logger.error('For LGR set, LGR set labels file is required')
                return
            self.merged_lgr, lgr_set = merge_lgrs(self.args.lgr_xml,
                                                  rng=self.args.rng,
                                                  unidb=self.get_unidb())
            if not self.merged_lgr:
                logger.error('Error while creating the merged LGR')
                return
            self.set_labels = io.StringIO()
            if self.args.set_labels:
                with io.open(self.args.set_labels, 'r', encoding='utf-8') as set_labels_input:
                    self.set_labels = io.StringIO(set_labels_input.read())
            self.script_lgr = None
            for lgr_s in lgr_set:
                try:
                    if lgr_s.metadata.languages[0] == self.args.lgr_script:
                        if self.script_lgr:
                            logger.warning('Script %s is provided in more than one LGR of the set, '
                                           'will only evaluate with %s', self.args.lgr_script, lgr_s.name)
                        self.script_lgr = lgr_s
                except (AttributeError, IndexError):
                    # LGR without metadata/languages cannot match the script.
                    pass
            if not self.script_lgr:
                logger.error('Cannot find script %s in any of the LGR provided as input', self.args.lgr_script)
                return
        else:
            # Single LGR document.
            # Bug fix: was `self.args.lgr_mxml` (nonexistent attribute), which
            # raised AttributeError instead of parsing the provided document.
            self.lgr = parse_lgr(self.args.lgr_xml[0], self.args.rng, self.get_unidb())
            if self.lgr is None:
                logger.error("Error while parsing LGR file.")
                logger.error("Please check compliance with RNG.")
                return
        return True
class LgrDumpTool(LgrToolArgParser):
    """CLI tool that parses an RFC-formatted LGR and dumps it as XML."""

    def __init__(self, rfc_parser_cls, *args, **kwargs):
        super(LgrDumpTool, self).__init__(*args, **kwargs)
        # Parser class used to read the input document (RFC 3743/4290, ...).
        self.rfc_parser_cls = rfc_parser_cls

    def run(self):
        """Parse command-line options, convert the input and emit the XML."""
        self.add_logging_args()
        self.add_argument('-o', '--output', metavar='OUTPUT',
                          help='Optional output file')
        self.add_argument('file', metavar='FILE')
        self.parse_args()
        self.setup_logger()
        lgr = self.rfc_parser_cls(self.args.file).parse_document()
        if self.args.output is None:
            # No output file: dump to stdout without an XML declaration.
            print(serialize_lgr_xml(lgr, pretty_print=True, encoding='unicode', xml_declaration=False))
        else:
            with io.open(self.args.output, mode='wb') as output:
                output.write(serialize_lgr_xml(lgr, pretty_print=True))
| 33.047836 | 114 | 0.5945 |
acf9ff6f0ebe1315f3c55812e376d10098e3d1af | 24,020 | py | Python | venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_gslb_service.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 37 | 2017-08-15T15:02:43.000Z | 2021-07-23T03:44:31.000Z | venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_gslb_service.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 12 | 2018-01-10T05:25:25.000Z | 2021-11-28T06:55:48.000Z | venv/lib/python2.7/site-packages/ansible/modules/network/netscaler/netscaler_gslb_service.py | haind27/test01 | 7f86c0a33eb0874a6c3f5ff9a923fd0cfc8ef852 | [
"MIT"
] | 49 | 2017-08-15T09:52:13.000Z | 2022-03-21T17:11:54.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_gslb_service
short_description: Manage gslb service entities in Netscaler.
description:
- Manage gslb service entities in Netscaler.
version_added: "2.4"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
servicename:
description:
- >-
Name for the GSLB service. Must begin with an ASCII alphanumeric or underscore C(_) character, and
must contain only ASCII alphanumeric, underscore C(_), hash C(#), period C(.), space, colon C(:), at C(@),
equals C(=), and hyphen C(-) characters. Can be changed after the GSLB service is created.
            - "Minimum length = 1"
cnameentry:
description:
- "Canonical name of the GSLB service. Used in CNAME-based GSLB."
- "Minimum length = 1"
servername:
description:
- "Name of the server hosting the GSLB service."
- "Minimum length = 1"
servicetype:
choices:
- 'HTTP'
- 'FTP'
- 'TCP'
- 'UDP'
- 'SSL'
- 'SSL_BRIDGE'
- 'SSL_TCP'
- 'NNTP'
- 'ANY'
- 'SIP_UDP'
- 'SIP_TCP'
- 'SIP_SSL'
- 'RADIUS'
- 'RDP'
- 'RTSP'
- 'MYSQL'
- 'MSSQL'
- 'ORACLE'
description:
- "Type of service to create."
port:
description:
- "Port on which the load balancing entity represented by this GSLB service listens."
- "Minimum value = 1"
- "Range 1 - 65535"
- "* in CLI is represented as 65535 in NITRO API"
publicip:
description:
- >-
The public IP address that a NAT device translates to the GSLB service's private IP address.
Optional.
publicport:
description:
- >-
The public port associated with the GSLB service's public IP address. The port is mapped to the
service's private port number. Applicable to the local GSLB service. Optional.
maxclient:
description:
- >-
The maximum number of open connections that the service can support at any given time. A GSLB service
whose connection count reaches the maximum is not considered when a GSLB decision is made, until the
connection count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(4294967294)"
healthmonitor:
description:
- "Monitor the health of the GSLB service."
type: bool
sitename:
description:
- "Name of the GSLB site to which the service belongs."
- "Minimum length = 1"
cip:
choices:
- 'enabled'
- 'disabled'
description:
- >-
In the request that is forwarded to the GSLB service, insert a header that stores the client's IP
address. Client IP header insertion is used in connection-proxy based site persistence.
cipheader:
description:
- >-
Name for the HTTP header that stores the client's IP address. Used with the Client IP option. If
client IP header insertion is enabled on the service and a name is not specified for the header, the
NetScaler appliance uses the name specified by the cipHeader parameter in the set ns param command
or, in the GUI, the Client IP Header parameter in the Configure HTTP Parameters dialog box.
- "Minimum length = 1"
sitepersistence:
choices:
- 'ConnectionProxy'
- 'HTTPRedirect'
- 'NONE'
description:
- "Use cookie-based site persistence. Applicable only to C(HTTP) and C(SSL) GSLB services."
siteprefix:
description:
- >-
The site's prefix string. When the service is bound to a GSLB virtual server, a GSLB site domain is
generated internally for each bound service-domain pair by concatenating the site prefix of the
service and the name of the domain. If the special string NONE is specified, the site-prefix string
is unset. When implementing HTTP redirect site persistence, the NetScaler appliance redirects GSLB
requests to GSLB services by using their site domains.
clttimeout:
description:
- >-
Idle time, in seconds, after which a client connection is terminated. Applicable if connection proxy
based site persistence is used.
- "Minimum value = 0"
- "Maximum value = 31536000"
maxbandwidth:
description:
- >-
Integer specifying the maximum bandwidth allowed for the service. A GSLB service whose bandwidth
reaches the maximum is not considered when a GSLB decision is made, until its bandwidth consumption
drops below the maximum.
downstateflush:
choices:
- 'enabled'
- 'disabled'
description:
- >-
Flush all active transactions associated with the GSLB service when its state transitions from UP to
DOWN. Do not enable this option for services that must complete their transactions. Applicable if
connection proxy based site persistence is used.
maxaaausers:
description:
- >-
Maximum number of SSL VPN users that can be logged on concurrently to the VPN virtual server that is
represented by this GSLB service. A GSLB service whose user count reaches the maximum is not
considered when a GSLB decision is made, until the count drops below the maximum.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
monthreshold:
description:
- >-
Monitoring threshold value for the GSLB service. If the sum of the weights of the monitors that are
bound to this GSLB service and are in the UP state is not equal to or greater than this threshold
value, the service is marked as DOWN.
- "Minimum value = C(0)"
- "Maximum value = C(65535)"
hashid:
description:
- "Unique hash identifier for the GSLB service, used by hash based load balancing methods."
- "Minimum value = C(1)"
comment:
description:
- "Any comments that you might want to associate with the GSLB service."
appflowlog:
choices:
- 'enabled'
- 'disabled'
description:
- "Enable logging appflow flow information."
ipaddress:
description:
- >-
IP address for the GSLB service. Should represent a load balancing, content switching, or VPN virtual
server on the NetScaler appliance, or the IP address of another load balancing device.
monitor_bindings:
description:
- Bind monitors to this gslb service
suboptions:
weight:
description:
- Weight to assign to the monitor-service binding.
- A larger number specifies a greater weight.
- Contributes to the monitoring threshold, which determines the state of the service.
- Minimum value = C(1)
- Maximum value = C(100)
monitor_name:
description:
- Monitor name.
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup gslb service 2
delegate_to: localhost
register: result
check_mode: "{{ check_mode }}"
netscaler_gslb_service:
    state: present
servicename: gslb-service-2
cnameentry: example.com
sitename: gslb-site-1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: "['message 1', 'message 2']"
msg:
description: Message detailing the failure reason
returned: failure
type: string
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dictionary
sample: "{ 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }"
'''
import copy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netscaler.netscaler import (
ConfigProxy,
get_nitro_client,
netscaler_common_arguments,
log,
loglines,
ensure_feature_is_enabled,
monkey_patch_nitro_api,
get_immutables_intersection,
)
try:
monkey_patch_nitro_api()
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice import gslbservice
from nssrc.com.citrix.netscaler.nitro.resource.config.gslb.gslbservice_lbmonitor_binding import gslbservice_lbmonitor_binding
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def gslb_service_exists(client, module):
    """Return True when a GSLB service with the configured name exists."""
    query = 'servicename:%s' % module.params['servicename']
    return gslbservice.count_filtered(client, query) > 0
def gslb_service_identical(client, module, gslb_service_proxy):
    """Return True when the configured proxy matches the actual service.

    The ``ip`` attribute is excluded from the comparison since it mirrors
    the ``ipaddress`` parameter.
    """
    fetched = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename'])
    differences = gslb_service_proxy.diff_object(fetched[0])
    # Ignore ip attribute missing
    differences.pop('ip', None)
    return len(differences) == 0
def get_actual_monitor_bindings(client, module):
    """Fetch the monitor bindings configured on the appliance.

    :return: dict mapping monitor name -> binding object.
    """
    log('get_actual_monitor_bindings')
    bindings_by_monitor = {}
    servicename = module.params['servicename']
    if gslbservice_lbmonitor_binding.count(client, servicename=servicename) != 0:
        # Index every binding of the named gslb service by its monitor name.
        for binding in gslbservice_lbmonitor_binding.get(client, servicename=servicename):
            bindings_by_monitor[binding.monitor_name] = binding
    return bindings_by_monitor
def get_configured_monitor_bindings(client, module):
    """Build ConfigProxy objects for the monitor bindings in the playbook.

    :return: dict mapping monitor name -> ConfigProxy for that binding.
    """
    log('get_configured_monitor_bindings')
    rw_attrs = [
        'weight',
        'servicename',
        'monitor_name',
    ]
    proxies = {}
    for configured_binding in (module.params['monitor_bindings'] or []):
        # Copy so the module parameters are not mutated.
        values = copy.deepcopy(configured_binding)
        values['servicename'] = module.params['servicename']
        proxies[configured_binding['monitor_name']] = ConfigProxy(
            actual=gslbservice_lbmonitor_binding(),
            client=client,
            attribute_values_dict=values,
            readwrite_attrs=rw_attrs,
            readonly_attrs=[],
        )
    return proxies
def monitor_bindings_identical(client, module):
    """Return True when actual and configured monitor bindings agree."""
    log('monitor_bindings_identical')
    actual = get_actual_monitor_bindings(client, module)
    configured = get_configured_monitor_bindings(client, module)
    # Any monitor present on only one side means the sets differ.
    differing_names = set(actual.keys()) ^ set(configured.keys())
    if differing_names:
        log('Symmetric difference %s' % differing_names)
        return False
    # Same monitors on both sides: compare each binding's attributes.
    for name, proxy in configured.items():
        if not proxy.has_equal_attributes(actual[name]):
            log('monitor binding difference %s' % proxy.diff_object(actual[name]))
            return False
    return True
def sync_monitor_bindings(client, module):
    """Make the appliance's monitor bindings match the playbook.

    Three passes: delete bindings absent from the configuration, delete and
    re-add bindings whose attributes differ (NITRO bindings cannot be
    updated in place), and add bindings missing from the appliance.
    """
    log('sync_monitor_bindings')
    actual_monitor_bindings = get_actual_monitor_bindings(client, module)
    configured_monitor_proxys = get_configured_monitor_bindings(client, module)
    # Delete actual bindings not in configured bindings
    for monitor_name, actual_binding in actual_monitor_bindings.items():
        if monitor_name not in configured_monitor_proxys.keys():
            log('Deleting absent binding for monitor %s' % monitor_name)
            # NOTE(review): `dir()` logging below looks like leftover debug
            # output — consider removing.
            log('dir is %s' % dir(actual_binding))
            gslbservice_lbmonitor_binding.delete(client, actual_binding)
    # Delete and re-add actual bindings that differ from configured
    for proxy_key, binding_proxy in configured_monitor_proxys.items():
        if proxy_key in actual_monitor_bindings:
            actual_binding = actual_monitor_bindings[proxy_key]
            if not binding_proxy.has_equal_attributes(actual_binding):
                log('Deleting differing binding for monitor %s' % actual_binding.monitor_name)
                # NOTE(review): the next three log lines also look like
                # leftover debugging of the binding attribute names.
                log('dir %s' % dir(actual_binding))
                log('attribute monitor_name %s' % getattr(actual_binding, 'monitor_name'))
                log('attribute monitorname %s' % getattr(actual_binding, 'monitorname', None))
                gslbservice_lbmonitor_binding.delete(client, actual_binding)
                log('Adding anew binding for monitor %s' % binding_proxy.monitor_name)
                binding_proxy.add()
    # Add configured monitors that are missing from actual
    for proxy_key, binding_proxy in configured_monitor_proxys.items():
        if proxy_key not in actual_monitor_bindings.keys():
            log('Adding monitor binding for monitor %s' % binding_proxy.monitor_name)
            binding_proxy.add()
def diff_list(client, module, gslb_service_proxy):
    """Return the dict of attribute differences between the configured
    proxy and the actual GSLB service (the ``ip`` alias is ignored)."""
    gslb_service_list = gslbservice.get_filtered(client, 'servicename:%s' % module.params['servicename'])
    # Renamed local (was `diff_list`), which shadowed this function's name.
    differences = gslb_service_proxy.diff_object(gslb_service_list[0])
    if 'ip' in differences:
        del differences['ip']
    return differences
def all_identical(client, module, gslb_service_proxy):
    """True when both the service attributes and monitor bindings match."""
    if not gslb_service_identical(client, module, gslb_service_proxy):
        return False
    return monitor_bindings_identical(client, module)
def main():
    """Module entry point.

    Builds the Ansible argument spec, logs in to the NITRO API, then
    reconciles the gslbservice entity and its monitor bindings with the
    desired ``state`` (present/absent), honoring check mode.
    """
    # Attributes mapped one-to-one onto the NITRO gslbservice object.
    module_specific_arguments = dict(
        servicename=dict(type='str'),
        cnameentry=dict(type='str'),
        servername=dict(type='str'),
        servicetype=dict(
            type='str',
            choices=[
                'HTTP',
                'FTP',
                'TCP',
                'UDP',
                'SSL',
                'SSL_BRIDGE',
                'SSL_TCP',
                'NNTP',
                'ANY',
                'SIP_UDP',
                'SIP_TCP',
                'SIP_SSL',
                'RADIUS',
                'RDP',
                'RTSP',
                'MYSQL',
                'MSSQL',
                'ORACLE',
            ]
        ),
        port=dict(type='int'),
        publicip=dict(type='str'),
        publicport=dict(type='int'),
        maxclient=dict(type='float'),
        healthmonitor=dict(type='bool'),
        sitename=dict(type='str'),
        cip=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        cipheader=dict(type='str'),
        sitepersistence=dict(
            type='str',
            choices=[
                'ConnectionProxy',
                'HTTPRedirect',
                'NONE',
            ]
        ),
        siteprefix=dict(type='str'),
        clttimeout=dict(type='float'),
        maxbandwidth=dict(type='float'),
        downstateflush=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        maxaaausers=dict(type='float'),
        monthreshold=dict(type='float'),
        hashid=dict(type='float'),
        comment=dict(type='str'),
        appflowlog=dict(
            type='str',
            choices=[
                'enabled',
                'disabled',
            ]
        ),
        ipaddress=dict(type='str'),
    )
    # Arguments handled by this module itself, not forwarded to NITRO.
    hand_inserted_arguments = dict(
        monitor_bindings=dict(type='list'),
    )
    argument_spec = dict()
    argument_spec.update(netscaler_common_arguments)
    argument_spec.update(module_specific_arguments)
    argument_spec.update(hand_inserted_arguments)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )
    module_result = dict(
        changed=False,
        failed=False,
        loglines=loglines,
    )
    # Fail the module if imports failed
    if not PYTHON_SDK_IMPORTED:
        module.fail_json(msg='Could not load nitro python sdk')
    # Fallthrough to rest of execution
    client = get_nitro_client(module)
    try:
        client.login()
    except nitro_exception as e:
        msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg)
    except Exception as e:
        # Compare by type name to avoid importing requests just for
        # its exception classes.
        if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
            module.fail_json(msg='Connection error %s' % str(e))
        elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
            module.fail_json(msg='SSL Error %s' % str(e))
        else:
            module.fail_json(msg='Unexpected error during login %s' % str(e))
    # Attributes this module may read and write on the NITRO object.
    readwrite_attrs = [
        'servicename',
        'cnameentry',
        'ip',
        'servername',
        'servicetype',
        'port',
        'publicip',
        'publicport',
        'maxclient',
        'healthmonitor',
        'sitename',
        'cip',
        'cipheader',
        'sitepersistence',
        'siteprefix',
        'clttimeout',
        'maxbandwidth',
        'downstateflush',
        'maxaaausers',
        'monthreshold',
        'hashid',
        'comment',
        'appflowlog',
        'ipaddress',
    ]
    # Attributes only ever reported by NITRO, never written.
    readonly_attrs = [
        'gslb',
        'svrstate',
        'svreffgslbstate',
        'gslbthreshold',
        'gslbsvcstats',
        'monstate',
        'preferredlocation',
        'monitor_state',
        'statechangetimesec',
        'tickssincelaststatechange',
        'threshold',
        'clmonowner',
        'clmonview',
        '__count',
    ]
    # Attributes that cannot be changed after the entity is created.
    immutable_attrs = [
        'servicename',
        'cnameentry',
        'ip',
        'servername',
        'servicetype',
        'port',
        'sitename',
        'state',
        'cipheader',
        'cookietimeout',
        'clttimeout',
        'svrtimeout',
        'viewip',
        'monitor_name_svc',
        'newname',
    ]
    # Conversions from Ansible parameter values to NITRO representations.
    transforms = {
        'healthmonitor': ['bool_yes_no'],
        'cip': [lambda v: v.upper()],
        'downstateflush': [lambda v: v.upper()],
        'appflowlog': [lambda v: v.upper()],
    }
    # params = copy.deepcopy(module.params)
    # NITRO expects the address under `ip`; mirror the `ipaddress` param.
    module.params['ip'] = module.params['ipaddress']
    # Instantiate config proxy
    gslb_service_proxy = ConfigProxy(
        actual=gslbservice(),
        client=client,
        attribute_values_dict=module.params,
        transforms=transforms,
        readwrite_attrs=readwrite_attrs,
        readonly_attrs=readonly_attrs,
        immutable_attrs=immutable_attrs,
    )
    try:
        ensure_feature_is_enabled(client, 'GSLB')
        # Apply appropriate state
        if module.params['state'] == 'present':
            if not gslb_service_exists(client, module):
                # Service missing: create it and bind its monitors.
                if not module.check_mode:
                    gslb_service_proxy.add()
                    sync_monitor_bindings(client, module)
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            elif not all_identical(client, module, gslb_service_proxy):
                # Check if we try to change value of immutable attributes
                immutables_changed = get_immutables_intersection(gslb_service_proxy, diff_list(client, module, gslb_service_proxy).keys())
                if immutables_changed != []:
                    module.fail_json(
                        msg='Cannot update immutable attributes %s' % (immutables_changed,),
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )
                # Update main configuration object
                if not gslb_service_identical(client, module, gslb_service_proxy):
                    if not module.check_mode:
                        gslb_service_proxy.update()
                # Update monitor bindigns
                if not monitor_bindings_identical(client, module):
                    if not module.check_mode:
                        sync_monitor_bindings(client, module)
                # Fallthrough to save and change status update
                module_result['changed'] = True
                if module.params['save_config']:
                    client.save_config()
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                if not gslb_service_exists(client, module):
                    module.fail_json(msg='GSLB service does not exist', **module_result)
                if not gslb_service_identical(client, module, gslb_service_proxy):
                    module.fail_json(
                        msg='GSLB service differs from configured',
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )
                if not monitor_bindings_identical(client, module):
                    module.fail_json(
                        msg='Monitor bindings differ from configured',
                        diff=diff_list(client, module, gslb_service_proxy),
                        **module_result
                    )
        elif module.params['state'] == 'absent':
            if gslb_service_exists(client, module):
                if not module.check_mode:
                    gslb_service_proxy.delete()
                    if module.params['save_config']:
                        client.save_config()
                module_result['changed'] = True
            else:
                module_result['changed'] = False
            # Sanity check for state
            if not module.check_mode:
                if gslb_service_exists(client, module):
                    module.fail_json(msg='GSLB service still exists', **module_result)
    except nitro_exception as e:
        msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
        module.fail_json(msg=msg, **module_result)
    client.logout()
    module.exit_json(**module_result)
# Standard entry point so Ansible (or direct invocation) runs the module.
if __name__ == "__main__":
    main()
| 34.511494 | 138 | 0.597211 |
acfa01224b62069bc2ac88101e39165a3fdaf64d | 7,742 | py | Python | src/spring-cloud/azext_spring_cloud/tests/latest/test_asc_scenario.py | msazurestackworkloads/azure-cli-extensions | 88b6eebc92489e41403ac4e8ba1179306cd16bdf | [
"MIT"
] | 1 | 2021-08-31T19:10:04.000Z | 2021-08-31T19:10:04.000Z | src/spring-cloud/azext_spring_cloud/tests/latest/test_asc_scenario.py | msazurestackworkloads/azure-cli-extensions | 88b6eebc92489e41403ac4e8ba1179306cd16bdf | [
"MIT"
] | 5 | 2022-03-08T17:46:24.000Z | 2022-03-23T18:27:45.000Z | src/spring-cloud/azext_spring_cloud/tests/latest/test_asc_scenario.py | msazurestackworkloads/azure-cli-extensions | 88b6eebc92489e41403ac4e8ba1179306cd16bdf | [
"MIT"
] | 1 | 2021-06-09T18:38:47.000Z | 2021-06-09T18:38:47.000Z | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import os
import unittest
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse
from azure.cli.testsdk import (ScenarioTest, ResourceGroupPreparer, StorageAccountPreparer, record_only)
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@record_only()
class CustomDomainTests(ScenarioTest):
    """Recorded scenario: certificate management and custom-domain binding."""

    def test_bind_cert_to_domain(self):
        """End to end: add a cert, bind/update/unbind a domain, clean up."""
        self.kwargs.update({
            'cert': 'test-cert',
            'keyVaultUri': 'https://integration-test-prod.vault.azure.net/',
            'KeyVaultCertName': 'cli-unittest',
            'domain': 'cli.asc-test.net',
            'app': 'test-app',
            'serviceName': 'cli-unittest',
            'rg': 'cli'
        })
        # Import the certificate from Key Vault and verify it is listed.
        self.cmd('spring-cloud certificate add --name {cert} --vault-uri {keyVaultUri} --vault-certificate-name {KeyVaultCertName} -g {rg} -s {serviceName}', checks=[
            self.check('name', '{cert}')
        ])
        self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', checks=[
            self.check('name', '{cert}')
        ])
        result = self.cmd('spring-cloud certificate list -g {rg} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(result) > 0)
        # Bind the custom domain to the app, then attach the certificate.
        self.cmd('spring-cloud app custom-domain bind --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
            self.check('name', '{domain}')
        ])
        self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', checks=[
            self.check('name', '{domain}'),
            self.check('properties.appName', '{app}')
        ])
        result = self.cmd('spring-cloud app custom-domain list --app {app} -g {rg} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(result) > 0)
        self.cmd('spring-cloud app custom-domain update --domain-name {domain} --certificate {cert} --app {app} -g {rg} -s {serviceName}', checks=[
            self.check('name', '{domain}'),
            self.check('properties.appName', '{app}'),
            self.check('properties.certName', '{cert}')
        ])
        # Tear down: after unbind/remove, the show commands must fail.
        self.cmd('spring-cloud app custom-domain unbind --domain-name {domain} --app {app} -g {rg} -s {serviceName}')
        self.cmd('spring-cloud app custom-domain show --domain-name {domain} --app {app} -g {rg} -s {serviceName}', expect_failure=True)
        self.cmd('spring-cloud certificate remove --name {cert} -g {rg} -s {serviceName}')
        self.cmd('spring-cloud certificate show --name {cert} -g {rg} -s {serviceName}', expect_failure=True)
class ByosTest(ScenarioTest):
    """Scenario: bring-your-own-storage persistent storage management."""

    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_persistent_storage(self, resource_group, storage_account):
        """Create a service, attach and detach StorageAccount-backed storage."""
        template = 'storage account keys list -n {} -g {} --query "[0].value" -otsv'
        # NOTE(review): `.output` keeps the CLI's trailing newline in the key;
        # it appears to be tolerated by the command parsing — confirm.
        accountkey = self.cmd(template.format(storage_account, resource_group)).output
        self.kwargs.update({
            'storageType': 'StorageAccount',
            'storage': 'test-storage-name',
            'app': 'test-app',
            'serviceName': 'cli-unittest',
            'location': 'centralus',
            'accountKey': accountkey,
            'resource_group': resource_group,
            'storage_account': storage_account,
        })
        self.cmd('spring-cloud create -n {serviceName} -g {resource_group} -l {location}')
        self.cmd('spring-cloud storage add --name {storage} --storage-type {storageType} --account-name {storage_account} --account-key {accountKey} -g {resource_group} -s {serviceName}', checks=[
            self.check('name', '{storage}'),
            self.check('properties.storageType', '{storageType}'),
            self.check('properties.accountName', '{storage_account}'),
        ])
        self.cmd('spring-cloud storage show --name {storage} -g {resource_group} -s {serviceName}', checks=[
            self.check('name', '{storage}')
        ])
        result = self.cmd('spring-cloud storage list -g {resource_group} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(result) > 0)
        self.cmd('spring-cloud storage remove --name {storage} -g {resource_group} -s {serviceName}')
        self.cmd('spring-cloud storage show --name {storage} -g {resource_group} -s {serviceName}', expect_failure=True)
        # `{rg}` is populated by ResourceGroupPreparer's default kwargs key.
        self.cmd('spring-cloud delete -n {serviceName} -g {rg}')
class SslTests(ScenarioTest):
    """Scenario: load public CA certificates into an app's trust store."""

    def test_load_public_cert_to_app(self):
        """Add two public certs, load one into an app, verify references."""
        # Certificate fixtures live next to this test module.
        py_path = os.path.abspath(os.path.dirname(__file__))
        baltiCertPath = os.path.join(py_path, 'files/BaltimoreCyberTrustRoot.crt.pem')
        digiCertPath = os.path.join(py_path, 'files/DigiCertGlobalRootCA.crt.pem')
        loadCertPath = os.path.join(py_path, 'files/load_certificate.json')
        self.kwargs.update({
            'cert': 'test-cert',
            'keyVaultUri': 'https://integration-test-prod.vault.azure.net/',
            'KeyVaultCertName': 'cli-unittest',
            'baltiCert': 'balti-cert',
            'digiCert': 'digi-cert',
            'baltiCertPath': baltiCertPath,
            'digiCertPath': digiCertPath,
            'loadCertPath': loadCertPath,
            'app': 'test-app',
            'serviceName': 'cli-unittest',
            'rg': 'cli',
            'location': 'westus'
        })
        self.cmd('group create -n {rg} -l {location}')
        self.cmd('spring-cloud create -n {serviceName} -g {rg} -l {location}')
        # Register both public certificates with the service.
        self.cmd(
            'spring-cloud certificate add --name {digiCert} -f {digiCertPath} -g {rg} -s {serviceName}',
            checks=[
                self.check('name', '{digiCert}')
            ])
        self.cmd(
            'spring-cloud certificate add --name {baltiCert} -f {baltiCertPath} -g {rg} -s {serviceName}',
            checks=[
                self.check('name', '{baltiCert}')
            ])
        self.cmd(
            'spring-cloud certificate show --name {digiCert} -g {rg} -s {serviceName}', checks=[
                self.check('name', '{digiCert}')
            ])
        self.cmd(
            'spring-cloud certificate show --name {baltiCert} -g {rg} -s {serviceName}', checks=[
                self.check('name', '{baltiCert}')
            ])
        cert_result = self.cmd(
            'spring-cloud certificate list -g {rg} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(cert_result) == 2)
        # Create the app from the load_certificate.json descriptor and
        # append the DigiCert cert into its trust store.
        self.cmd(
            'spring-cloud app create --name {app} -f {loadCertPath} -g {rg} -s {serviceName}')
        self.cmd(
            'spring-cloud app append-loaded-public-certificate --name {app} --certificate-name {digiCert} --load-trust-store true -g {rg} -s {serviceName}')
        app_result = self.cmd(
            'spring-cloud certificate list-reference-app --name {digiCert} -g {rg} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(app_result) > 0)
        # NOTE(review): the following block repeats the previous check
        # verbatim — looks like accidental duplication; confirm before
        # removing (removal would desync any recorded session).
        app_result = self.cmd(
            'spring-cloud certificate list-reference-app --name {digiCert} -g {rg} -s {serviceName}').get_output_in_json()
        self.assertTrue(len(app_result) > 0)
        self.cmd('spring-cloud delete -n {serviceName} -g {rg}')
acfa0388abed6638ebe277d68d1f8c86e706e5e2 | 605 | py | Python | studies/009_voelz_nspe/Nspe_2/plot_phi_vs_chi.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 98 | 2015-03-31T06:42:14.000Z | 2022-03-13T12:07:37.000Z | studies/009_voelz_nspe/Nspe_2/plot_phi_vs_chi.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 121 | 2015-07-13T15:57:02.000Z | 2022-03-24T20:07:10.000Z | studies/009_voelz_nspe/Nspe_2/plot_phi_vs_chi.py | bieniekmateusz/forcebalance | 593791866e622ab4eae23ce29a0bed27499a118d | [
"BSD-3-Clause"
] | 66 | 2015-04-06T03:05:04.000Z | 2022-02-26T05:11:59.000Z | from __future__ import division
from __future__ import print_function
from builtins import range
from past.utils import old_div
import os, sys
import numpy as np
import matplotlib.pyplot as plt
from scipy import loadtxt
data = loadtxt('energies.dat')
chi, phi, E = data[:,1], data[:,2], data[:,3]
chi_bins, phi_bins = int(chi/15), int(phi/30)
nchi = int(360/15) + 1
nphi = int(360/30) + 1
E_2D = np.max(E)*np.ones( (nphi, nchi), dtype=np.float )
for i in range(len(chi)):
E_2D[phi_bins[i], chi_bins[i]] = E[i]
print(E_2D)
plt.figure()
#plt.pcolor(E_2D.transpose())
plt.contour(E_2D)
plt.show()
| 20.862069 | 56 | 0.704132 |
acfa03e45b637557f87a3aea3a06c157597f77a1 | 383 | py | Python | prj/wsgi.py | theSundayProgrammer/veed | 772a086650272c99e2f95482b9784f3b6f5ea7a7 | [
"Apache-2.0"
] | null | null | null | prj/wsgi.py | theSundayProgrammer/veed | 772a086650272c99e2f95482b9784f3b6f5ea7a7 | [
"Apache-2.0"
] | 1 | 2021-02-22T04:29:20.000Z | 2021-02-22T23:06:01.000Z | prj/wsgi.py | theSundayProgrammer/veed | 772a086650272c99e2f95482b9784f3b6f5ea7a7 | [
"Apache-2.0"
] | 1 | 2021-02-22T04:35:08.000Z | 2021-02-22T04:35:08.000Z | """
WSGI config for prj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment
# already provides one (e.g. set by the WSGI server's own configuration).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'prj.settings')
# Module-level WSGI callable that servers (gunicorn, uwsgi, ...) look up.
application = get_wsgi_application()
| 22.529412 | 78 | 0.780679 |
acfa042db3d1884194fc14074496ade7430a645a | 1,997 | py | Python | Lamp_or_Bookbag_Classification/src/model/classify_app.py | Jacobjeevan/Side-Projects | 21d7142d5d7b4828d1736b3cc5db3a1d67dba4e6 | [
"MIT"
] | null | null | null | Lamp_or_Bookbag_Classification/src/model/classify_app.py | Jacobjeevan/Side-Projects | 21d7142d5d7b4828d1736b3cc5db3a1d67dba4e6 | [
"MIT"
] | null | null | null | Lamp_or_Bookbag_Classification/src/model/classify_app.py | Jacobjeevan/Side-Projects | 21d7142d5d7b4828d1736b3cc5db3a1d67dba4e6 | [
"MIT"
] | null | null | null | #import imutils
#from imutils.video import VideoStream as vs
import io
import json
import torch
import torch.nn as nn
from torchvision import models
import torchvision.transforms as transforms
from PIL import Image
#from flask import Flask, jsonify, request
#app = Flask(__name__)
#imagenet_class_index = json.load(open('<PATH/TO/.json/FILE>/imagenet_class_index.json'))
# Index in this list == output neuron index of the classifier head below.
classes = ["bookbag", "lamp"]
# Start from an ImageNet-pretrained ResNet-18 and replace its final fully
# connected layer with a fresh 2-class head matching `classes`.
model = models.resnet18(pretrained=True)
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
# Load the fine-tuned weights; path is relative, so this assumes the script
# is run from this file's directory — TODO confirm.
model.load_state_dict(torch.load("../../data/outputs/best_model.pth"))
# Switch to inference mode (disables dropout / fixes batch-norm statistics).
model.eval()
def transform_image(image_bytes):
    """Decode raw image bytes and preprocess them into a 1x3x224x224 tensor.

    Applies the standard ImageNet evaluation pipeline (resize, center crop,
    tensor conversion, per-channel normalisation) and adds a batch axis.
    """
    pipeline = transforms.Compose([
        transforms.Resize(255),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406],
                             [0.229, 0.224, 0.225]),
    ])
    img = Image.open(io.BytesIO(image_bytes))
    return pipeline(img).unsqueeze(0)
def get_prediction(image_bytes):
    """Run the classifier on raw image bytes and return the predicted label."""
    batch = transform_image(image_bytes=image_bytes)
    outputs = model.forward(batch)
    # max(1) yields (values, indices); the index is the predicted class id.
    predicted_idx = outputs.max(1)[1].item()
    return classes[predicted_idx]
# Ad-hoc smoke test: classify one local image and print the predicted label.
with open("IMG_20200510_141551.jpg", 'rb') as f:
    image_bytes = f.read()
    print(get_prediction(image_bytes=image_bytes))
'''
@app.route('/predict', methods=['POST'])
def predict():
if request.method == 'POST':
file = request.files['file']
img_bytes = file.read()
class_id, class_name = get_prediction(image_bytes=img_bytes)
return jsonify({'class_id': class_id, 'class_name': class_name})
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
if __name__ == '__main__':
app.run()''' | 32.209677 | 89 | 0.649474 |
acfa0488feaf94bd647fd5b671e32771cbc22538 | 160 | py | Python | envelope_evolution/1.nonredundant_envs/7.draw_trees.py | oaxiom/chemokines | b8d4d14d3a67a4113c409f0f72d3d822a08c8c93 | [
"MIT"
] | null | null | null | envelope_evolution/1.nonredundant_envs/7.draw_trees.py | oaxiom/chemokines | b8d4d14d3a67a4113c409f0f72d3d822a08c8c93 | [
"MIT"
] | null | null | null | envelope_evolution/1.nonredundant_envs/7.draw_trees.py | oaxiom/chemokines | b8d4d14d3a67a4113c409f0f72d3d822a08c8c93 | [
"MIT"
] | null | null | null |
from ete3 import Tree, TreeStyle
t = Tree('single_representative_envs.dnd')
ts.show_leaf_name = True
ts.mode = "c"
t.render('envelopes.pdf', tree_style=ts)
| 16 | 42 | 0.74375 |
acfa049c0dc008e8febd73fb8b64f980a287bd54 | 14,178 | py | Python | elaina/modules/backups.py | Vishal324140/ElainaRobot | d72092e9d2ddc9f94f21374cad57aea390612586 | [
"MIT"
] | 1 | 2022-01-31T08:44:33.000Z | 2022-01-31T08:44:33.000Z | elaina/modules/backups.py | Vishal324140/ElainaRobot | d72092e9d2ddc9f94f21374cad57aea390612586 | [
"MIT"
] | 8 | 2022-01-30T22:22:47.000Z | 2022-03-13T03:01:18.000Z | elaina/modules/backups.py | animeSubbingTeam/MitsuhaTaki | 64ff3ba483656bc1246c863ebffbaddd76cb145f | [
"MIT"
] | 1 | 2022-02-14T03:44:41.000Z | 2022-02-14T03:44:41.000Z | # MIT License
# Copyright (c) 2022 Zenitsu Prjkt™
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import os
import time
from io import BytesIO
from telegram import Message, ParseMode
from telegram.error import BadRequest
from telegram.ext import CommandHandler
# from elaina.modules.sql import warns_sql as warnssql
import elaina.modules.sql.blacklist_sql as blacklistsql
# from elaina.modules.sql import cust_filters_sql as filtersql
# import elaina.modules.sql.welcome_sql as welcsql
import elaina.modules.sql.locks_sql as locksql
import elaina.modules.sql.notes_sql as sql
# from elaina.modules.rules import get_rules
import elaina.modules.sql.rules_sql as rulessql
from elaina import JOIN_LOGGER, LOGGER, OWNER_ID, SUPPORT_CHAT, dispatcher
from elaina.__main__ import DATA_IMPORT
from elaina.modules.connection import connected
from elaina.modules.helper_funcs.alternate import typing_action
from elaina.modules.helper_funcs.chat_status import user_admin
from elaina.modules.sql import disable_sql as disabledsql
@user_admin
@typing_action
def import_data(update, context):
    """Restore a chat backup from a JSON document sent as a reply.

    The command must be issued (by an admin) as a reply to the backup
    document.  If the message is not a reply to a document, the handler
    silently does nothing.  Each registered module in DATA_IMPORT gets the
    chat's "hashes" payload via its __import_data__ hook.
    """
    msg = update.effective_message
    chat = update.effective_chat
    user = update.effective_user
    # TODO: allow uploading doc with command, not just as reply
    # only work with a doc
    # Resolve a connected chat, if any; otherwise this is a group-only command.
    conn = connected(context.bot, update, chat, user.id, need_admin=True)
    if conn:
        chat = dispatcher.bot.getChat(conn)
        chat_name = dispatcher.bot.getChat(conn).title
    else:
        if update.effective_message.chat.type == "private":
            update.effective_message.reply_text("This is a group only command!")
            return ""
        chat = update.effective_chat
        chat_name = update.effective_message.chat.title
    if msg.reply_to_message and msg.reply_to_message.document:
        try:
            file_info = context.bot.get_file(msg.reply_to_message.document.file_id)
        except BadRequest:
            msg.reply_text(
                "Try downloading and uploading the file yourself again, This one seem broken to me!",
            )
            return
        # Download into memory and parse the JSON backup.
        with BytesIO() as file:
            file_info.download(out=file)
            file.seek(0)
            data = json.load(file)
        # only import one group
        if len(data) > 1 and str(chat.id) not in data:
            msg.reply_text(
                "There are more than one group in this file and the chat.id is not same! How am i supposed to import it?",
            )
            return
        # Check if backup is this chat
        try:
            if data.get(str(chat.id)) is None:
                if conn:
                    text = "Backup comes from another chat, I can't return another chat to chat *{}*".format(
                        chat_name,
                    )
                else:
                    text = "Backup comes from another chat, I can't return another chat to this chat"
                return msg.reply_text(text, parse_mode="markdown")
        except Exception:
            return msg.reply_text("There was a problem while importing the data!")
        # Check if backup is from self
        # Backups from other bots are allowed but media file ids may be stale.
        try:
            if str(context.bot.id) != str(data[str(chat.id)]["bot"]):
                return msg.reply_text(
                    "Backup from another bot that is not suggested might cause the problem, documents, photos, videos, audios, records might not work as it should be.",
                )
        except Exception:
            pass
        # Select data source
        if str(chat.id) in data:
            data = data[str(chat.id)]["hashes"]
        else:
            data = data[list(data.keys())[0]]["hashes"]
        # Hand the payload to every module that knows how to import its slice.
        try:
            for mod in DATA_IMPORT:
                mod.__import_data__(str(chat.id), data)
        except Exception:
            msg.reply_text(
                f"An error occurred while recovering your data. The process failed. If you experience a problem with this, please take it to @{SUPPORT_CHAT}",
            )
            LOGGER.exception(
                "Imprt for the chat %s with the name %s failed.",
                str(chat.id),
                str(chat.title),
            )
            return
        # TODO: some of that link logic
        # NOTE: consider default permissions stuff?
        if conn:
            text = "Backup fully restored on *{}*.".format(chat_name)
        else:
            text = "Backup fully restored"
        msg.reply_text(text, parse_mode="markdown")
@user_admin
def export_data(update, context):
    """Export this chat's notes, rules, blacklist, disabled commands and
    locks into a JSON ``elaina<chat_id>.backup`` document and send it back.

    A per-chat cooldown is tracked in ``context.chat_data`` via
    put_chat/get_chat; OWNER_ID is exempt from the cooldown.
    NOTE(review): the cooldown constant is 10800 s (3 hours) while the user
    message says "once a day" — one of the two looks wrong; confirm intent.
    """
    chat_data = context.chat_data
    msg = update.effective_message  # type: Optional[Message]
    user = update.effective_user  # type: Optional[User]
    chat_id = update.effective_chat.id
    chat = update.effective_chat
    current_chat_id = update.effective_chat.id
    # Resolve a connected chat, if any; otherwise this is a group-only command.
    conn = connected(context.bot, update, chat, user.id, need_admin=True)
    if conn:
        chat = dispatcher.bot.getChat(conn)
        chat_id = conn
        # chat_name = dispatcher.bot.getChat(conn).title
    else:
        if update.effective_message.chat.type == "private":
            update.effective_message.reply_text("This is a group only command!")
            return ""
        chat = update.effective_chat
        chat_id = update.effective_chat.id
        # chat_name = update.effective_message.chat.title
    # Cooldown bookkeeping: refuse if a previous export is still "hot".
    jam = time.time()
    new_jam = jam + 10800
    checkchat = get_chat(chat_id, chat_data)
    if checkchat.get("status"):
        if jam <= int(checkchat.get("value")):
            timeformatt = time.strftime(
                "%H:%M:%S %d/%m/%Y",
                time.localtime(checkchat.get("value")),
            )
            update.effective_message.reply_text(
                "You can only backup once a day!\nYou can backup again in about `{}`".format(
                    timeformatt,
                ),
                parse_mode=ParseMode.MARKDOWN,
            )
            return
        if user.id != OWNER_ID:
            put_chat(chat_id, new_jam, chat_data)
    else:
        if user.id != OWNER_ID:
            put_chat(chat_id, new_jam, chat_data)
    note_list = sql.get_all_chat_notes(chat_id)
    backup = {}
    # button = ""
    buttonlist = []
    namacat = ""
    isicat = ""
    rules = ""
    count = 0
    countbtn = 0
    # Notes
    # Notes are serialised into two parallel "<###splitter###>"-delimited
    # strings (names and contents) that are zipped into a dict below.
    for note in note_list:
        count += 1
        # getnote = sql.get_note(chat_id, note.name)
        namacat += "{}<###splitter###>".format(note.name)
        if note.msgtype == 1:
            tombol = sql.get_buttons(chat_id, note.name)
            # keyb = []
            for btn in tombol:
                countbtn += 1
                if btn.same_line:
                    buttonlist.append(
                        ("{}".format(btn.name), "{}".format(btn.url), True),
                    )
                else:
                    buttonlist.append(
                        ("{}".format(btn.name), "{}".format(btn.url), False),
                    )
            isicat += "###button###: {}<###button###>{}<###splitter###>".format(
                note.value,
                str(buttonlist),
            )
            buttonlist.clear()
        elif note.msgtype == 2:
            isicat += "###sticker###:{}<###splitter###>".format(note.file)
        elif note.msgtype == 3:
            isicat += "###file###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        elif note.msgtype == 4:
            isicat += "###photo###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        elif note.msgtype == 5:
            isicat += "###audio###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        elif note.msgtype == 6:
            isicat += "###voice###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        elif note.msgtype == 7:
            isicat += "###video###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        elif note.msgtype == 8:
            isicat += "###video_note###:{}<###TYPESPLIT###>{}<###splitter###>".format(
                note.file,
                note.value,
            )
        else:
            isicat += "{}<###splitter###>".format(note.value)
    notes = {
        "#{}".format(namacat.split("<###splitter###>")[x]): "{}".format(
            isicat.split("<###splitter###>")[x],
        )
        for x in range(count)
    }
    # Rules
    rules = rulessql.get_rules(chat_id)
    # Blacklist
    bl = list(blacklistsql.get_chat_blacklist(chat_id))
    # Disabled command
    disabledcmd = list(disabledsql.get_all_disabled(chat_id))
    # Filters (TODO)
    """
    all_filters = list(filtersql.get_chat_triggers(chat_id))
    export_filters = {}
    for filters in all_filters:
        filt = filtersql.get_filter(chat_id, filters)
        # print(vars(filt))
        if filt.is_sticker:
            tipefilt = "sticker"
        elif filt.is_document:
            tipefilt = "doc"
        elif filt.is_image:
            tipefilt = "img"
        elif filt.is_audio:
            tipefilt = "audio"
        elif filt.is_voice:
            tipefilt = "voice"
        elif filt.is_video:
            tipefilt = "video"
        elif filt.has_buttons:
            tipefilt = "button"
            buttons = filtersql.get_buttons(chat.id, filt.keyword)
            print(vars(buttons))
        elif filt.has_markdown:
            tipefilt = "text"
        if tipefilt == "button":
            content = "{}#=#{}|btn|{}".format(tipefilt, filt.reply, buttons)
        else:
            content = "{}#=#{}".format(tipefilt, filt.reply)
        print(content)
        export_filters[filters] = content
    print(export_filters)
    """
    # Welcome (TODO)
    # welc = welcsql.get_welc_pref(chat_id)
    # Locked
    curr_locks = locksql.get_locks(chat_id)
    curr_restr = locksql.get_restr(chat_id)
    if curr_locks:
        locked_lock = {
            "sticker": curr_locks.sticker,
            "audio": curr_locks.audio,
            "voice": curr_locks.voice,
            "document": curr_locks.document,
            "video": curr_locks.video,
            "contact": curr_locks.contact,
            "photo": curr_locks.photo,
            "gif": curr_locks.gif,
            "url": curr_locks.url,
            "bots": curr_locks.bots,
            "forward": curr_locks.forward,
            "game": curr_locks.game,
            "location": curr_locks.location,
            "rtl": curr_locks.rtl,
        }
    else:
        locked_lock = {}
    if curr_restr:
        locked_restr = {
            "messages": curr_restr.messages,
            "media": curr_restr.media,
            "other": curr_restr.other,
            "previews": curr_restr.preview,
            "all": all(
                [
                    curr_restr.messages,
                    curr_restr.media,
                    curr_restr.other,
                    curr_restr.preview,
                ],
            ),
        }
    else:
        locked_restr = {}
    locks = {"locks": locked_lock, "restrict": locked_restr}
    # Warns (TODO)
    # warns = warnssql.get_warns(chat_id)
    # Backing up
    # Shape consumed by import_data: {chat_id: {"bot": ..., "hashes": {...}}}.
    backup[chat_id] = {
        "bot": context.bot.id,
        "hashes": {
            "info": {"rules": rules},
            "extra": notes,
            "blacklist": bl,
            "disabled": disabledcmd,
            "locks": locks,
        },
    }
    baccinfo = json.dumps(backup, indent=4)
    with open("elaina{}.backup".format(chat_id), "w") as f:
        f.write(str(baccinfo))
    context.bot.sendChatAction(current_chat_id, "upload_document")
    tgl = time.strftime("%H:%M:%S - %d/%m/%Y", time.localtime(time.time()))
    # NOTE(review): this log message says "imported" although this is the
    # export path — likely a copy/paste slip in the runtime string.
    try:
        context.bot.sendMessage(
            JOIN_LOGGER,
            "*Successfully imported backup:*\nChat: `{}`\nChat ID: `{}`\nOn: `{}`".format(
                chat.title,
                chat_id,
                tgl,
            ),
            parse_mode=ParseMode.MARKDOWN,
        )
    except BadRequest:
        pass
    context.bot.sendDocument(
        current_chat_id,
        document=open("elaina{}.backup".format(chat_id), "rb"),
        caption="*Successfully Exported backup:*\nChat: `{}`\nChat ID: `{}`\nOn: `{}`\n\nNote: This `elaina-Backup` was specially made for notes.".format(
            chat.title,
            chat_id,
            tgl,
        ),
        timeout=360,
        reply_to_message_id=msg.message_id,
        parse_mode=ParseMode.MARKDOWN,
    )
    os.remove("elaina{}.backup".format(chat_id))  # Cleaning file
# Temporary data
def put_chat(chat_id, value, chat_data):
    """Record the backup-cooldown entry for *chat_id* in *chat_data*.

    ``value`` is the timestamp until which backups are blocked, or ``False``
    to mark the chat as having no active cooldown.
    """
    entry = {"status": value is not False, "value": value}
    chat_data[chat_id] = {"backups": entry}
def get_chat(chat_id, chat_data):
    """Return the backup-cooldown entry for *chat_id*, or an inactive default."""
    inactive = {"status": False, "value": False}
    if chat_id not in chat_data:
        return inactive
    return chat_data[chat_id].get("backups", inactive)
__mod_name__ = "Backups"
# /import must be sent as a reply to the backup document (see import_data).
IMPORT_HANDLER = CommandHandler("import", import_data, run_async=True)
# /export needs chat_data to track the per-chat backup cooldown.
EXPORT_HANDLER = CommandHandler(
    "export", export_data, pass_chat_data=True, run_async=True
)
dispatcher.add_handler(IMPORT_HANDLER)
dispatcher.add_handler(EXPORT_HANDLER)
| 34.49635 | 168 | 0.582099 |
acfa05002144351784b7c5ac5c2c0c28154e66c8 | 839 | py | Python | plotly/validators/sankey/_stream.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 1 | 2018-07-16T01:51:47.000Z | 2018-07-16T01:51:47.000Z | plotly/validators/sankey/_stream.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | null | null | null | plotly/validators/sankey/_stream.py | faezs/plotly.py | 6009b5b9c746e5d2a2849ad255a4eb234b551ed7 | [
"MIT"
] | 1 | 2019-02-18T04:12:56.000Z | 2019-02-18T04:12:56.000Z | import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``sankey.stream`` compound property."""

    def __init__(self, plotly_name='stream', parent_name='sankey', **kwargs):
        # Delegate to CompoundValidator; ``data_docs`` is the help text
        # attached to this property (shown in validation error messages).
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str='Stream',
            data_docs="""
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to *50*, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See
                https://plot.ly/settings for more details.""",
            **kwargs
        )
acfa054ef62347ef90771140542b4bcc05161822 | 129 | py | Python | src/evaluation/__init__.py | sisl/InteractionImitation | 9c9ee8f21b53e71bbca86b0b79c6e6d913a20567 | [
"MIT"
] | 2 | 2022-03-13T19:43:08.000Z | 2022-03-14T03:19:33.000Z | src/evaluation/__init__.py | sisl/InteractionImitation | 9c9ee8f21b53e71bbca86b0b79c6e6d913a20567 | [
"MIT"
] | null | null | null | src/evaluation/__init__.py | sisl/InteractionImitation | 9c9ee8f21b53e71bbca86b0b79c6e6d913a20567 | [
"MIT"
] | null | null | null | from src.evaluation.evaluation import IntersimpleEvaluation
from src.evaluation.metrics import divergence, visualize_distribution | 64.5 | 69 | 0.899225 |
acfa057985c3b9a53dfab8c843f6bc390ad0e023 | 10,419 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/18-extending_bound_36.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/18-extending_bound_36.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_ltl_infinite_state/18-extending_bound_36.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from collections import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 < arg1`` encoded as ``not (arg0 >= arg1)``."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    # a >= b is expressed by flipping the arguments of <=.
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the term ``arg0 > arg1`` encoded as ``not (arg0 <= arg1)``."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Return the implication ``arg0 -> arg1`` rewritten as ``(not arg0) or arg1``."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
                                                        msat_term, msat_term]:
    """Build the transition system and LTL property for this benchmark.

    Returns (curr2next symbol map, init, trans, ltl) as msat terms.
    State: rationals i, r, l and boolean inc_i (with primed copies x_*).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    bool_type = msat_get_bool_type(menv)
    real_type = msat_get_rational_type(menv)
    # Declare current-state symbols.
    i = msat_declare_function(menv, "i", real_type)
    i = msat_make_constant(menv, i)
    r = msat_declare_function(menv, "r", real_type)
    r = msat_make_constant(menv, r)
    l = msat_declare_function(menv, "l", real_type)
    l = msat_make_constant(menv, l)
    inc_i = msat_declare_function(menv, "inc_i", bool_type)
    inc_i = msat_make_constant(menv, inc_i)
    # Declare next-state (primed) symbols via name_next.
    x_i = msat_declare_function(menv, name_next("i"), real_type)
    x_i = msat_make_constant(menv, x_i)
    x_r = msat_declare_function(menv, name_next("r"), real_type)
    x_r = msat_make_constant(menv, x_r)
    x_l = msat_declare_function(menv, name_next("l"), real_type)
    x_l = msat_make_constant(menv, x_l)
    x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
    x_inc_i = msat_make_constant(menv, x_inc_i)
    curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
    zero = msat_make_number(menv, "0")
    one = msat_make_number(menv, "1")
    # init: 0 < r < l, i >= 0, !inc_i, l > 0
    r_gt_0 = msat_make_gt(menv, r, zero)
    r_lt_l = msat_make_lt(menv, r, l)
    i_geq_0 = msat_make_geq(menv, i, zero)
    init = msat_make_and(menv, r_gt_0, r_lt_l)
    init = msat_make_and(menv, init,
                         msat_make_and(menv, i_geq_0,
                                       msat_make_not(menv, inc_i)))
    init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
    # r' = r
    trans = msat_make_equal(menv, x_r, r)
    # i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
    i_lt_l = msat_make_lt(menv, i, l)
    x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
                                 msat_make_equal(menv, x_i,
                                                 msat_make_plus(menv, i, one)))
    x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
                             msat_make_equal(menv, x_i, i))
    x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
    x_l_eq_l = msat_make_equal(menv, x_l, l)
    x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
                                                   x_l_eq_l)
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_lt_l,
                                         x_i_eq_i_p_1_or_i_and_x_l_eq_l))
    # i >= l -> i' = 0 & l' = l + 1 & !inc_i'
    i_geq_l = msat_make_geq(menv, i, l)
    x_i_eq_0 = msat_make_equal(menv, x_i, zero)
    x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
    x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
                                              msat_make_and(menv, x_i_eq_0,
                                                            x_l_eq_l_p_1),
                                              msat_make_not(menv, x_inc_i))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, i_geq_l,
                                         x_i_eq_0_and_x_l_eq_l_p_1))
    # (G F inc_i) -> ! G F r > i
    G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
    r_gt_i = msat_make_gt(menv, r, i)
    n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
    ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
    return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Return the set of (deliberately partly wrong) ranking/transition hints.

    Each Hint is a small automaton over a subset of the system symbols
    (i, r, l, inc_i); Locations carry an invariant and progress conditions
    over the primed symbols (x_*).
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    # Current-state symbols and their primed (next-state) counterparts.
    i = mgr.Symbol("i", types.REAL)
    r = mgr.Symbol("r", types.REAL)
    l = mgr.Symbol("l", types.REAL)
    inc_i = mgr.Symbol("inc_i", types.BOOL)
    symbs = frozenset([i, r, l, inc_i])
    x_i = symb_to_next(mgr, i)
    x_r = symb_to_next(mgr, r)
    x_l = symb_to_next(mgr, l)
    x_inc_i = symb_to_next(mgr, inc_i)
    res = []
    n0 = mgr.Real(0)
    n1 = mgr.Real(1)
    # h_r0: r keeps increasing by 1 while non-negative.
    loc = Location(env, mgr.GE(r, n0))
    loc.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r0", env, frozenset([r]), symbs)
    h_r.set_locs([loc])
    res.append(h_r)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i, stutterT=x_inc_i)
    loc1.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc3", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1])
    res.append(h_inc)
    loc = Location(env, inc_i)
    loc.set_progress(0, x_inc_i)
    h_inc = Hint("h_inc0", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc])
    res.append(h_inc)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i)
    loc1.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1])
    res.append(h_inc)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(2, mgr.Equals(x_i, i))
    loc2 = Location(env, mgr.GE(i, n0))
    loc2.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i4", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1, loc2])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(i, n0))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i2", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(l, n0), mgr.GE(r, n0),
                    stutterT=mgr.Equals(x_l, mgr.Plus(l, r)))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l3", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    loc = Location(env, mgr.LE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Minus(l, n1)))
    h_l = Hint("h_l1", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
    h_i = Hint("h_i1", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
                    stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
    loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
    loc1 = Location(env, mgr.GE(i, n0))
    loc1.set_progress(0, mgr.Equals(x_i, i))
    h_i = Hint("h_i3", env, frozenset([i]), symbs)
    h_i.set_locs([loc0, loc1])
    res.append(h_i)
    loc0 = Location(env, mgr.GE(l, n0))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l2", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1])
    res.append(h_l)
    loc0 = Location(env, mgr.Not(inc_i))
    loc0.set_progress(1, x_inc_i)
    loc1 = Location(env, inc_i, stutterT=x_inc_i)
    loc1.set_progress(2, mgr.Not(x_inc_i))
    loc2 = Location(env, mgr.Not(inc_i))
    loc2.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc4", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc0, loc1, loc2])
    res.append(h_inc)
    loc = Location(env, mgr.Not(inc_i))
    loc.set_progress(0, mgr.Not(x_inc_i))
    h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
    h_inc.set_locs([loc])
    res.append(h_inc)
    loc0 = Location(env, mgr.GE(r, n0))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
    loc2 = Location(env, mgr.GE(r, n0))
    loc2.set_progress(0, mgr.Equals(x_r, r))
    h_r = Hint("h_r4", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1, loc2])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
                    stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
    loc0.set_progress(1, mgr.Equals(x_r, r))
    loc1 = Location(env, mgr.GE(r, n0))
    loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
    h_r = Hint("h_r3", env, frozenset([r]), symbs)
    h_r.set_locs([loc0, loc1])
    res.append(h_r)
    loc0 = Location(env, mgr.GE(l, n0))
    loc0.set_progress(1, mgr.Equals(x_l, mgr.Plus(l, n1)))
    loc1 = Location(env, mgr.GE(l, n0))
    loc1.set_progress(2, mgr.Equals(x_l, l))
    loc2 = Location(env, mgr.GE(l, n0))
    loc2.set_progress(0, mgr.Equals(x_l, l))
    h_l = Hint("h_l4", env, frozenset([l]), symbs)
    h_l.set_locs([loc0, loc1, loc2])
    res.append(h_l)
    stutter = mgr.Equals(x_i, i)
    loc = Location(env, mgr.GE(i, n0), stutterT=stutter)
    loc.set_progress(0, mgr.Equals(x_i, mgr.Plus(i, n1)))
    h_i = Hint("h_i0", env, frozenset([i]), symbs)
    h_i.set_locs([loc])
    res.append(h_i)
    loc = Location(env, mgr.GE(l, n0))
    loc.set_progress(0, mgr.Equals(x_l, mgr.Plus(l, n1)))
    h_l = Hint("h_l0", env, frozenset([l]), symbs)
    h_l.set_locs([loc])
    res.append(h_l)
    return frozenset(res)
| 35.318644 | 89 | 0.624628 |
acfa05c8538e869c0fd9aaa303e66f3a2342c56f | 32,478 | py | Python | bin/Python27/Lib/site-packages/scipy/spatial/tests/test_qhull.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/scipy/spatial/tests/test_qhull.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | null | null | null | bin/Python27/Lib/site-packages/scipy/spatial/tests/test_qhull.py | lefevre-fraser/openmeta-mms | 08f3115e76498df1f8d70641d71f5c52cab4ce5f | [
"MIT"
] | 1 | 2020-08-08T12:44:48.000Z | 2020-08-08T12:44:48.000Z | from __future__ import division, print_function, absolute_import
import os
import copy
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, run_module_suite,
assert_, dec, assert_allclose, assert_array_equal,
assert_raises)
from scipy._lib.six import xrange
import scipy.spatial.qhull as qhull
from scipy.spatial import cKDTree as KDTree
def sorted_tuple(x):
    """Return the elements of *x* as a tuple in ascending order."""
    ordered = sorted(x)
    return tuple(ordered)
def sorted_unique_tuple(x):
    """Return the distinct elements of *x* as an ascending tuple."""
    unique_sorted = np.unique(x)
    return tuple(unique_sorted)
def assert_unordered_tuple_list_equal(a, b, tpl=tuple):
    """Assert that *a* and *b* hold the same items, ignoring order.

    ndarrays are converted to nested lists first; each item is normalised
    through *tpl* (e.g. ``sorted_tuple``) before the sorted comparison.
    """
    def canonical(seq):
        if isinstance(seq, np.ndarray):
            seq = seq.tolist()
        return sorted(tpl(item) for item in seq)

    assert_equal(canonical(a), canonical(b))
np.random.seed(1234)
points = [(0,0), (0,1), (1,0), (1,1), (0.5, 0.5), (0.5, 1.5)]
pathological_data_1 = np.array([
[-3.14,-3.14], [-3.14,-2.36], [-3.14,-1.57], [-3.14,-0.79],
[-3.14,0.0], [-3.14,0.79], [-3.14,1.57], [-3.14,2.36],
[-3.14,3.14], [-2.36,-3.14], [-2.36,-2.36], [-2.36,-1.57],
[-2.36,-0.79], [-2.36,0.0], [-2.36,0.79], [-2.36,1.57],
[-2.36,2.36], [-2.36,3.14], [-1.57,-0.79], [-1.57,0.79],
[-1.57,-1.57], [-1.57,0.0], [-1.57,1.57], [-1.57,-3.14],
[-1.57,-2.36], [-1.57,2.36], [-1.57,3.14], [-0.79,-1.57],
[-0.79,1.57], [-0.79,-3.14], [-0.79,-2.36], [-0.79,-0.79],
[-0.79,0.0], [-0.79,0.79], [-0.79,2.36], [-0.79,3.14],
[0.0,-3.14], [0.0,-2.36], [0.0,-1.57], [0.0,-0.79], [0.0,0.0],
[0.0,0.79], [0.0,1.57], [0.0,2.36], [0.0,3.14], [0.79,-3.14],
[0.79,-2.36], [0.79,-0.79], [0.79,0.0], [0.79,0.79],
[0.79,2.36], [0.79,3.14], [0.79,-1.57], [0.79,1.57],
[1.57,-3.14], [1.57,-2.36], [1.57,2.36], [1.57,3.14],
[1.57,-1.57], [1.57,0.0], [1.57,1.57], [1.57,-0.79],
[1.57,0.79], [2.36,-3.14], [2.36,-2.36], [2.36,-1.57],
[2.36,-0.79], [2.36,0.0], [2.36,0.79], [2.36,1.57],
[2.36,2.36], [2.36,3.14], [3.14,-3.14], [3.14,-2.36],
[3.14,-1.57], [3.14,-0.79], [3.14,0.0], [3.14,0.79],
[3.14,1.57], [3.14,2.36], [3.14,3.14],
])
pathological_data_2 = np.array([
[-1, -1], [-1, 0], [-1, 1],
[0, -1], [0, 0], [0, 1],
[1, -1 - np.finfo(np.float_).eps], [1, 0], [1, 1],
])
bug_2850_chunks = [np.random.rand(10, 2),
np.array([[0,0], [0,1], [1,0], [1,1]]) # add corners
]
# same with some additional chunks
bug_2850_chunks_2 = (bug_2850_chunks +
[np.random.rand(10, 2),
0.25 + np.array([[0,0], [0,1], [1,0], [1,1]])])
DATASETS = {
'some-points': np.asarray(points),
'random-2d': np.random.rand(30, 2),
'random-3d': np.random.rand(30, 3),
'random-4d': np.random.rand(30, 4),
'random-5d': np.random.rand(30, 5),
'random-6d': np.random.rand(10, 6),
'random-7d': np.random.rand(10, 7),
'random-8d': np.random.rand(10, 8),
'pathological-1': pathological_data_1,
'pathological-2': pathological_data_2
}
INCREMENTAL_DATASETS = {
'bug-2850': (bug_2850_chunks, None),
'bug-2850-2': (bug_2850_chunks_2, None),
}
def _add_inc_data(name, chunksize):
    """
    Generate incremental datasets from basic data sets
    """
    points = DATASETS[name]
    ndim = points.shape[1]
    opts = None
    # First chunk must contain enough points to seed a valid triangulation.
    nmin = ndim + 2
    if name == 'some-points':
        # since Qz is not allowed, use QJ
        opts = 'QJ Pp'
    elif name == 'pathological-1':
        # include enough points so that we get different x-coordinates
        nmin = 12
    # Split the remaining points into fixed-size chunks after the seed chunk.
    chunks = [points[:nmin]]
    for j in xrange(nmin, len(points), chunksize):
        chunks.append(points[j:j+chunksize])
    new_name = "%s-chunk-%d" % (name, chunksize)
    assert new_name not in INCREMENTAL_DATASETS
    INCREMENTAL_DATASETS[new_name] = (chunks, opts)
# Register chunked variants of every base dataset for the incremental tests.
for name in DATASETS:
    for chunksize in 1, 4, 16:
        _add_inc_data(name, chunksize)
class Test_Qhull(object):
    def test_swapping(self):
        # Check that Qhull state swapping works
        # Build two independent _Qhull instances and interleave reads of
        # their Voronoi diagrams: results must not bleed between instances,
        # and a closed instance must raise instead of returning stale data.
        x = qhull._Qhull(b'v',
                         np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]),
                         b'Qz')
        xd = copy.deepcopy(x.get_voronoi_diagram())
        y = qhull._Qhull(b'v',
                         np.array([[0,0],[0,1],[1,0],[1,2.]]),
                         b'Qz')
        yd = copy.deepcopy(y.get_voronoi_diagram())
        xd2 = copy.deepcopy(x.get_voronoi_diagram())
        x.close()
        yd2 = copy.deepcopy(y.get_voronoi_diagram())
        y.close()
        assert_raises(RuntimeError, x.get_voronoi_diagram)
        assert_raises(RuntimeError, y.get_voronoi_diagram)
        # Diagrams re-read after the swap must match the first reads.
        assert_allclose(xd[0], xd2[0])
        assert_unordered_tuple_list_equal(xd[1], xd2[1], tpl=sorted_tuple)
        assert_unordered_tuple_list_equal(xd[2], xd2[2], tpl=sorted_tuple)
        assert_unordered_tuple_list_equal(xd[3], xd2[3], tpl=sorted_tuple)
        assert_array_equal(xd[4], xd2[4])
        assert_allclose(yd[0], yd2[0])
        assert_unordered_tuple_list_equal(yd[1], yd2[1], tpl=sorted_tuple)
        assert_unordered_tuple_list_equal(yd[2], yd2[2], tpl=sorted_tuple)
        assert_unordered_tuple_list_equal(yd[3], yd2[3], tpl=sorted_tuple)
        assert_array_equal(yd[4], yd2[4])
        # Closing twice must stay safe and keep raising.
        x.close()
        assert_raises(RuntimeError, x.get_voronoi_diagram)
        y.close()
        assert_raises(RuntimeError, y.get_voronoi_diagram)
class TestUtilities(object):
    """
    Check that utility functions work.
    """
    def test_find_simplex(self):
        # Simple check that simplex finding works
        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
        tri = qhull.Delaunay(points)
        # +---+
        # |\ 0|
        # | \ |
        # |1 \|
        # +---+
        assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
        # Each probe is (x, y, expected-simplex-index).
        for p in [(0.25, 0.25, 1),
                  (0.75, 0.75, 0),
                  (0.3, 0.2, 1)]:
            i = tri.find_simplex(p[:2])
            assert_equal(i, p[2], err_msg='%r' % (p,))
            # tsearch is the functional alias of Delaunay.find_simplex.
            j = qhull.tsearch(tri, p[:2])
            assert_equal(i, j)
    def test_plane_distance(self):
        # Compare plane distance from hyperplane equations obtained from Qhull
        # to manually computed plane equations
        x = np.array([(0,0), (1, 1), (1, 0), (0.99189033, 0.37674127),
                      (0.99440079, 0.45182168)], dtype=np.double)
        p = np.array([0.99966555, 0.15685619], dtype=np.double)
        tri = qhull.Delaunay(x)
        z = tri.lift_points(x)
        pz = tri.lift_points(p)
        dist = tri.plane_distance(p)
        for j, v in enumerate(tri.vertices):
            x1 = z[v[0]]
            x2 = z[v[1]]
            x3 = z[v[2]]
            # Unit normal of the lifted simplex plane, oriented so its
            # z-component is negative to match Qhull's sign convention.
            n = np.cross(x1 - x3, x2 - x3)
            n /= np.sqrt(np.dot(n, n))
            n *= -np.sign(n[2])
            d = np.dot(n, pz - x3)
            assert_almost_equal(dist[j], d)
    def test_convex_hull(self):
        # Simple check that the convex hull seems to works
        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
        tri = qhull.Delaunay(points)
        # +---+
        # |\ 0|
        # | \ |
        # |1 \|
        # +---+
        assert_equal(tri.convex_hull, [[3, 2], [1, 2], [1, 0], [3, 0]])
    def test_volume_area(self):
        #Basic check that we get back the correct volume and area for a cube
        points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
                           (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
        hull = qhull.ConvexHull(points)
        assert_allclose(hull.volume, 1., rtol=1e-14,
                        err_msg="Volume of cube is incorrect")
        assert_allclose(hull.area, 6., rtol=1e-14,
                        err_msg="Area of cube is incorrect")
    def test_random_volume_area(self):
        #Test that the results for a random 10-point convex are
        #coherent with the output of qconvex Qt s FA
        points = np.array([(0.362568364506, 0.472712355305, 0.347003084477),
                           (0.733731893414, 0.634480295684, 0.950513180209),
                           (0.511239955611, 0.876839441267, 0.418047827863),
                           (0.0765906233393, 0.527373281342, 0.6509863541),
                           (0.146694972056, 0.596725793348, 0.894860986685),
                           (0.513808585741, 0.069576205858, 0.530890338876),
                           (0.512343805118, 0.663537132612, 0.037689295973),
                           (0.47282965018, 0.462176697655, 0.14061843691),
                           (0.240584597123, 0.778660020591, 0.722913476339),
                           (0.951271745935, 0.967000673944, 0.890661319684)])
        hull = qhull.ConvexHull(points)
        # Reference values come from running qconvex on the same input.
        assert_allclose(hull.volume, 0.14562013, rtol=1e-07,
                        err_msg="Volume of random polyhedron is incorrect")
        assert_allclose(hull.area, 1.6670425, rtol=1e-07,
                        err_msg="Area of random polyhedron is incorrect")
    def _check_barycentric_transforms(self, tri, err_msg="",
                                      unit_cube=False,
                                      unit_cube_tol=0):
        """Check that a triangulation has reasonable barycentric transforms"""
        vertices = tri.points[tri.vertices]
        # Barycentric coordinate of a centroid: 1/(ndim+1) per vertex.
        sc = 1/(tri.ndim + 1.0)
        centroids = vertices.sum(axis=1) * sc
        # Either: (i) the simplex has a `nan` barycentric transform,
        # or, (ii) the centroid is in the simplex
        def barycentric_transform(tr, x):
            # Apply the packed transform: Tinv @ (x - r), per simplex.
            ndim = tr.shape[1]
            r = tr[:,-1,:]
            Tinv = tr[:,:-1,:]
            return np.einsum('ijk,ik->ij', Tinv, x - r)
        eps = np.finfo(float).eps
        c = barycentric_transform(tri.transform, centroids)
        # nan transforms produce invalid-value warnings; silence them here.
        olderr = np.seterr(invalid="ignore")
        try:
            ok = np.isnan(c).all(axis=1) | (abs(c - sc)/sc < 0.1).all(axis=1)
        finally:
            np.seterr(**olderr)
        assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
        # Invalid simplices must be (nearly) zero volume
        q = vertices[:,:-1,:] - vertices[:,-1,None,:]
        volume = np.array([np.linalg.det(q[k,:,:])
                           for k in range(tri.nsimplex)])
        ok = np.isfinite(tri.transform[:,0,0]) | (volume < np.sqrt(eps))
        assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
        # Also, find_simplex for the centroid should end up in some
        # simplex for the non-degenerate cases
        j = tri.find_simplex(centroids)
        ok = (j != -1) | np.isnan(tri.transform[:,0,0])
        assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
        if unit_cube:
            # If in unit cube, no interior point should be marked out of hull
            at_boundary = (centroids <= unit_cube_tol).any(axis=1)
            at_boundary |= (centroids >= 1 - unit_cube_tol).any(axis=1)
            ok = (j != -1) | at_boundary
            assert_(ok.all(), "%s %s" % (err_msg, np.where(~ok)))
    def test_degenerate_barycentric_transforms(self):
        # The triangulation should not produce invalid barycentric
        # transforms that stump the simplex finding
        data = np.load(os.path.join(os.path.dirname(__file__), 'data',
                                    'degenerate_pointset.npz'))
        points = data['c']
        data.close()
        tri = qhull.Delaunay(points)
        # Check that there are not too many invalid simplices
        bad_count = np.isnan(tri.transform[:,0,0]).sum()
        assert_(bad_count < 20, bad_count)
        # Check the transforms
        self._check_barycentric_transforms(tri)
    @dec.slow
    def test_more_barycentric_transforms(self):
        # Triangulate some "nasty" grids
        eps = np.finfo(float).eps
        # Grid resolution per dimension, chosen to keep runtime bounded.
        npoints = {2: 70, 3: 11, 4: 5, 5: 3}
        _is_32bit_platform = np.intp(0).itemsize < 8
        for ndim in xrange(2, 6):
            # Generate an uniform grid in n-d unit cube
            x = np.linspace(0, 1, npoints[ndim])
            grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T
            err_msg = "ndim=%d" % ndim
            # Check using regular grid
            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True)
            # Check with eps-perturbations
            np.random.seed(1234)
            m = (np.random.rand(grid.shape[0]) < 0.2)
            grid[m,:] += 2*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
            tri = qhull.Delaunay(grid)
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)
            # Check with duplicated data
            tri = qhull.Delaunay(np.r_[grid, grid])
            self._check_barycentric_transforms(tri, err_msg=err_msg,
                                               unit_cube=True,
                                               unit_cube_tol=2*eps)
            if not _is_32bit_platform:
                # test numerically unstable, and reported to fail on 32-bit
                # installs
                # Check with larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m,:] += 1000*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri, err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1500*eps)
                # Check with yet larger perturbations
                np.random.seed(4321)
                m = (np.random.rand(grid.shape[0]) < 0.2)
                grid[m,:] += 1e6*eps*(np.random.rand(*grid[m,:].shape) - 0.5)
                tri = qhull.Delaunay(grid)
                self._check_barycentric_transforms(tri, err_msg=err_msg,
                                                   unit_cube=True,
                                                   unit_cube_tol=1e7*eps)
class TestVertexNeighborVertices(object):
    """Tests for the CSR-style ``vertex_neighbor_vertices`` of Delaunay."""
    def _check(self, tri):
        """Compare tri.vertex_neighbor_vertices to a brute-force adjacency."""
        npoints = tri.points.shape[0]
        # Reference answer: two vertices are neighbors iff they appear
        # together in at least one simplex.
        expected = [set() for _ in range(npoints)]
        for simplex in tri.simplices:
            for a in simplex:
                for b in simplex:
                    if a != b:
                        expected[a].add(b)
        indices, indptr = tri.vertex_neighbor_vertices
        got = [set(map(int, indptr[indices[j]:indices[j + 1]]))
               for j in range(npoints)]
        assert_equal(got, expected, err_msg="%r != %r" % (got, expected))
    def test_triangle(self):
        """A lone triangle: every vertex neighbors the other two."""
        pts = np.array([(0, 0), (0, 1), (1, 0)], dtype=np.double)
        self._check(qhull.Delaunay(pts))
    def test_rectangle(self):
        """Unit square, triangulated into two simplices."""
        pts = np.array([(0, 0), (0, 1), (1, 1), (1, 0)], dtype=np.double)
        self._check(qhull.Delaunay(pts))
    def test_complicated(self):
        """Square plus two interior points."""
        pts = np.array([(0, 0), (0, 1), (1, 1), (1, 0),
                        (0.5, 0.5), (0.9, 0.5)], dtype=np.double)
        self._check(qhull.Delaunay(pts))
class TestDelaunay(object):
    """
    Check that triangulation works.
    """
    def test_masked_array_fails(self):
        # Masked input must be rejected outright.
        masked_array = np.ma.masked_all(1)
        assert_raises(ValueError, qhull.Delaunay, masked_array)
    def test_array_with_nans_fails(self):
        # NaN coordinates must be rejected outright.
        points_with_nan = np.array([(0,0), (0,1), (1,1), (1,np.nan)], dtype=np.double)
        assert_raises(ValueError, qhull.Delaunay, points_with_nan)
    def test_nd_simplex(self):
        # simple smoke test: triangulate a n-dimensional simplex
        for nd in xrange(2, 8):
            points = np.zeros((nd+1, nd))
            for j in xrange(nd):
                points[j,j] = 1.0
            points[-1,:] = 1.0
            tri = qhull.Delaunay(points)
            tri.vertices.sort()
            # A single simplex containing every input point, no neighbors.
            assert_equal(tri.vertices, np.arange(nd+1, dtype=int)[None,:])
            assert_equal(tri.neighbors, -1 + np.zeros((nd+1), dtype=int)[None,:])
    def test_2d_square(self):
        # simple smoke test: 2d square
        points = np.array([(0,0), (0,1), (1,1), (1,0)], dtype=np.double)
        tri = qhull.Delaunay(points)
        assert_equal(tri.vertices, [[1, 3, 2], [3, 1, 0]])
        assert_equal(tri.neighbors, [[-1, -1, 1], [-1, -1, 0]])
    def test_duplicate_points(self):
        x = np.array([0, 1, 0, 1], dtype=np.float64)
        y = np.array([0, 0, 1, 1], dtype=np.float64)
        xp = np.r_[x, x]
        yp = np.r_[y, y]
        # shouldn't fail on duplicate points (smoke test only; the results
        # are intentionally unused)
        tri = qhull.Delaunay(np.c_[x, y])
        tri2 = qhull.Delaunay(np.c_[xp, yp])
    def test_pathological(self):
        # both should succeed
        points = DATASETS['pathological-1']
        tri = qhull.Delaunay(points)
        assert_equal(tri.points[tri.vertices].max(), points.max())
        assert_equal(tri.points[tri.vertices].min(), points.min())
        points = DATASETS['pathological-2']
        tri = qhull.Delaunay(points)
        assert_equal(tri.points[tri.vertices].max(), points.max())
        assert_equal(tri.points[tri.vertices].min(), points.min())
    def test_joggle(self):
        # Check that the option QJ indeed guarantees that all input points
        # occur as vertices of the triangulation
        points = np.random.rand(10, 2)
        points = np.r_[points, points]  # duplicate input data
        tri = qhull.Delaunay(points, qhull_options="QJ Qbb Pp")
        assert_array_equal(np.unique(tri.simplices.ravel()),
                           np.arange(len(points)))
    def test_coplanar(self):
        # Check that the coplanar point output option indeed works
        points = np.random.rand(10, 2)
        points = np.r_[points, points]  # duplicate input data
        tri = qhull.Delaunay(points)
        # Half of the (duplicated) points end up as coplanar, not vertices.
        assert_(len(np.unique(tri.simplices.ravel())) == len(points)//2)
        assert_(len(tri.coplanar) == len(points)//2)
        assert_(len(np.unique(tri.coplanar[:,2])) == len(points)//2)
        assert_(np.all(tri.vertex_to_simplex >= 0))
    def test_furthest_site(self):
        points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
        tri = qhull.Delaunay(points, furthest_site=True)
        expected = np.array([(1, 4, 0), (4, 2, 0)])  # from Qhull
        assert_array_equal(tri.simplices, expected)
    def test_incremental(self):
        # Test incremental construction of the triangulation
        def check(name):
            chunks, opts = INCREMENTAL_DATASETS[name]
            points = np.concatenate(chunks, axis=0)
            # obj: built chunk-by-chunk; obj2: built upfront;
            # obj3: built incrementally with a single restart.
            obj = qhull.Delaunay(chunks[0], incremental=True,
                                 qhull_options=opts)
            for chunk in chunks[1:]:
                obj.add_points(chunk)
            obj2 = qhull.Delaunay(points)
            obj3 = qhull.Delaunay(chunks[0], incremental=True,
                                  qhull_options=opts)
            if len(chunks) > 1:
                obj3.add_points(np.concatenate(chunks[1:], axis=0),
                                restart=True)
            # Check that the incremental mode agrees with upfront mode
            if name.startswith('pathological'):
                # XXX: These produce valid but different triangulations.
                # They look OK when plotted, but how to check them?
                assert_array_equal(np.unique(obj.simplices.ravel()),
                                   np.arange(points.shape[0]))
                assert_array_equal(np.unique(obj2.simplices.ravel()),
                                   np.arange(points.shape[0]))
            else:
                assert_unordered_tuple_list_equal(obj.simplices, obj2.simplices,
                                                  tpl=sorted_tuple)
                assert_unordered_tuple_list_equal(obj2.simplices, obj3.simplices,
                                                  tpl=sorted_tuple)
        # Generator-style (nose) parametrization over all datasets.
        for name in sorted(INCREMENTAL_DATASETS):
            yield check, name
def assert_hulls_equal(points, facets_1, facets_2):
    # Check that two convex hulls constructed from the same point set
    # are equal
    facets_1 = set(map(sorted_tuple, facets_1))
    facets_2 = set(map(sorted_tuple, facets_2))
    if facets_1 != facets_2 and points.shape[1] == 2:
        # The direct check fails for the pathological cases
        # --- then the convex hull from Delaunay differs (due
        # to rounding error etc.) from the hull computed
        # otherwise, by the question whether (tricoplanar)
        # points that lie almost exactly on the hull are
        # included as vertices of the hull or not.
        #
        # So we check the result, and accept it if the Delaunay
        # hull line segments are a subset of the usual hull.
        eps = 1000 * np.finfo(float).eps
        for a, b in facets_1:
            for ap, bp in facets_2:
                t = points[bp] - points[ap]
                t /= np.linalg.norm(t)  # tangent
                n = np.array([-t[1], t[0]])  # normal
                # check that the two line segments are parallel
                # to the same line
                c1 = np.dot(n, points[b] - points[ap])
                c2 = np.dot(n, points[a] - points[ap])
                if not np.allclose(np.dot(c1, n), 0):
                    continue
                if not np.allclose(np.dot(c2, n), 0):
                    continue
                # Check that the segment (a, b) is contained in (ap, bp)
                # (c1, c2, c3 are tangential coordinates along (ap, bp)).
                c1 = np.dot(t, points[a] - points[ap])
                c2 = np.dot(t, points[b] - points[ap])
                c3 = np.dot(t, points[bp] - points[ap])
                if c1 < -eps or c1 > c3 + eps:
                    continue
                if c2 < -eps or c2 > c3 + eps:
                    continue
                # OK:
                break
            else:
                # for/else: no segment of facets_2 contains (a, b).
                raise AssertionError("comparison fails")
        # it was OK
        return
    assert_equal(facets_1, facets_2)
class TestConvexHull:
    """Tests for qhull.ConvexHull."""
    def test_masked_array_fails(self):
        # Masked input must be rejected outright.
        masked_array = np.ma.masked_all(1)
        assert_raises(ValueError, qhull.ConvexHull, masked_array)
    def test_array_with_nans_fails(self):
        # NaN coordinates must be rejected outright.
        points_with_nan = np.array([(0,0), (1,1), (2,np.nan)], dtype=np.double)
        assert_raises(ValueError, qhull.ConvexHull, points_with_nan)
    def test_hull_consistency_tri(self):
        # Check that a convex hull returned by qhull in ndim
        # and the hull constructed from ndim delaunay agree
        def check(name):
            points = DATASETS[name]
            tri = qhull.Delaunay(points)
            hull = qhull.ConvexHull(points)
            assert_hulls_equal(points, tri.convex_hull, hull.simplices)
            # Check that the hull extremes are as expected
            if points.shape[1] == 2:
                assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
            else:
                assert_equal(np.unique(hull.simplices), hull.vertices)
        # Generator-style (nose) parametrization over all datasets.
        for name in sorted(DATASETS):
            yield check, name
    def test_incremental(self):
        # Test incremental construction of the convex hull
        def check(name):
            chunks, _ = INCREMENTAL_DATASETS[name]
            points = np.concatenate(chunks, axis=0)
            # obj: chunk-by-chunk; obj2: upfront; obj3: single restart.
            obj = qhull.ConvexHull(chunks[0], incremental=True)
            for chunk in chunks[1:]:
                obj.add_points(chunk)
            obj2 = qhull.ConvexHull(points)
            obj3 = qhull.ConvexHull(chunks[0], incremental=True)
            if len(chunks) > 1:
                obj3.add_points(np.concatenate(chunks[1:], axis=0),
                                restart=True)
            # Check that the incremental mode agrees with upfront mode
            assert_hulls_equal(points, obj.simplices, obj2.simplices)
            assert_hulls_equal(points, obj.simplices, obj3.simplices)
        for name in sorted(INCREMENTAL_DATASETS):
            yield check, name
    def test_vertices_2d(self):
        # The vertices should be in counterclockwise order in 2-D
        np.random.seed(1234)
        points = np.random.rand(30, 2)
        hull = qhull.ConvexHull(points)
        assert_equal(np.unique(hull.simplices), np.sort(hull.vertices))
        # Check counterclockwiseness
        x, y = hull.points[hull.vertices].T
        angle = np.arctan2(y - y.mean(), x - x.mean())
        assert_(np.all(np.diff(np.unwrap(angle)) > 0))
    def test_volume_area(self):
        # Basic check that we get back the correct volume and area for a cube
        points = np.array([(0, 0, 0), (0, 1, 0), (1, 0, 0), (1, 1, 0),
                           (0, 0, 1), (0, 1, 1), (1, 0, 1), (1, 1, 1)])
        tri = qhull.ConvexHull(points)
        assert_allclose(tri.volume, 1., rtol=1e-14)
        assert_allclose(tri.area, 6., rtol=1e-14)
class TestVoronoi:
    """Tests for qhull.Voronoi, validated against raw qvoronoi output."""
    def test_masked_array_fails(self):
        # Masked input must be rejected outright.
        masked_array = np.ma.masked_all(1)
        assert_raises(ValueError, qhull.Voronoi, masked_array)
    def test_simple(self):
        # Simple case with known Voronoi diagram
        points = [(0, 0), (0, 1), (0, 2),
                  (1, 0), (1, 1), (1, 2),
                  (2, 0), (2, 1), (2, 2)]
        # qhull v o Fv Qbb Qc Qz < dat
        output = """
        2
        5 10 1
        -10.101 -10.101
        0.5 0.5
        1.5 0.5
        0.5 1.5
        1.5 1.5
        2 0 1
        3 3 0 1
        2 0 3
        3 2 0 1
        4 4 3 1 2
        3 4 0 3
        2 0 2
        3 4 0 2
        2 0 4
        0
        12
        4 0 3 0 1
        4 0 1 0 1
        4 1 4 1 3
        4 1 2 0 3
        4 2 5 0 3
        4 3 4 1 2
        4 3 6 0 2
        4 4 5 3 4
        4 4 7 2 4
        4 5 8 0 4
        4 6 7 0 2
        4 7 8 0 4
        """
        self._compare_qvoronoi(points, output)
    def _compare_qvoronoi(self, points, output, **kw):
        """Compare to output from 'qvoronoi o Fv < data' to Voronoi()"""
        # Parse output
        output = [list(map(float, x.split())) for x in output.strip().splitlines()]
        nvertex = int(output[1][0])
        vertices = list(map(tuple, output[3:2+nvertex]))  # exclude inf
        nregion = int(output[1][1])
        # Vertex indices in qvoronoi output are 1-based (index 0 is the
        # vertex at infinity), hence the -1 shifts below.
        regions = [[int(y)-1 for y in x[1:]]
                   for x in output[2+nvertex:2+nvertex+nregion]]
        nridge = int(output[2+nvertex+nregion][0])
        ridge_points = [[int(y) for y in x[1:3]]
                        for x in output[3+nvertex+nregion:]]
        ridge_vertices = [[int(y)-1 for y in x[3:]]
                          for x in output[3+nvertex+nregion:]]
        # Compare results
        vor = qhull.Voronoi(points, **kw)
        def sorttuple(x):
            return tuple(sorted(x))
        assert_allclose(vor.vertices, vertices)
        assert_equal(set(map(tuple, vor.regions)),
                     set(map(tuple, regions)))
        # Ridges may come out in any order; compare as sorted pairings.
        p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices))))
        p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())),
                      list(map(sorttuple, vor.ridge_vertices))))
        p1.sort()
        p2.sort()
        assert_equal(p1, p2)
    def test_ridges(self):
        # Check that the ridges computed by Voronoi indeed separate
        # the regions of nearest neighborhood, by comparing the result
        # to KDTree.
        def check(name):
            points = DATASETS[name]
            tree = KDTree(points)
            vor = qhull.Voronoi(points)
            for p, v in vor.ridge_dict.items():
                # consider only finite ridges
                if not np.all(np.asarray(v) >= 0):
                    continue
                ridge_midpoint = vor.vertices[v].mean(axis=0)
                # Nudge the midpoint slightly toward each generator point
                # and verify that point is the nearest neighbor there.
                d = 1e-6 * (points[p[0]] - ridge_midpoint)
                dist, k = tree.query(ridge_midpoint + d, k=1)
                assert_equal(k, p[0])
                dist, k = tree.query(ridge_midpoint - d, k=1)
                assert_equal(k, p[1])
        for name in DATASETS:
            yield check, name
    def test_furthest_site(self):
        points = [(0, 0), (0, 1), (1, 0), (0.5, 0.5), (1.1, 1.1)]
        # qhull v o Fv Qbb Qc Qu < dat
        output = """
        2
        3 5 1
        -10.101 -10.101
        0.6000000000000001 0.5
        0.5 0.6000000000000001
        3 0 1 2
        2 0 1
        2 0 2
        0
        3 0 1 2
        5
        4 0 2 0 2
        4 0 1 0 1
        4 0 4 1 2
        4 1 4 0 1
        4 2 4 0 2
        """
        self._compare_qvoronoi(points, output, furthest_site=True)
    def test_incremental(self):
        # Test incremental construction of the triangulation
        def check(name):
            chunks, opts = INCREMENTAL_DATASETS[name]
            points = np.concatenate(chunks, axis=0)
            # obj: chunk-by-chunk; obj2: upfront; obj3: single restart.
            obj = qhull.Voronoi(chunks[0], incremental=True,
                                qhull_options=opts)
            for chunk in chunks[1:]:
                obj.add_points(chunk)
            obj2 = qhull.Voronoi(points)
            obj3 = qhull.Voronoi(chunks[0], incremental=True,
                                 qhull_options=opts)
            if len(chunks) > 1:
                obj3.add_points(np.concatenate(chunks[1:], axis=0),
                                restart=True)
            # -- Check that the incremental mode agrees with upfront mode
            assert_equal(len(obj.point_region), len(obj2.point_region))
            assert_equal(len(obj.point_region), len(obj3.point_region))
            # The vertices may be in different order or duplicated in
            # the incremental map
            for objx in obj, obj3:
                # -1 encodes the vertex at infinity; it maps to itself.
                vertex_map = {-1: -1}
                for i, v in enumerate(objx.vertices):
                    for j, v2 in enumerate(obj2.vertices):
                        if np.allclose(v, v2):
                            vertex_map[i] = j
                def remap(x):
                    # Recursively translate incremental vertex indices to
                    # their upfront counterparts.
                    if hasattr(x, '__len__'):
                        return tuple(set([remap(y) for y in x]))
                    try:
                        return vertex_map[x]
                    except KeyError:
                        raise AssertionError("incremental result has spurious vertex at %r"
                                             % (objx.vertices[x],))
                def simplified(x):
                    # Canonical form: sorted unique non-trivial vertex tuples.
                    items = set(map(sorted_tuple, x))
                    if () in items:
                        items.remove(())
                    items = [x for x in items if len(x) > 1]
                    items.sort()
                    return items
                assert_equal(
                    simplified(remap(objx.regions)),
                    simplified(obj2.regions)
                )
                assert_equal(
                    simplified(remap(objx.ridge_vertices)),
                    simplified(obj2.ridge_vertices)
                )
                # XXX: compare ridge_points --- not clear exactly how to do this
        for name in sorted(INCREMENTAL_DATASETS):
            if INCREMENTAL_DATASETS[name][0][0].shape[1] > 3:
                # too slow (testing of the result --- qhull is still fast)
                continue
            yield check, name
if __name__ == "__main__":
    # Allow running this test module directly via the numpy test runner.
    run_module_suite()
| 36.451178 | 97 | 0.513609 |
acfa06e4898ace0212801d72789de2e5464abb78 | 1,990 | py | Python | pvlib/test/conftest.py | alamathe1/pvlib-python | ec996611887747bdef43db874147095e3337721a | [
"BSD-3-Clause"
] | null | null | null | pvlib/test/conftest.py | alamathe1/pvlib-python | ec996611887747bdef43db874147095e3337721a | [
"BSD-3-Clause"
] | null | null | null | pvlib/test/conftest.py | alamathe1/pvlib-python | ec996611887747bdef43db874147095e3337721a | [
"BSD-3-Clause"
] | null | null | null | import sys
import platform
from pkg_resources import parse_version
import pandas as pd
import numpy as np
import pytest
# Marker for tests that must not run on Windows.
# BUG FIX: the previous condition `'win' in sys.platform` also matched
# 'darwin' (macOS), silently skipping these tests there.  On Windows,
# sys.platform starts with 'win' (e.g. 'win32'), so a prefix check is the
# correct test.
skip_windows = pytest.mark.skipif(sys.platform.startswith('win'),
                                  reason='does not run on windows')
# Optional dependencies: probe for each one and build a corresponding
# pytest skip marker, so tests can be decorated with e.g. @requires_scipy.
try:
    import scipy
    has_scipy = True
except ImportError:
    has_scipy = False
requires_scipy = pytest.mark.skipif(not has_scipy, reason='requires scipy')
try:
    import tables
    has_tables = True
except ImportError:
    has_tables = False
requires_tables = pytest.mark.skipif(not has_tables, reason='requires tables')
try:
    import ephem
    has_ephem = True
except ImportError:
    has_ephem = False
requires_ephem = pytest.mark.skipif(not has_ephem, reason='requires ephem')
def pandas_0_17():
    """Return True when the installed pandas is version 0.17.0 or newer."""
    minimum = parse_version('0.17.0')
    return parse_version(pd.__version__) >= minimum
# Marker for tests that rely on pandas >= 0.17 behavior.
needs_pandas_0_17 = pytest.mark.skipif(
    not pandas_0_17(), reason='requires pandas 0.17 or greater')
def numpy_1_10():
    """Return True when the installed numpy is version 1.10.0 or newer."""
    installed = parse_version(np.__version__)
    return installed >= parse_version('1.10.0')
# Marker for tests that rely on numpy >= 1.10 behavior.
needs_numpy_1_10 = pytest.mark.skipif(
    not numpy_1_10(), reason='requires numpy 1.10 or greater')
def pandas_0_22():
    """Return True when the installed pandas is version 0.22.0 or newer."""
    threshold = parse_version('0.22.0')
    return parse_version(pd.__version__) >= threshold
def has_spa_c():
    """Return True when the compiled SPA C extension can be imported."""
    try:
        from pvlib.spa_c_files.spa_py import spa_calc  # noqa: F401
    except ImportError:
        return False
    return True
# Marker for tests that need the compiled SPA C extension.
requires_spa_c = pytest.mark.skipif(not has_spa_c(), reason="requires spa_c")
def has_numba():
    """Return True when numba >= 0.17 is importable, else False.
    BUG FIX: the previous version returned ``True`` from the
    ``ImportError`` branch, i.e. it claimed numba was available precisely
    when importing it failed (and @requires_numba would then never skip).
    """
    try:
        import numba
    except ImportError:
        return False
    else:
        vers = numba.__version__.split('.')
        # Concatenated major+minor digits, e.g. '0' + '17' -> 17.
        if int(vers[0] + vers[1]) < 17:
            return False
        else:
            return True
# Marker for tests that need numba (>= 0.17).
requires_numba = pytest.mark.skipif(not has_numba(), reason="requires numba")
# Optional siphon dependency probe and its pytest skip marker.
try:
    import siphon
    has_siphon = True
except ImportError:
    has_siphon = False
requires_siphon = pytest.mark.skipif(not has_siphon,
                                     reason='requires siphon')
| 22.111111 | 78 | 0.675879 |
acfa06f1fa3d12f3bff5b36c1f328866f124ac2a | 4,972 | py | Python | src/garage/experiment/meta_evaluator.py | kristian-georgiev/garage | f75475d5ace1e2f3c49887b2fc2c40edb306d29d | [
"MIT"
] | null | null | null | src/garage/experiment/meta_evaluator.py | kristian-georgiev/garage | f75475d5ace1e2f3c49887b2fc2c40edb306d29d | [
"MIT"
] | null | null | null | src/garage/experiment/meta_evaluator.py | kristian-georgiev/garage | f75475d5ace1e2f3c49887b2fc2c40edb306d29d | [
"MIT"
] | null | null | null | """Evaluator which tests Meta-RL algorithms on test environments."""
import pdb
from dowel import logger, tabular
from garage import EpisodeBatch, log_multitask_performance
from garage.experiment.deterministic import get_seed
from garage.sampler import DefaultWorker, LocalSampler, WorkerFactory
class MetaEvaluator:
    """Evaluates Meta-RL algorithms on test environments.
    Args:
        test_task_sampler (TaskSampler): Sampler for test
            tasks. To demonstrate the effectiveness of a meta-learning method,
            these should be different from the training tasks.
        n_test_tasks (int or None): Number of test tasks to sample each time
            evaluation is performed. Note that tasks are sampled "without
            replacement". If None, is set to `test_task_sampler.n_tasks`.
        n_exploration_eps (int): Number of episodes to gather from the
            exploration policy before requesting the meta algorithm to produce
            an adapted policy.
        n_test_episodes (int): Number of episodes to use for each adapted
            policy. The adapted policy should forget previous episodes when
            `.reset()` is called.
        n_grad_updates (int): Number of adaptation steps. Stored on the
            evaluator but not referenced elsewhere in this class.
        prefix (str): Prefix to use when logging. Defaults to MetaTest. For
            example, this results in logging the key 'MetaTest/SuccessRate'.
            If not set to `MetaTest`, it should probably be set to `MetaTrain`.
        test_task_names (list[str]): List of task names to test. Should be in
            an order consistent with the `task_id` env_info, if that is
            present.
        worker_class (type): Type of worker the Sampler should use.
        worker_args (dict or None): Additional arguments that should be
            passed to the worker.
        verbose (bool): If True, print each adapted episode's task and last
            observations after meta-testing.
    """
    # pylint: disable=too-few-public-methods
    def __init__(self,
                 *,
                 test_task_sampler,
                 n_exploration_eps=10,
                 n_test_tasks=None,
                 n_test_episodes=1,
                 n_grad_updates=1,
                 prefix='MetaTest',
                 test_task_names=None,
                 worker_class=DefaultWorker,
                 worker_args=None,
                 verbose=False):
        self._test_task_sampler = test_task_sampler
        self._worker_class = worker_class
        if worker_args is None:
            self._worker_args = {}
        else:
            self._worker_args = worker_args
        if n_test_tasks is None:
            # Default to evaluating on every available test task.
            n_test_tasks = test_task_sampler.n_tasks
        self._n_test_tasks = n_test_tasks
        self._n_test_episodes = n_test_episodes
        self._n_exploration_eps = n_exploration_eps
        self._n_grad_updates = n_grad_updates
        # Monotonically increasing counter of evaluate() calls, used as the
        # sampler iteration number and the logging step.
        self._eval_itr = 0
        self._prefix = prefix
        self._test_task_names = test_task_names
        self._test_sampler = None
        self._max_episode_length = None
        self._verbose = verbose
    def evaluate(self, algo, test_episodes_per_task=None):
        """Evaluate the Meta-RL algorithm on the test tasks.
        Args:
            algo (MetaRLAlgorithm): The algorithm to evaluate.
            test_episodes_per_task (int or None): Number of episodes per task.
                Defaults to ``n_test_episodes`` given at construction.
        Returns:
            list: One batch of adapted episodes per sampled test task.
        """
        if test_episodes_per_task is None:
            test_episodes_per_task = self._n_test_episodes
        adapted_episodes = []
        logger.log('Sampling for adapation and meta-testing...')
        env_updates = self._test_task_sampler.sample(self._n_test_tasks)
        for env_up in env_updates:
            policy = algo.get_exploration_policy()
            # Gather n_exploration_eps single-episode batches with the
            # (unadapted) exploration policy on this task.
            # NOTE(review): reaches into the algorithm's private `_sampler`
            # attribute; confirm this is the intended API.
            eps = EpisodeBatch.concatenate(*[
                algo._sampler.obtain_samples(self._eval_itr, 1,
                                             policy,
                                             env_up)
                for _ in range(self._n_exploration_eps)
            ])
            # Adapt the policy on the exploration episodes, then collect
            # evaluation episodes with the adapted policy.
            adapted_policy = algo.get_adapted_test_policy(policy, eps)
            adapted_eps = algo._sampler.obtain_samples(
                self._eval_itr,
                test_episodes_per_task * env_up().spec.max_episode_length,
                adapted_policy)
            adapted_episodes.append(adapted_eps)
        if self._verbose:
            for ep in adapted_episodes:
                print(ep.env_infos['task'][0])
                print(f'last observations: {ep.last_observations}')
                print('------------------------------------')
        logger.log('Finished meta-testing...')
        if self._test_task_names is not None:
            # Map task index -> human-readable name for logging.
            name_map = dict(enumerate(self._test_task_names))
        else:
            name_map = None
        with tabular.prefix(self._prefix + '/' if self._prefix else ''):
            log_multitask_performance(
                self._eval_itr,
                EpisodeBatch.concatenate(*adapted_episodes),
                getattr(algo, 'discount', 1.0),
                name_map=name_map)
        self._eval_itr += 1
        return adapted_episodes
| 41.090909 | 79 | 0.613636 |
acfa07610c82c4ae54e53eac00ed288517709429 | 380 | py | Python | crawler/code/supplemental/blocking-fetch.py | kennywbin/500lines | e72f05bac2087f368251d3f263ae325c268e5171 | [
"CC-BY-3.0"
] | null | null | null | crawler/code/supplemental/blocking-fetch.py | kennywbin/500lines | e72f05bac2087f368251d3f263ae325c268e5171 | [
"CC-BY-3.0"
] | null | null | null | crawler/code/supplemental/blocking-fetch.py | kennywbin/500lines | e72f05bac2087f368251d3f263ae325c268e5171 | [
"CC-BY-3.0"
] | null | null | null | import socket
def threaded_method():
sock = socket.socket()
sock.connect(('xkcd.com', 80))
request = 'GET /353/ HTTP/1.0\r\nHost: xkcd.com\r\n\r\n'
sock.send(request.encode('ascii'))
response = b''
chunk = sock.recv(4096)
while chunk:
response += chunk
chunk = sock.recv(4096)
print(response)
threaded_method()
| 21.111111 | 61 | 0.584211 |
acfa077bdc426060019bd6378467f576696a6174 | 811 | py | Python | saefportal/users/views/event_log_views.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/users/views/event_log_views.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | null | null | null | saefportal/users/views/event_log_views.py | harry-consulting/SAEF1 | 055d6e492ba76f90e3248b9da2985fdfe0c6b430 | [
"BSD-2-Clause"
] | 1 | 2020-12-16T15:02:52.000Z | 2020-12-16T15:02:52.000Z | from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic import TemplateView
from rest_framework_tracking.models import APIRequestLog
from users.models import AdministrativeEvent
from users.util import get_saef_events, get_run_events
@method_decorator(login_required, name="dispatch")
class EventLogView(TemplateView):
    """Render the event log page with API, administrative, SAEF and run events.
    ``login_required`` is applied to ``dispatch``, so unauthenticated users
    never reach this view.
    """
    template_name = "users/event_log.html"
    def get_context_data(self, **kwargs):
        """Add the four event collections to the template context."""
        context = super(EventLogView, self).get_context_data(**kwargs)
        # All API request log entries recorded by rest_framework_tracking.
        context["api_events"] = APIRequestLog.objects.all()
        context["admin_events"] = AdministrativeEvent.objects.all()
        # Helper functions from users.util supply these event collections.
        context["saef_events"] = get_saef_events()
        context["run_events"] = get_run_events()
        return context
| 36.863636 | 70 | 0.76942 |
acfa07a0ee146d4e89f01cba5f534d105658dece | 7,331 | py | Python | drone_ws/src/rqt_reconfigure/src/rqt_reconfigure/param_widget.py | Advanced-Coordinational-Robotics/Obstacle-Avoidance-on-UAV | 575947c47672f42993f4ef00a72c3154ec18d878 | [
"MIT"
] | 3 | 2019-10-13T11:47:41.000Z | 2022-02-27T22:18:46.000Z | drone_ws/src/rqt_reconfigure/src/rqt_reconfigure/param_widget.py | acr-iitkgp/Obstacle-Avoidance-on-UAV | 575947c47672f42993f4ef00a72c3154ec18d878 | [
"MIT"
] | 1 | 2020-08-24T03:28:49.000Z | 2020-08-24T03:28:49.000Z | drone_ws/src/rqt_reconfigure/src/rqt_reconfigure/param_widget.py | acr-iitkgp/Obstacle-Avoidance-on-UAV | 575947c47672f42993f4ef00a72c3154ec18d878 | [
"MIT"
] | 1 | 2019-10-12T17:07:01.000Z | 2019-10-12T17:07:01.000Z | # Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Isaac Saito
from __future__ import division
import rospkg
import sys
from python_qt_binding.QtCore import Signal, QMargins
from python_qt_binding.QtWidgets import (QLabel, QHBoxLayout, QSplitter,
QVBoxLayout, QWidget)
from rqt_reconfigure.node_selector_widget import NodeSelectorWidget
from rqt_reconfigure.paramedit_widget import ParameditWidget
from rqt_reconfigure.text_filter import TextFilter
from rqt_reconfigure.text_filter_widget import TextFilterWidget
import rospy
class ParamWidget(QWidget):
_TITLE_PLUGIN = 'Dynamic Reconfigure'
# To be connected to PluginContainerWidget
sig_sysmsg = Signal(str)
sig_sysprogress = Signal(str)
# To make selections from CLA
sig_selected = Signal(str)
def __init__(self, context, node=None):
"""
This class is intended to be called by rqt plugin framework class.
Currently (12/12/2012) the whole widget is splitted into 2 panes:
one on left allows you to choose the node(s) you work on. Right side
pane lets you work with the parameters associated with the node(s) you
select on the left.
(12/27/2012) Despite the pkg name is changed to rqt_reconfigure to
reflect the available functionality, file & class names remain
'param', expecting all the parameters will become handle-able.
"""
super(ParamWidget, self).__init__()
self.setObjectName(self._TITLE_PLUGIN)
self.setWindowTitle(self._TITLE_PLUGIN)
rp = rospkg.RosPack()
#TODO: .ui file needs to replace the GUI components declaration
# below. For unknown reason, referring to another .ui files
# from a .ui that is used in this class failed. So for now,
# I decided not use .ui in this class.
# If someone can tackle this I'd appreciate.
_hlayout_top = QHBoxLayout(self)
_hlayout_top.setContentsMargins(QMargins(0, 0, 0, 0))
self._splitter = QSplitter(self)
_hlayout_top.addWidget(self._splitter)
_vlayout_nodesel_widget = QWidget()
_vlayout_nodesel_side = QVBoxLayout()
_hlayout_filter_widget = QWidget(self)
_hlayout_filter = QHBoxLayout()
self._text_filter = TextFilter()
self.filter_lineedit = TextFilterWidget(self._text_filter, rp)
self.filterkey_label = QLabel("&Filter key:")
self.filterkey_label.setBuddy(self.filter_lineedit)
_hlayout_filter.addWidget(self.filterkey_label)
_hlayout_filter.addWidget(self.filter_lineedit)
_hlayout_filter_widget.setLayout(_hlayout_filter)
self._nodesel_widget = NodeSelectorWidget(self, rp, self.sig_sysmsg)
_vlayout_nodesel_side.addWidget(_hlayout_filter_widget)
_vlayout_nodesel_side.addWidget(self._nodesel_widget)
_vlayout_nodesel_side.setSpacing(1)
_vlayout_nodesel_widget.setLayout(_vlayout_nodesel_side)
reconf_widget = ParameditWidget(rp)
self._splitter.insertWidget(0, _vlayout_nodesel_widget)
self._splitter.insertWidget(1, reconf_widget)
# 1st column, _vlayout_nodesel_widget, to minimize width.
# 2nd col to keep the possible max width.
self._splitter.setStretchFactor(0, 0)
self._splitter.setStretchFactor(1, 1)
# Signal from paramedit widget to node selector widget.
reconf_widget.sig_node_disabled_selected.connect(
self._nodesel_widget.node_deselected)
# Pass name of node to editor widget
self._nodesel_widget.sig_node_selected.connect(
reconf_widget.show_reconf)
if not node:
title = self._TITLE_PLUGIN
else:
title = self._TITLE_PLUGIN + ' %s' % node
self.setObjectName(title)
#Connect filter signal-slots.
self._text_filter.filter_changed_signal.connect(
self._filter_key_changed)
# Open any clients indicated from command line
self.sig_selected.connect(self._nodesel_widget.node_selected)
for rn in [rospy.resolve_name(c) for c in context.argv()]:
if rn in self._nodesel_widget.get_paramitems():
self.sig_selected.emit(rn)
else:
rospy.logwarn('Could not find a dynamic reconfigure client named \'%s\'', str(rn))
def shutdown(self):
#TODO: Needs implemented. Trigger dynamic_reconfigure to unlatch
# subscriber.
pass
def save_settings(self, plugin_settings, instance_settings):
instance_settings.set_value('splitter', self._splitter.saveState())
def restore_settings(self, plugin_settings, instance_settings):
if instance_settings.contains('splitter'):
self._splitter.restoreState(instance_settings.value('splitter'))
else:
self._splitter.setSizes([100, 100, 200])
def get_filter_text(self):
"""
:rtype: QString
"""
return self.filter_lineedit.text()
def _filter_key_changed(self):
self._nodesel_widget.set_filter(self._text_filter)
#TODO: This method should be integrated into common architecture. I just
# can't think of how to do so within current design.
def emit_sysmsg(self, msg_str):
self.sig_sysmsg.emit(msg_str)
if __name__ == '__main__':
# main should be used only for debug purpose.
# This launches this QWidget as a standalone rqt gui.
from rqt_gui.main import Main
main = Main()
sys.exit(main.main(sys.argv, standalone='rqt_reconfigure'))
| 41.653409 | 98 | 0.695403 |
acfa08450c42c2dc42047946de12a87d3e5ca500 | 14,814 | py | Python | EWS/pcrasterModules/evapotranspirationpenman.py | KoenvanLoon/EWS | 3447921ec2140f29fd69d5b140b5eba2f244bccd | [
"MIT"
] | null | null | null | EWS/pcrasterModules/evapotranspirationpenman.py | KoenvanLoon/EWS | 3447921ec2140f29fd69d5b140b5eba2f244bccd | [
"MIT"
] | null | null | null | EWS/pcrasterModules/evapotranspirationpenman.py | KoenvanLoon/EWS | 3447921ec2140f29fd69d5b140b5eba2f244bccd | [
"MIT"
] | null | null | null | import pcraster as pcr
import component
# Notes:
# time step duration in h
# vertical fluxes in m/h, variable name 'flux'
# vertical fluxes over a time step in m, variable name 'fluxAmount'
# amounts in storages in m (stores), variable name 'store'
# (everything as a waterslice over a whole cell)
# if unit cannot be derived in this way (e.g. flux/fluxAmount/store), unit is indicated
# inputs of function is PCRaster type, inside function Python types are used
# equations for the potential evapotranspiration Penman-Monteith
# based on the Thesis of van der Kwast, 2009. Otherwise it is indicated.
['airTemperature', 'albedo', 'clearSkyIncomingShortwaveRadiationFlatSurface', 'cloudFactor', 'elevationAboveSeaLevelOfMeteoStation', 'evapotranspirationOccurs', 'fWaterPotential', 'fractionReceivedFlatSurface', 'incomingShortwaveRadiation', 'incomingShortwaveRadiationFlatSurface', 'maxStomatalConduc', 'potentialEvapotranspirationAmount', 'potentialEvapotranspirationFlux', 'potentialEvapotranspirationFromCanopyAmount', 'potentialEvapotranspirationFromCanopyFlux', 'relativeHumidity', 'setOfVariablesToReport', 'timeStepDuration', 'timeStepsToReport', 'variablesToReport', 'vegHeight', 'windVelocityNotZero']
class EvapotranspirationPenman(component.Component):
def __init__(self, timeStepDuration, albedo, maxStomatalConductance, vegetationHeight, LAI,
timeStepsToReport, setOfVariablesToReport):
# init only for suspend and resume in filter
self.variablesToReport = {}
self.aboveVegHeight = pcr.scalar(0)
self.aerodynamicRes = pcr.scalar(0)
self.airTemperature = pcr.scalar(0)
self.windVelocityAboveVegHeight = pcr.scalar(0)
self.clearSkyIncomingShortwaveRadiationFlatSurface = pcr.scalar(0)
self.cloudFactor = pcr.scalar(0)
self.elevationAboveSeaLevelOfMeteoStation = pcr.scalar(0)
self.evapotranspirationOccurs = pcr.scalar(0)
self.fWaterPotential = pcr.scalar(0)
self.fractionReceivedFlatSurface = pcr.scalar(0)
self.incomingShortwaveRadiation = pcr.scalar(0)
self.incomingShortwaveRadiationFlatSurface = pcr.scalar(0)
self.potentialEvapotranspirationAmount = pcr.scalar(0)
self.potentialEvapotranspirationFlux = pcr.scalar(0)
self.potentialEvapotranspirationFromCanopyAmount = pcr.scalar(0)
self.potentialEvapotranspirationFromCanopyFlux = pcr.scalar(0)
self.relativeHumidity = pcr.scalar(0)
self.windVelocityNotZero = pcr.scalar(0)
# real inits
# maxStomatalConduc: maximum stomatal conductance (m s-1), appr. 0.04 m/s
# vegetationHeight: height of vegetation, m
self.timeStepDuration = timeStepDuration
self.albedo = pcr.scalar(albedo)
self.maxStomatalConduc = maxStomatalConductance
self.vegHeight = vegetationHeight
self.LAI = LAI
self.timeStepsToReport = timeStepsToReport
self.setOfVariablesToReport = setOfVariablesToReport
def reportAsMaps(self, sample, timestep):
self.output_mapping = {
'Ep': self.potentialEvapotranspirationFlux,
# 'gs': self.maxStomatalConduc,
# 'ra': self.aerodynamicRes,
# 'windz': self.windVelocityAboveVegHeight,
'Epc': self.potentialEvapotranspirationFromCanopyFlux,
# 'Ecl': self.cloudFactor,
# 'Ecs': self.clearSkyIncomingShortwaveRadiationFlatSurface,
# 'Eis': self.incomingShortwaveRadiationFlatSurface
}
# reports
self.variablesToReport = self.rasters_to_report(self.setOfVariablesToReport)
self.reportMaps(sample, timestep)
def updateVariablesAsNumpyToReport(self):
self.variablesAsNumpyToReport = {
'gc': self.maxStomatalConduc,
# 'Ecl': self.cloudFactor,
# 'Ecs': self.clearSkyIncomingShortwaveRadiationFlatSurface,
# 'Eis': self.incomingShortwaveRadiationFlatSurface,
}
def reportAsNumpyOneFile(self, locations, sample, timestep, endTimeStep):
self.updateVariablesAsNumpyToReport()
self.reportAsNumpyOneFilePerRealization(locations, sample, timestep, endTimeStep)
def reportAsNumpyMultipleFiles(self, locations, sample, timestep):
self.updateVariablesAsNumpyToReport()
self.reportAsNumpy(locations, sample, timestep)
def fluxToAmount(self, flux):
fluxAmount = flux * self.timeStepDuration
return fluxAmount
def calculateClearSkySolarRadiation(self, fractionReceivedFlatSurface):
# calculates incoming shortware radiation of a clear sky in W/m2, for a flat
# surface (to make it comparable with actual incoming shortwave radiation measured at a meteo
# station
# solar constant (W/m2)taken from POTRAD manual and FAO Penman manual, Allen (no date), eg 28
solarConstant = 1368.0
# equation 28, FAO Penman manual (Allen, no date), modified.
extraTerrestrialRadiation = solarConstant * fractionReceivedFlatSurface
# equation 37, FAO Penman manual (Allen, no date), modified.
clearSkySolarRadiation = (0.75 + 0.00002 * self.elevationAboveSeaLevelOfMeteoStation) * extraTerrestrialRadiation
return clearSkySolarRadiation
def potentialEvapotranspiration(self,
airTemperature,
relativeHumidity,
incomingShortwaveRadiation,
incomingShortwaveRadiationFlatSurface,
fractionReceivedFlatSurface,
windVelocity,
elevationAboveSeaLevelOfMeteoStation,
fWaterPotential,
evapotranspirationOccurs):
# airTemperature (C), relativeHumidity (-), incomingShortRadiation (W m-2),
# fractionReceivedFlatSurface (0-1),
# incomingShortwaveRadiation, watt/m2 of soil surface (ie corrected for topography and solar angle, etc)
# incomingShortwaveRadiationFlatSurface, watt/m2, of flat surface (ie raw data from meteo station)
# fractionReceivedFlatSurface is the cross sectional area of a beam divided by its area of incidence on the soil surface
# windVelocity, wind velocity, m/s
# elevationAboveSeaLevelOfMeteoStation (m) is the elevation at the location where the
# fWaterPotential (0-1) reduction factor for stomatal conductance
self.airTemperature = pcr.scalar(airTemperature)
self.relativeHumidity = pcr.scalar(relativeHumidity)
self.incomingShortwaveRadiation = pcr.scalar(incomingShortwaveRadiation)
self.incomingShortwaveRadiationFlatSurface = pcr.scalar(incomingShortwaveRadiationFlatSurface)
self.fractionReceivedFlatSurface = pcr.scalar(fractionReceivedFlatSurface)
self.windVelocityNotZero = pcr.max(0.05, pcr.scalar(windVelocity)) #N FAO recommends u>0.5m/s
self.elevationAboveSeaLevelOfMeteoStation = pcr.scalar(elevationAboveSeaLevelOfMeteoStation)
self.fWaterPotential = fWaterPotential
self.evapotranspirationOccurs = evapotranspirationOccurs
# Radiation module
ST = 5.67 * 10.0**-8.0 # Stephan-Boltzman constant (W m-2 K-4)
# saturated vapour pressure (Pa), eq 8.18 (eq 11 in FAO56)
sVapourPress = 611.0 * pcr.exp((17.27 * self.airTemperature) / (237.3 + self.airTemperature))
# actual vapour pressure (Pa), eq 8.17
aVapourPress = self.relativeHumidity * sVapourPress
self.clearSkyIncomingShortwaveRadiationFlatSurface = pcr.max(0.00000001, self.calculateClearSkySolarRadiation(
self.fractionReceivedFlatSurface))
# cloud factor (-), eq 8.19
self.cloudFactor = pcr.min(pcr.scalar(1.0),
self.incomingShortwaveRadiationFlatSurface / self.clearSkyIncomingShortwaveRadiationFlatSurface)
# potential longwave radiation (W m-2), eq 8.20
pLongRadiation = ST * (self.airTemperature + 273.15)**4.0
# net long wave radiation (W m-2) Feddes et al 1983, eq 8.16
netLongRadiation = pLongRadiation * (0.56 - 0.008 * pcr.sqrt(aVapourPress)) * (0.1 + 0.9 * self.cloudFactor)
# net radiation (W m-2), eq 8.15
netRadiation = pcr.max(0.00001, self.incomingShortwaveRadiation * (1.0 - self.albedo) - netLongRadiation)
# aerodynamic resistance module
# Wind velocity at z (based on equation 5.33, Holton 2004)
observedWindHeight = 3.0 # height of meteo station
self.aboveVegHeight = self.vegHeight + 3.0 # we assume wind measurement is 3 m above vegetation
self.windVelocityAboveVegHeight = self.windVelocityNotZero + (0.3 / 0.41) * pcr.ln(self.aboveVegHeight / observedWindHeight)
# aerodynamic resistance, Grace (1981)
self.aerodynamicRes = pcr.ln((self.aboveVegHeight - 0.7 * self.vegHeight) / (0.1 * self.vegHeight))**2.0 / (0.41**2.0 * self.windVelocityAboveVegHeight)
# self.aerodynamicRes=5.0
# report(pcr.scalar(self.aerodynamicRes),'ra.map')
# surface resistance module
# soil moisture reduction factor for stomatal conductance, simplified (see Brolsma et al 2010)
stomatalConduc = pcr.max(self.maxStomatalConduc * self.fWaterPotential, 0.00001)
# surface (or canopy) resistance (s m-1)
stomatalRes = 1.0 / stomatalConduc
LAIactive = 0.5 * self.LAI # FAO Penman manual (Allen, 1998), Box 5
surfaceRes = stomatalRes / LAIactive # FAO Penman manual (Allen, 1998), equation 5
# other variables
# here with fixed input values for Press (Pa)
# contants
airHeat = 1013.0 # air specific heat at constant pressure (J kg-1 K-1)
latentHeat = 2450000.0 # latent heat of water vaporization (J kg-1) from FAO56 Annex3
# eq 8.29 vapour pressure deficit (Pa)
defiVapourPress = sVapourPress - aVapourPress
# eq 8.31 slope of saturation vapour pressure temperature relationship (Pa K-1) I didn't included 1/274.15 in Hans
slopePressTemp = ((4098.0 * sVapourPress) / (237.3 + self.airTemperature)**2.0)
# eq 8.33 psychrometric constant (Pa K-1), FAO56 eq 8 (Brunt, 1956 see Annex3)
Press = 88400.0 # air pressure at z=1150 m (FAO56)
psychrConstant = 0.665 * 10.0**-3.0 * Press
# final equation for potential evapotranspiration
# contants
airDens = 1.2047 # mean air density (kg m-3)
# eq 8.14 potential evapotranspiration (mm s-1), Monteith, 1981 (or 1965?)
pEvapt = (1.0 / latentHeat) * (((netRadiation * slopePressTemp) + (airDens * airHeat * defiVapourPress / self.aerodynamicRes)) /
(slopePressTemp + psychrConstant * (1.0 + surfaceRes / self.aerodynamicRes)))
self.potentialEvapotranspirationFlux = pEvapt * (3600.0 / 1000.0)
self.potentialEvapotranspirationFlux = pcr.ifthenelse(
self.evapotranspirationOccurs,
self.potentialEvapotranspirationFlux,
pcr.scalar(0))
# potential evapotranspiration amount (m over a timestep)
self.potentialEvapotranspirationAmount = self.fluxToAmount(self.potentialEvapotranspirationFlux)
# same with surface resistance zero (i.e., canopy evapotranspiration)
surfaceRes = 0.0
pEvapt = (1.0 / latentHeat) * (((netRadiation * slopePressTemp) + (airDens * airHeat * defiVapourPress / self.aerodynamicRes)) /
(slopePressTemp + psychrConstant * (1.0 + surfaceRes / self.aerodynamicRes)))
self.potentialEvapotranspirationFromCanopyFlux = pEvapt * (3600.0 / 1000.0)
self.potentialEvapotranspirationFromCanopyFlux = pcr.ifthenelse(
self.evapotranspirationOccurs,
self.potentialEvapotranspirationFromCanopyFlux,
pcr.scalar(0))
# potential evapotranspiration amount (m over a timestep)
self.potentialEvapotranspirationFromCanopyAmount = self.fluxToAmount(self.potentialEvapotranspirationFromCanopyFlux)
# report(self.potentialEvapotranspirationAmount,'jan')
return self.potentialEvapotranspirationFlux, self.potentialEvapotranspirationAmount, \
self.potentialEvapotranspirationFromCanopyFlux, self.potentialEvapotranspirationFromCanopyAmount
## test
## setclone('mergeClone.map')
#timeStepDuration = pcr.scalar(1)
#albedo = pcr.scalar(0.15)
#maxStomatalConductance = 0.0053
#vegetationHeight = 5.0
#timeStepsToReportAll = 'test'
#setOfVariablesToReport = 'test'
## d_penmanPCRasterPython = EvapotranspirationPenman(
## timeStepDuration, \
## albedo, \
## maxStomatalConductance, \
## vegetationHeight, \
## timeStepsToReportAll, \
## setOfVariablesToReport)
## inputs for testing
#airTemperature = pcr.scalar(23)
#relativeHumidity = pcr.scalar(0.6)
#incomingShortwaveRadiationAtSurface = pcr.scalar(300.0)
#incomingShortwaveRadiationFlatSurface = pcr.scalar(300.0)
#fractionReceivedFlatSurface = 1.0
#windVelocity = 0.3
#elevationAboveSeaLevelOfMeteoStation = 900.0
#fWaterPotential = 0.2
#evapotranspirationOccurs = 1
# call function in class
# potentialEvapotranspirationFlux, potentialEvapotranspirationAmount, \
# potentialEvapotranspirationFromCanopyFlux, potentialEvapotranspirationFromCanopyAmount = \
# d_penmanPCRasterPython.potentialEvapotranspiration( \
# airTemperature, \
# relativeHumidity, \
# incomingShortwaveRadiationAtSurface, \
# incomingShortwaveRadiationFlatSurface, \
# fractionReceivedFlatSurface, \
# windVelocity,
# elevationAboveSeaLevelOfMeteoStation,
# fWaterPotential, \
# evapotranspirationOccurs)
# report(potentialEvapotranspirationFlux,'testpetFlux.map')
# report(potentialEvapotranspirationAmount,'testpetAmount.map')
# report(potentialEvapotranspirationFromCanopyFlux,'testpetCanFlux.map')
# report(potentialEvapotranspirationFromCanopyAmount,'testpetCanAmount.map')
| 51.259516 | 611 | 0.667274 |
acfa08bbe93cbe6a9329834b59cbdab7a6721b2f | 2,818 | py | Python | src/dec_pomdp_server/scripts/visualization/source_location_visualization.py | TAMS-Group/decpomdp_signal_source_localization | 3e785c6bb464a1f853b889ce4e3ac343243bc030 | [
"Apache-2.0"
] | null | null | null | src/dec_pomdp_server/scripts/visualization/source_location_visualization.py | TAMS-Group/decpomdp_signal_source_localization | 3e785c6bb464a1f853b889ce4e3ac343243bc030 | [
"Apache-2.0"
] | null | null | null | src/dec_pomdp_server/scripts/visualization/source_location_visualization.py | TAMS-Group/decpomdp_signal_source_localization | 3e785c6bb464a1f853b889ce4e3ac343243bc030 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# source_location_visualization.py
#
# Copyright 2020 Tobias Krueger
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
from visualization_msgs.msg import Marker
from geometry_msgs.msg import Vector3, Pose
from std_msgs.msg import ColorRGBA
class SourceLocationVisualizer:
SOURCE_LOCATION_ID = 0
SOURCE_LOCATION_DESC_ID =1
source_location = Marker(
type = Marker.CUBE,
ns = "SignalSource",
id = SOURCE_LOCATION_ID,
lifetime = rospy.Duration(secs=20),
scale = Vector3(0.1, 0.1, 0.1),
color = ColorRGBA(0.8, 0.0, 0.8, 1.0)
)
source_location_description = Marker(
type = Marker.TEXT_VIEW_FACING,
ns = "SignalSource",
id = SOURCE_LOCATION_DESC_ID,
lifetime = rospy.Duration(secs=20),
scale = Vector3(0.1, 0.1, 0.1),
color = ColorRGBA(0.0, 0.0, 0.5, 1.0),
text= "Signal Source Location"
)
pose = Pose()
def visualize(self):
self.source_location.header.stamp = rospy.get_rostime()
self.source_location_description.header.stamp = rospy.get_rostime()
self.markerPublisher.publish(self.source_location)
self.markerPublisher.publish(self.source_location_description)
def __init__(self, frame_id):
self.source_location.header.frame_id = frame_id
self.source_location_description.header.frame_id = frame_id
rospy.init_node('source_location_visualization')
self.markerPublisher= rospy.Publisher('source_location', Marker, queue_size=2)
router_location = rospy.get_param("/access_point_location")
self.pose.position.x = router_location['x']
self.pose.position.y = router_location['y']
self.pose.position.z = router_location['z']
self.pose.orientation.w = 1
rospy.loginfo("Pose z is %f" % self.pose.position.z)
self.source_location.pose = self.pose
self.source_location.action = Marker.ADD
self.source_location_description.pose = self.pose
self.source_location_description.action = Marker.ADD
if __name__ == '__main__':
visualizer = SourceLocationVisualizer('map')
while not rospy.is_shutdown():
visualizer.visualize()
rospy.sleep(rospy.Duration(secs=20))
| 37.573333 | 86 | 0.689496 |
acfa0909393cbd4a52b3535ac33ce5f1a8ed5ed9 | 724 | py | Python | utils/boilerplate/turtle.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null | utils/boilerplate/turtle.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null | utils/boilerplate/turtle.py | cfginn/sap-simulation-package | 73314e5380cec5c61a9fe5ff5fbafa25b9e2beac | [
"MIT"
] | null | null | null |
from pysapets.animal import Animal
import pysapets.constants as constants
import random
import logging
class Turtle(Animal):
# base health and attack values
BASE_ATTACK = 2
BASE_HEALTH = 4
def __init__(self, addAttack = 0, addHealth = 0):
# lvl 1: Faint: Give friend behind Melon Armor
# lvl 2: Faint: Give 2 friends behind Melon Armor
# lvl 3: Faint: Give 3 friends behind Melon Armor
def _run_effect(self, friends):
pass
# create ability
self.ability = Animal.Ability(self, constants.FAINT, constants.SELF, _run_effect)
super().__init__(addAttack + self.BASE_ATTACK, addHealth + self.BASE_HEALTH, animalType = constants.TURTLE, tier = 3, ability=self.ability)
| 30.166667 | 143 | 0.712707 |
acfa091abb712d530189529f7cedbe3201183083 | 259 | py | Python | unilatex/sparepart/doctype/transfer_barang/transfer_barang.py | anditsung/unilatex | 7eae2797e138eaaee50da1167d02386eaae3c251 | [
"MIT"
] | null | null | null | unilatex/sparepart/doctype/transfer_barang/transfer_barang.py | anditsung/unilatex | 7eae2797e138eaaee50da1167d02386eaae3c251 | [
"MIT"
] | null | null | null | unilatex/sparepart/doctype/transfer_barang/transfer_barang.py | anditsung/unilatex | 7eae2797e138eaaee50da1167d02386eaae3c251 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018, unilatex and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class TransferBarang(Document):
pass
| 23.545455 | 49 | 0.783784 |
acfa0a217c8aeb6116e476a5547ea0737d958053 | 24,271 | py | Python | test/test_0702_auto.py | uhliarik/mod_md | fa00a87984cdc4b7a62a8d8e5c57d1b18bb0dde3 | [
"Apache-2.0"
] | null | null | null | test/test_0702_auto.py | uhliarik/mod_md | fa00a87984cdc4b7a62a8d8e5c57d1b18bb0dde3 | [
"Apache-2.0"
] | null | null | null | test/test_0702_auto.py | uhliarik/mod_md | fa00a87984cdc4b7a62a8d8e5c57d1b18bb0dde3 | [
"Apache-2.0"
] | null | null | null | # test auto runs against ACMEv2
import json
import os
import pytest
import re
import socket
import ssl
import sys
import time
from datetime import datetime
from TestEnv import TestEnv
from TestHttpdConf import HttpdConf
from TestCertUtil import CertUtil
def setup_module(module):
    """Initialize the ACMEv2 test environment once for this module.

    Verifies the ACME test CA is reachable, wipes the MD store and installs
    a baseline config, then starts Apache.
    """
    print("setup_module module:%s" % module.__name__)
    TestEnv.initv2()
    TestEnv.APACHE_CONF_SRC = "data/test_auto"
    TestEnv.check_acme()
    TestEnv.clear_store()
    # stray trailing semicolon removed (un-Pythonic no-op)
    HttpdConf().install()
    assert TestEnv.apache_start() == 0
def teardown_module(module):
    """Stop the Apache server after all tests in this module finished."""
    print("teardown_module module:%s" % module.__name__)
    rc = TestEnv.apache_stop()
    assert rc == 0
class TestAutov2:
def setup_method(self, method):
print("setup_method: %s" % method.__name__)
TestEnv.httpd_error_log_clear();
TestEnv.clear_store()
self.test_domain = TestEnv.get_method_domain(method)
def teardown_method(self, method):
print("teardown_method: %s" % method.__name__)
# create a MD not used in any virtual host, auto drive should NOT pick it up
def test_702_001(self):
domain = self.test_domain
# generate config with one MD
domains = [ domain, "www." + domain ]
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "auto" )
conf.add_md( domains )
conf.install()
#
# restart, check that MD is synched to store
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
stat = TestEnv.get_md_status(domain)
assert stat["watched"] == 0
#
# add vhost for MD, restart should drive it
conf.add_vhost(domains)
conf.install()
assert TestEnv.apache_restart() == 0
assert TestEnv.await_completion([ domain ] )
TestEnv.check_md_complete(domain)
stat = TestEnv.get_md_status(domain)
assert stat["watched"] == 1
cert = TestEnv.get_cert(domain)
assert domain in cert.get_san_list()
#
# challenges should have been removed
# file system needs to have correct permissions
TestEnv.check_dir_empty( TestEnv.store_challenges() )
TestEnv.check_file_permissions( domain )
# test case: same as test_7001, but with two parallel managed domains
def test_702_002(self):
domain = self.test_domain
domainA = "a-" + domain
domainB = "b-" + domain
#
# generate config with two MDs
domainsA = [ domainA, "www." + domainA ]
domainsB = [ domainB, "www." + domainB ]
conf = HttpdConf()
conf.add_admin( "admin@not-forbidden.org" )
conf.add_drive_mode( "auto" )
conf.add_md(domainsA)
conf.add_md(domainsB)
conf.add_vhost(domainsA)
conf.add_vhost(domainsB)
conf.install()
#
# restart, check that md is in store
assert TestEnv.apache_restart() == 0
TestEnv.check_md( domainsA )
TestEnv.check_md( domainsB )
#
# await drive completion, do not restart
assert TestEnv.await_completion( [ domainA, domainB ], restart=False )
# staged certificates are now visible on the status resources
status = TestEnv.get_md_status( domainA )
assert 'renewal' in status
assert 'cert' in status['renewal']
assert 'sha256-fingerprint' in status['renewal']['cert']
# restart and activate
assert TestEnv.apache_restart() == 0
# check: SSL is running OK
certA = TestEnv.get_cert(domainA)
assert domainsA == certA.get_san_list()
certB = TestEnv.get_cert(domainB)
assert domainsB == certB.get_san_list()
# test case: one MD, that covers two vhosts
def test_702_003(self):
domain = self.test_domain
nameA = "test-a." + domain
nameB = "test-b." + domain
domains = [ domain, nameA, nameB ]
#
# generate 1 MD and 2 vhosts
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_md( domains )
conf.add_vhost(nameA, docRoot="htdocs/a")
conf.add_vhost(nameB, docRoot="htdocs/b")
conf.install()
#
# create docRoot folder
self._write_res_file( os.path.join(TestEnv.APACHE_HTDOCS_DIR, "a"), "name.txt", nameA )
self._write_res_file( os.path.join(TestEnv.APACHE_HTDOCS_DIR, "b"), "name.txt", nameB )
#
# restart (-> drive), check that MD was synched and completes
assert TestEnv.apache_restart() == 0
TestEnv.check_md( domains )
assert TestEnv.await_completion( [ domain ] )
TestEnv.check_md_complete(domain)
#
# check: SSL is running OK
certA = TestEnv.get_cert(nameA)
assert nameA in certA.get_san_list()
certB = TestEnv.get_cert(nameB)
assert nameB in certB.get_san_list()
assert certA.get_serial() == certB.get_serial()
#
assert TestEnv.get_content( nameA, "/name.txt" ) == nameA
assert TestEnv.get_content( nameB, "/name.txt" ) == nameB
# test case: drive with using single challenge type explicitly
@pytest.mark.parametrize("challengeType", [
("tls-alpn-01"),
("http-01")
])
def test_702_004(self, challengeType):
domain = self.test_domain
domains = [ domain, "www." + domain ]
#
# generate 1 MD and 1 vhost
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_line( "Protocols http/1.1 acme-tls/1" )
conf.add_drive_mode( "auto" )
conf.add_ca_challenges( [ challengeType ] )
conf.add_md( domains )
conf.add_vhost(domains)
conf.install()
#
# restart (-> drive), check that MD was synched and completes
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
assert TestEnv.await_completion( [ domain ] )
TestEnv.check_md_complete(domain)
#
# check SSL running OK
cert = TestEnv.get_cert(domain)
assert domain in cert.get_san_list()
# test case: drive_mode manual, check that server starts, but requests to domain are 503'd
def test_702_005(self):
domain = self.test_domain
nameA = "test-a." + domain
domains = [ domain, nameA ]
#
# generate 1 MD and 1 vhost
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "manual" )
conf.add_md( domains )
conf.add_vhost(nameA, docRoot="htdocs/a")
conf.install()
#
# create docRoot folder
self._write_res_file(os.path.join(TestEnv.APACHE_HTDOCS_DIR, "a"), "name.txt", nameA)
#
# restart, check that md is in store
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
#
# check: that request to domains give 503 Service Unavailable
cert1 = TestEnv.get_cert(nameA)
assert nameA in cert1.get_san_list()
assert TestEnv.getStatus(nameA, "/name.txt") == 503
#
# check temporary cert from server
cert2 = CertUtil( TestEnv.path_fallback_cert( domain ) )
assert cert1.get_serial() == cert2.get_serial(), \
"Unexpected temporary certificate on vhost %s. Expected cn: %s , but found cn: %s" % ( nameA, cert2.get_cn(), cert1.get_cn() )
# test case: drive MD with only invalid challenges, domains should stay 503'd
def test_702_006(self):
domain = self.test_domain
nameA = "test-a." + domain
domains = [ domain, nameA ]
#
# generate 1 MD, 1 vhost
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_ca_challenges([ "invalid-01", "invalid-02" ])
conf.add_md( domains )
conf.add_vhost(nameA, docRoot="htdocs/a")
conf.install()
#
# create docRoot folder
self._write_res_file(os.path.join(TestEnv.APACHE_HTDOCS_DIR, "a"), "name.txt", nameA)
#
# restart, check that md is in store
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
# await drive completion
md = TestEnv.await_error(domain)
assert md
assert md['renewal']['errors'] > 0
assert md['renewal']['last']['problem'] == 'challenge-mismatch'
assert 'account' not in md['ca']
#
# check: that request to domains give 503 Service Unavailable
cert = TestEnv.get_cert(nameA)
assert nameA in cert.get_san_list()
assert TestEnv.getStatus(nameA, "/name.txt") == 503
# Specify a non-working http proxy
def test_702_008(self):
domain = self.test_domain
domains = [ domain ]
#
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "always" )
conf.add_http_proxy( "http://localhost:1" )
conf.add_md( domains )
conf.install()
#
# - restart (-> drive)
assert TestEnv.apache_restart() == 0
# await drive completion
md = TestEnv.await_error(domain)
assert md
assert md['renewal']['errors'] > 0
assert md['renewal']['last']['status-description'] == 'Connection refused'
assert 'account' not in md['ca']
# Specify a valid http proxy
def test_702_008a(self):
domain = self.test_domain
domains = [ domain ]
#
conf = HttpdConf(proxy=True)
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "always" )
conf.add_http_proxy( "http://localhost:%s" % TestEnv.HTTP_PROXY_PORT)
conf.add_md( domains )
conf.install()
#
# - restart (-> drive), check that md is in store
assert TestEnv.apache_restart() == 0
assert TestEnv.await_completion( [ domain ] )
assert TestEnv.apache_restart() == 0
TestEnv.check_md_complete(domain)
# Force cert renewal due to critical remaining valid duration
# Assert that new cert activation is delayed
def test_702_009(self):
domain = self.test_domain
domains = [ domain ]
#
# prepare md
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "auto" )
conf.add_renew_window( "10d" )
conf.add_md( domains )
conf.add_vhost(domain)
conf.install()
#
# restart (-> drive), check that md+cert is in store, TLS is up
assert TestEnv.apache_restart() == 0
assert TestEnv.await_completion( [ domain ] )
TestEnv.check_md_complete(domain)
cert1 = CertUtil( TestEnv.store_domain_file(domain, 'pubcert.pem') )
# compare with what md reports as status
stat = TestEnv.get_certificate_status(domain);
assert stat['serial'] == cert1.get_serial()
#
# create self-signed cert, with critical remaining valid duration -> drive again
TestEnv.create_self_signed_cert( [domain], { "notBefore": -120, "notAfter": 2 }, serial=7029)
cert3 = CertUtil( TestEnv.store_domain_file(domain, 'pubcert.pem') )
assert cert3.get_serial() == '1B75'
assert TestEnv.apache_restart() == 0
stat = TestEnv.get_certificate_status(domain);
assert stat['serial'] == cert3.get_serial()
#
# cert should renew and be different afterwards
assert TestEnv.await_completion( [ domain ], must_renew=True)
stat = TestEnv.get_certificate_status(domain);
assert stat['serial'] != cert3.get_serial()
# test case: drive with an unsupported challenge due to port availability
def test_702_010(self):
domain = self.test_domain
domains = [ domain, "www." + domain ]
#
# generate 1 MD and 1 vhost, map port 80 onto itself where the server does not listen
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "auto" )
conf.add_ca_challenges( [ "http-01" ] )
conf._add_line("MDPortMap 80:99")
conf.add_md( domains )
conf.add_vhost( domains )
conf.install()
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
assert not TestEnv.is_renewing( domain )
#
# now the same with a 80 mapped to a supported port
conf = HttpdConf()
conf.add_admin( "admin@" + domain )
conf.add_drive_mode( "auto" )
conf.add_ca_challenges( [ "http-01" ] )
conf._add_line("MDPortMap 80:%s" % TestEnv.HTTP_PORT)
conf.add_md( domains )
conf.add_vhost( domains )
conf.install()
assert TestEnv.apache_restart() == 0
TestEnv.check_md(domains)
assert TestEnv.await_completion( [ domain ] )
def test_702_011(self):
    """Drive with an unsupported challenge due to port availability (tls-alpn-01).

    Same pattern as test_702_010, but for the tls-alpn-01 challenge: with
    https mapped to a dead port renewal must not start; with https mapped
    to the test HTTPS port the MD must complete.
    """
    domain = self.test_domain
    domains = [ domain, "www." + domain ]
    #
    # generate 1 MD and 1 vhost, map https onto a port where the server does not listen
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_line( "Protocols http/1.1 acme-tls/1" )
    conf.add_drive_mode( "auto" )
    conf.add_ca_challenges( [ "tls-alpn-01" ] )
    conf._add_line("MDPortMap https:99")
    conf.add_md( domains )
    conf.add_vhost(domains)
    conf.install()
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md(domains)
    # tls-alpn-01 is unreachable -> renewal must not even be attempted
    assert not TestEnv.is_renewing( domain )
    #
    # now the same with https mapped to a supported port
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_line( "Protocols http/1.1 acme-tls/1" )
    conf.add_drive_mode( "auto" )
    conf.add_ca_challenges( [ "tls-alpn-01" ] )
    conf._add_line("MDPortMap https:%s" % TestEnv.HTTPS_PORT)
    conf.add_md( domains )
    conf.add_vhost(domains)
    conf.install()
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md(domains)
    assert TestEnv.await_completion( [ domain ] )
# test case: one MD with several dns names. sign up. remove the *first* name
# in the MD. restart. should find and keep the existing MD.
# See: https://github.com/icing/mod_md/issues/68
def test_702_030(self):
    """One MD with several DNS names; remove the *first* name after sign-up.

    The MD must be found again after restart and the existing certificate
    must be kept.  See: https://github.com/icing/mod_md/issues/68
    """
    domain = self.test_domain
    nameX = "test-x." + domain
    nameA = "test-a." + domain
    nameB = "test-b." + domain
    domains = [ nameX, nameA, nameB ]
    #
    # generate 1 MD and 2 vhosts
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_md( domains )
    conf.add_vhost(nameA)
    conf.add_vhost(nameB)
    conf.install()
    #
    # restart (-> drive), check that MD was synched and completes
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md(domains)
    assert TestEnv.await_completion( [ nameX ] )
    TestEnv.check_md_complete(nameX)
    #
    # check: SSL is running OK; both vhosts share one certificate
    certA = TestEnv.get_cert(nameA)
    assert nameA in certA.get_san_list()
    certB = TestEnv.get_cert(nameB)
    assert nameB in certB.get_san_list()
    assert certA.get_serial() == certB.get_serial()
    #
    # change MD by removing 1st name
    new_list = [ nameA, nameB ]
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_md( new_list )
    conf.add_vhost(nameA)
    conf.add_vhost(nameB)
    conf.install()
    # restart, check that host still works and kept the cert
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md( new_list )
    status = TestEnv.get_certificate_status( nameA )
    # same serial -> the previously obtained certificate was reused
    assert status['serial'] == certA.get_serial()
# test case: Same as 7030, but remove *and* add another at the same time.
# restart. should find and keep the existing MD and renew for additional name.
# See: https://github.com/icing/mod_md/issues/68
def test_702_031(self):
    """Same as test_702_030, but remove *and* add a name at the same time.

    The existing MD must be found and kept, and a renewal must run for
    the added name.  See: https://github.com/icing/mod_md/issues/68
    """
    domain = self.test_domain
    nameX = "test-x." + domain
    nameA = "test-a." + domain
    nameB = "test-b." + domain
    nameC = "test-c." + domain
    domains = [ nameX, nameA, nameB ]
    #
    # generate 1 MD and 2 vhosts
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_md( domains )
    conf.add_vhost(nameA)
    conf.add_vhost(nameB)
    conf.install()
    #
    # restart (-> drive), check that MD was synched and completes
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md( domains )
    assert TestEnv.await_completion( [ nameX ] )
    TestEnv.check_md_complete(nameX)
    #
    # check: SSL is running OK; both vhosts share one certificate
    certA = TestEnv.get_cert(nameA)
    assert nameA in certA.get_san_list()
    certB = TestEnv.get_cert(nameB)
    assert nameB in certB.get_san_list()
    assert certA.get_serial() == certB.get_serial()
    #
    # change MD by removing 1st name and adding another
    new_list = [ nameA, nameB, nameC ]
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_md( new_list )
    conf.add_vhost(nameA)
    conf.add_vhost(nameB)
    conf.install()
    # restart, check that host still works and have new cert
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md( new_list )
    assert TestEnv.await_completion( [ nameA ] )
    #
    # a different serial proves the certificate was renewed for the new set
    certA2 = TestEnv.get_cert(nameA)
    assert nameA in certA2.get_san_list()
    assert certA.get_serial() != certA2.get_serial()
# test case: create two MDs, move them into one
# see: <https://bz.apache.org/bugzilla/show_bug.cgi?id=62572>
def test_702_032(self):
    """Create two MDs, then merge them into one.

    See: https://bz.apache.org/bugzilla/show_bug.cgi?id=62572
    """
    domain = self.test_domain
    name1 = "server1." + domain
    name2 = "server2.b" + domain  # need a separate TLD to avoid rate limites
    #
    # generate 2 MDs and 2 vhosts
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf._add_line( "MDMembers auto" )
    conf.add_md([ name1 ])
    conf.add_md([ name2 ])
    conf.add_vhost(name1)
    conf.add_vhost(name2)
    conf.install()
    #
    # restart (-> drive), check that MD was synched and completes
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md([ name1 ])
    TestEnv.check_md([ name2 ])
    assert TestEnv.await_completion( [ name1, name2 ] )
    TestEnv.check_md_complete(name2)
    #
    # check: SSL is running OK
    cert1 = TestEnv.get_cert(name1)
    assert name1 in cert1.get_san_list()
    cert2 = TestEnv.get_cert(name2)
    assert name2 in cert2.get_san_list()
    #
    # remove second md and vhost, add name2 to vhost1
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf._add_line( "MDMembers auto" )
    conf.add_md( [ name1 ] )
    conf.add_vhost([ name1, name2 ])
    conf.install()
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md([ name1, name2 ])
    assert TestEnv.await_completion([ name1 ])
    #
    # the merged MD must get a fresh certificate covering both names
    cert1b = TestEnv.get_cert(name1)
    assert name1 in cert1b.get_san_list()
    assert name2 in cert1b.get_san_list()
    assert cert1.get_serial() != cert1b.get_serial()
# test case: test "tls-alpn-01" challenge handling
def test_702_040(self):
    """Test "tls-alpn-01" challenge handling with acme-tls/1 enabled."""
    domain = self.test_domain
    domains = [ domain, "www." + domain ]
    #
    # generate 1 MD and 1 vhost
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_line( "LogLevel core:debug" )
    conf.add_line( "LogLevel ssl:debug" )
    conf.add_line( "Protocols http/1.1 acme-tls/1" )
    conf.add_drive_mode( "auto" )
    conf.add_ca_challenges( [ "tls-alpn-01" ] )
    conf.add_md( domains )
    conf.add_vhost(domains)
    conf.install()
    #
    # restart (-> drive), check that MD was synched and completes
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md(domains)
    # check that acme-tls/1 is available for all domains
    stat = TestEnv.get_md_status(domain)
    assert stat["proto"]["acme-tls/1"] == domains
    assert TestEnv.await_completion( [ domain ] )
    TestEnv.check_md_complete(domain)
    #
    # check SSL running OK
    cert = TestEnv.get_cert(domain)
    assert domain in cert.get_san_list()
# test case: test "tls-alpn-01" without enabling 'acme-tls/1' challenge protocol
def test_702_041(self):
    """Test "tls-alpn-01" without enabling the 'acme-tls/1' challenge protocol.

    Without a "Protocols ... acme-tls/1" line, the module must detect the
    missing protocol and report acme-tls/1 as unavailable for all names.
    """
    domain = self.test_domain
    domains = [ domain, "www." + domain ]
    #
    # generate 1 MD and 1 vhost
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_line( "LogLevel core:debug" )
    conf.add_line( "LogLevel ssl:debug" )
    conf.add_drive_mode( "auto" )
    conf.add_ca_challenges( [ "tls-alpn-01" ] )
    conf.add_md( domains )
    conf.add_vhost(domains)
    conf.install()
    #
    # restart (-> drive), check that MD job shows errors
    # and that missing proto is detected
    assert TestEnv.apache_restart() == 0
    TestEnv.check_md(domains)
    # check that acme-tls/1 is available for none of the domains
    stat = TestEnv.get_md_status(domain)
    assert stat["proto"]["acme-tls/1"] == []
# test case: 2.4.40 mod_ssl stumbles over a SSLCertificateChainFile when installing
# a fallback certificate
def test_702_042(self):
    """2.4.40 mod_ssl stumbles over a SSLCertificateChainFile when installing
    a fallback certificate.

    The test only verifies that the server restarts cleanly with such a
    configuration present.
    """
    domain = self.test_domain
    dns_list = [ domain ]
    conf = HttpdConf()
    conf.add_admin( "admin@" + domain )
    conf.add_line( "LogLevel core:debug" )
    conf.add_line( "LogLevel ssl:debug" )
    conf.add_line( "SSLCertificateChainFile %s" % (self._path_conf_ssl("valid_cert.pem")) )
    conf.add_drive_mode( "auto" )
    conf.add_md( dns_list )
    # NOTE(review): other tests call add_vhost(domains) without a leading
    # port argument - confirm this two-argument form is still supported.
    conf.add_vhost( TestEnv.HTTPS_PORT, dns_list)
    conf.install()
    assert TestEnv.apache_restart() == 0
# Make a setup using the base server. It will use http-01 challenge.
def test_702_050(self):
    """Make a setup using the base server. It will use http-01 challenge."""
    domain = self.test_domain
    conf = HttpdConf()
    conf.add_line("""
MDBaseServer on
ServerAdmin admin@%s
ServerName %s
""" % (domain, domain))
    conf.add_md( [ domain ] )
    conf.install()
    assert TestEnv.apache_restart() == 0
    assert TestEnv.await_completion( [ domain ] )
# Make a setup using the base server without http:, will fail.
def test_702_051(self):
    """Make a setup using the base server without http:, will fail.

    With "MDPortMap http:-" no http-01 challenge is possible and no other
    challenge protocol is enabled, so the renewal must end in an error.
    """
    domain = self.test_domain
    conf = HttpdConf()
    conf.add_line("""
MDBaseServer on
MDPortMap http:-
ServerAdmin admin@%s
ServerName %s
""" % (domain, domain))
    conf.add_md( [ domain ] )
    conf.install()
    assert TestEnv.apache_restart() == 0
    assert TestEnv.await_error( domain )
# Make a setup using the base server without http:, but with acme-tls/1, should work.
def test_702_052(self):
    """Make a setup using the base server without http:, but with acme-tls/1, should work."""
    domain = self.test_domain
    conf = HttpdConf()
    conf.add_line("""
MDBaseServer on
MDPortMap http:-
Protocols h2 http/1.1 acme-tls/1
ServerAdmin admin@%s
ServerName %s
SSLEngine on
""" % (domain, domain))
    conf.add_md( [ domain ] )
    conf.install()
    assert TestEnv.apache_restart() == 0
    # acme-tls/1 must be offered for the base server's domain
    stat = TestEnv.get_md_status(domain)
    assert stat["proto"]["acme-tls/1"] == [ domain ]
    assert TestEnv.await_completion( [ domain ] )
# --------- _utils_ ---------
def _write_res_file(self, docRoot, name, content):
if not os.path.exists(docRoot):
os.makedirs(docRoot)
open(os.path.join(docRoot, name), "w").write(content)
def _path_conf_ssl(self, name):
    """Return the path of *name* inside the test Apache SSL configuration directory."""
    return os.path.join(TestEnv.APACHE_SSL_DIR, name)
| 37.34 | 138 | 0.598245 |
acfa0aa5852ad7a0e6b8f9c75a59cbce52f7a5fe | 7,411 | py | Python | testing/AbaqusTestGenerationScripts/mps_aba_model/mps_abaqus_setup.py | KnutAM/matmodfit | 8e2712def4938f564e93f9e40d975c465829a26f | [
"MIT"
] | 4 | 2019-06-21T17:50:46.000Z | 2022-03-30T11:53:32.000Z | testing/AbaqusTestGenerationScripts/mps_aba_model/mps_abaqus_setup.py | KnutAM/matmodfit | 8e2712def4938f564e93f9e40d975c465829a26f | [
"MIT"
] | 1 | 2020-08-13T13:47:13.000Z | 2020-08-13T16:08:44.000Z | testing/AbaqusTestGenerationScripts/mps_aba_model/mps_abaqus_setup.py | KnutAM/matmodfit | 8e2712def4938f564e93f9e40d975c465829a26f | [
"MIT"
] | 3 | 2019-06-21T17:50:49.000Z | 2022-03-30T11:53:25.000Z | # Build a 1x1x1 cube for testing mp simulation versus Abaqus
from mesh import *
from interaction import *
from sketch import *
from part import *
from material import *
from section import *
from assembly import *
from step import *
from load import *
from mesh import *
from job import *
import mps_abaqus_val as val
reload(val)
def setup_model():
    """Build the 1x1x1 single-element cube model and return the Abaqus job.

    Creates sketch, part, material, section, mesh, assembly sets, loading
    step and output requests used to compare matmodfit mps simulations
    against Abaqus.  Returns the created Job object (not yet submitted).
    """
    my_model = mdb.models.values()[0]
    # Create sketch
    my_sketch = my_model.ConstrainedSketch(
        name='__profile__', sheetSize=20)
    my_sketch.rectangle(point1=(0.0, 0.0), point2=(1.0, 1.0))
    # Create the solid/shell
    my_part = my_model.Part(name='CUBE', dimensionality=THREE_D,
        type=DEFORMABLE_BODY)
    my_part.BaseSolidExtrude(sketch=my_sketch, depth=1.0)
    del my_sketch
    # Create the material
    # NOTE(review): create_material returns None in the original code, so
    # my_material is unused here.
    my_material = create_material(my_model)
    # Section (handle unused; the assignment below references it by name)
    my_section = my_model.HomogeneousSolidSection(name='Section-1',
        material=val.MTRL_NAME, thickness=None)
    my_cell = my_part.cells.findAt((( 0.5, 0.5, 0.5),),)
    my_part.SectionAssignment(
        region=Region(cells = my_cell),
        sectionName='Section-1',
        thicknessAssignment=FROM_SECTION)
    # Create mesh
    create_mesh(my_part, my_cell)
    # Create assembly
    my_model.rootAssembly.DatumCsysByDefault(CARTESIAN)
    my_instance = my_model.rootAssembly.Instance(dependent=ON, name=val.INST_NAME, part=my_part)
    my_model.rootAssembly.regenerate()
    # Named face/edge sets on the unit cube (z=0 bottom, z=1 top, axes edges)
    my_part.Set(faces=my_part.faces.findAt(((0.5, 0.5, 0.0),)),
        name=val.BOT_SET_NAME)
    my_part.Set(faces=my_part.faces.findAt(((0.5, 0.5, 1.0),)),
        name=val.TOP_SET_NAME)
    my_part.Set(edges=my_part.edges.findAt(((0.5, 0.0, 0.0),)),
        name=val.XAX_SET_NAME)
    my_part.Set(edges=my_part.edges.findAt(((0.0, 0.5, 0.0),)),
        name=val.YAX_SET_NAME)
    my_part.Set(edges=my_part.edges.findAt(((0.0, 0.0, 0.5),)),
        name=val.ZAX_SET_NAME)
    # Get corners
    get_corners(my_part)
    # Create loading steps
    create_loading(my_model, my_instance)
    # Setup history and field output requests
    my_model.HistoryOutputRequest(
        name='AXIAL_OUTPUT', createStepName='STEP-1',
        region=my_instance.sets['ALL'],
        variables=['U1', 'U2', 'U3', 'RF1', 'RF2', 'RF3'])
    my_model.FieldOutputRequest(name='DefaultFieldOutput',
        createStepName='STEP-1')
    # Create job
    my_job = mdb.Job(name=val.JOB_NAME, model=my_model, userSubroutine=val.umatpath)
    return my_job
def create_material(the_model):
    """Create and return the material ``val.MTRL_NAME`` on *the_model*.

    For ``val.mtrl_model == 'Chaboche_builtin'`` the parameters in
    ``val.mpar`` are mapped onto Abaqus' built-in combined (isotropic +
    kinematic) hardening plasticity with one backstress; for any other
    model a UMAT user material with ``val.nstatv`` state variables is
    declared instead.

    Returns the created material object.  (The original returned ``None``;
    the caller assigned but never used the value, so returning the
    material is backward compatible and makes the assignment meaningful.)
    """
    the_material = the_model.Material(name=val.MTRL_NAME)
    if val.mtrl_model == 'Chaboche_builtin':
        E = val.mpar[0]        # Young's modulus (Elastic table)
        nu = val.mpar[1]       # Poisson's ratio
        sy0 = val.mpar[2]      # initial yield stress
        Hiso = val.mpar[3]     # isotropic hardening parameter
        kinfinv = val.mpar[4]  # inverse saturation value, isotropic part
        Hkin = val.mpar[5]     # kinematic hardening modulus
        binfinv = val.mpar[6]  # inverse saturation value, kinematic part
        the_material.Elastic(table=((E, nu),))
        # One backstress: entries are (sy0, C, gamma) with C = Hkin and
        # gamma = Hkin * binfinv.
        the_material.Plastic(table=((sy0, Hkin, Hkin*binfinv),),
                             hardening=COMBINED, dataType=PARAMETERS,
                             numBackstresses=1)
        the_material.plastic.CyclicHardening(
            table=((sy0, 1.0/kinfinv, Hiso*kinfinv),),
            parameters=ON)
    else:
        the_material.UserMaterial(type=MECHANICAL, unsymm=OFF,
                                  mechanicalConstants=val.mpar)
        the_material.Depvar(n=val.nstatv)
    return the_material
def create_mesh(the_part, the_cell):
    """Mesh *the_cell* of *the_part* with a single linear brick element.

    Uses C3D8 (standard library) and a seed size of 1.0 on the unit cube,
    which yields exactly one element.
    """
    et = ElemType(elemCode=C3D8, elemLibrary=STANDARD,
        secondOrderAccuracy=OFF, distortionControl=DEFAULT)
    the_part.setElementType(elemTypes=(et,), regions=(the_cell,))
    the_part.seedPart(size=1.0, deviationFactor=0.1, minSizeFactor=0.1)
    the_part.generateMesh()
def get_corners(the_part):
    """Create named vertex sets at the corners of the unit cube.

    The sets group the eight corner vertices (o..g below) so that
    create_loading() can impose the components of the deformation
    gradient F via displacement boundary conditions on the right groups.
    """
    names = ['ALL', 'F11_PT', 'F22_PT', 'F33_PT', 'F13_PT', 'F23_PT', 'F11+F13_PT', 'F22+F23_PT', 'BOTZ',
             'P11_PT', 'P22_PT', 'P33_PT']
    # corner coordinates of the 1x1x1 cube
    o = (0,0,0)
    a = (0,0,1)
    b = (1,0,1)
    c = (0,1,1)
    d = (1,1,1)
    e = (1,0,0)
    f = (0,1,0)
    g = (1,1,0)
    # vertex groups corresponding 1:1 to `names` above
    pts = ((o,a,b,c,d,e,f,g),(e,g), (f,g), (a,b,c,d), (a,c), (a,b), (b,d), (c,d), (e,f,g),
           (b,d,e,g), (c,d,f,g), (a,b,c,d))
    the_part.Set(vertices=the_part.vertices.findAt((o,)),name='ORIGIN')
    the_part.Set(vertices=the_part.vertices.findAt((f,)),name='fx0')
    the_part.Set(vertices=the_part.vertices.findAt((e,)),name='ey0')
    for lp,n in zip(pts,names):
        v = []
        for p in lp:
            v.append(the_part.vertices.findAt((p,)))
        the_part.Set(vertices=v,name=n)
def create_loading(the_model, the_instance):
    """Define boundary conditions and the static load step.

    Fixes rigid-body motion (origin, bottom face in z, two edge points),
    then prescribes corner displacements so that the cube deforms with the
    deformation gradient components val.F11/F22/F33/F13/F23 (displacement
    = F_ij - delta_ij on the respective corner sets from get_corners()).
    """
    # Fixed points
    # Origin should always be fixed
    the_model.DisplacementBC(name='ORIGIN', createStepName='Initial',
        region=the_instance.sets['ORIGIN'],
        u1=0.0, u2=0.0, u3=0.0)
    the_model.DisplacementBC(name='BOTZ', createStepName='Initial',
        region=the_instance.sets['BOTZ'], u3=0.0)
    the_model.DisplacementBC(name='fx0', createStepName='Initial',
        region=the_instance.sets['fx0'], u1=0.0)
    the_model.DisplacementBC(name='ey0', createStepName='Initial',
        region=the_instance.sets['ey0'], u2=0.0)
    # Load step with fixed time incrementation matching the mps settings
    the_model.StaticStep(
        name='STEP-1', previous='Initial',timePeriod=val.total_time,
        timeIncrementationMethod=FIXED, initialInc=val.dt,
        maxNumInc=int(val.total_time/val.dt + 2), nlgeom=val.nlgeom)
    # Prescribed points
    the_model.DisplacementBC(name='F11', createStepName='STEP-1',
        region=the_instance.sets['F11_PT'],
        u1=val.F11-1.0)
    the_model.DisplacementBC(name='F22', createStepName='STEP-1',
        region=the_instance.sets['F22_PT'],
        u2=val.F22-1.0)
    the_model.DisplacementBC(name='F33', createStepName='STEP-1',
        region=the_instance.sets['F33_PT'],
        u3=val.F33-1.0)
    the_model.DisplacementBC(name='F13', createStepName='STEP-1',
        region=the_instance.sets['F13_PT'],
        u1=val.F13)
    the_model.DisplacementBC(name='F23', createStepName='STEP-1',
        region=the_instance.sets['F23_PT'],
        u2=val.F23)
    # corners where two F components combine
    the_model.DisplacementBC(name='F11+F13', createStepName='STEP-1',
        region=the_instance.sets['F11+F13_PT'],
        u1=val.F11-1.0+val.F13)
    the_model.DisplacementBC(name='F22+F23', createStepName='STEP-1',
        region=the_instance.sets['F22+F23_PT'],
        u2=val.F22-1.0+val.F23)
if __name__ == '__main__':
setup_model() # run the main function | 37.619289 | 105 | 0.551612 |
acfa0cba7a4843a11cdd6bd7e386d8c57ade2d35 | 126 | py | Python | CIFAR10/models/__init__.py | yhhhli/RegNet-Pytorch | 6035ff822595338efec9a4de21134b134c7dcaa8 | [
"MIT"
] | 35 | 2020-04-27T12:21:43.000Z | 2021-09-13T19:32:35.000Z | CIFAR10/models/__init__.py | yhhhli/RegNet-Pytorch | 6035ff822595338efec9a4de21134b134c7dcaa8 | [
"MIT"
] | 2 | 2020-05-17T03:25:42.000Z | 2020-07-15T09:00:23.000Z | CIFAR10/models/__init__.py | yhhhli/RegNet-Pytorch | 6035ff822595338efec9a4de21134b134c7dcaa8 | [
"MIT"
] | 8 | 2020-04-29T02:23:57.000Z | 2022-02-28T20:39:31.000Z | from .regnet import regnet_200m, regnet_400m, regnet_600m, regnet_800m, regnet_1600m, regnet_3200m, regnet_4000m, regnet_6400m | 126 | 126 | 0.857143 |
acfa0db4ac4b31c754701a9591f7e18e243f02a2 | 4,834 | py | Python | Alerts/backend/alerts.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | null | null | null | Alerts/backend/alerts.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | null | null | null | Alerts/backend/alerts.py | Akhady/Nemea-GUI | 34820f13a588ed18529200d31c7d16d3f53f2020 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T08:04:04.000Z | 2019-06-05T08:04:04.000Z | # Own classes to connect and work with database
from liberouterapi import db, auth, config
from liberouterapi.dbConnector import dbConnector
# Data manipulation
import json
from flask import request
import re
import dateutil.parser
# Connect and select alerts collection
alerts_db = dbConnector('alerts')
alerts_coll = alerts_db.db[config['alerts']['collection']]
@auth.required()
def set_confirmed():
    """Mark the alerts whose IDs are in the request body as confirmed (Status 0)."""
    data = request.json
    ids = data['ids']
    return set_status(ids, 0)
@auth.required()
def set_false_positive():
    """Mark the alerts whose IDs are in the request body as false positives (Status 1)."""
    data = request.json
    ids = data['ids']
    return set_status(ids, 1)
def set_status(ids, status):
    """Set *status* on all alerts in *ids* and clear their 'New' flag.

    Returns a JSON string with "success" and an HTTP-style "errCode".
    """
    try:
        alerts_coll.update_many({'ID': {'$in': ids}}, {'$set': {'Status': status, "New": False}})
        return json.dumps({"success": True, "errCode": 200})
    except Exception:
        # database failure -> report a generic server error to the client
        return json.dumps({"success": False, "errCode": 500})
@auth.required()
def delete_alerts():
    """Delete all alerts whose IDs are listed in the request body."""
    data = request.json
    ids = data['ids']
    try:
        alerts_coll.delete_many({'ID': {'$in': ids}})
        return json.dumps({"success": True, "errCode": 200})
    except Exception:
        # database failure -> report a generic server error to the client
        return json.dumps({"success": False, "errCode": 500})
@auth.required()
def get_detail_of_alert():
    """Return the full alert record for the 'id' query parameter as JSON.

    Side effect: viewing an alert clears its 'New' flag.
    """
    record_id = request.args.get('id')
    alerts_coll.update_one({'ID': record_id}, {"$set": {"New": False}})
    record = alerts_coll.find_one({'ID': record_id}, {'_id': 0})
    return json.dumps(record)
def set_status_comment(record_id):
    """Store the 'StatusComment' from the request body on alert *record_id*.

    NOTE(review): unlike the other endpoints this one has no
    @auth.required() decorator - confirm whether that is intentional.
    """
    data = request.json
    status_comment = data['StatusComment']
    try:
        alerts_coll.update_many({'ID': record_id}, {'$set': {'StatusComment': status_comment}})
        return json.dumps({"success": True, "errCode": 200})
    except Exception:
        # database failure -> report a generic server error to the client
        return json.dumps({"success": False, "errCode": 500})
# Received filter format
# [{'field': 'columnName', 'field2': 'submergedColumnName', 'predicate': '$...', 'value': ['...', ...]}, ...]
# 'predicate': $eq, $ne, $lt, $lte, $gt, $gte, $in, $nin, $exists
@auth.required()
def get_limited_number_of_records():
    """Return one page of alerts without any filter (page/items from the query string)."""
    page = int(request.args.get('page'))
    items = int(request.args.get('items'))
    return get_alerts(page, items, {})
@auth.required()
def get_filtered_alerts():
    """Return one page of alerts matching the UI filter in the request body."""
    data = request.json
    page = int(data['page'])
    items = int(data['items'])
    received_filter = data['filter']
    return get_alerts(page, items, parse_filter_to_query(received_filter))
def get_alerts(page, items, query):
    """Return a JSON page of alert summaries matching the MongoDB *query*.

    *page* is 1-based; *items* is the page size.  The result is
    '{"count": <total matches>, "data": [<summaries>]}' sorted by
    DetectTime descending.  Source/Target are flattened to deduplicated
    lists of IP strings (union of IP4 and IP6 addresses).
    """
    if page < 1:
        return json.dumps({"success": False, "errCode": 500})
    first_item = items * (page - 1)
    # migration: records without a 'New' flag are treated as unseen
    alerts_coll.update_many({"New": {"$exists": False}}, {"$set": {"New": True}})
    project = {'_id': 0, 'DetectTime': 1, 'Category': 1, 'FlowCount': 1, 'Status': 1, 'ID': 1, 'New': 1,
               'Source': {'$setUnion': ['$Source.IP4', '$Source.IP6']},
               'Target': {'$setUnion': ['$Target.IP4', '$Target.IP6']}}
    records = list(alerts_coll.aggregate([{'$match': query}, {'$project': project}, {'$sort': {'DetectTime': -1}},
                                          {'$skip': first_item}, {'$limit': items}], allowDiskUse=True))
    number_of_records = alerts_coll.find(query).count()
    for record in records:
        if record['DetectTime'] is not None:
            record['DetectTime'] = format_datetime(record['DetectTime'])
        if record['Source'] is not None:
            # sum(list_of_lists, []) flattens, set() deduplicates
            record['Source'] = sum(record['Source'], [])
            record['Source'] = list(set(record['Source']))
        else:
            record['Source'] = []
        if record['Target'] is not None:
            record['Target'] = sum(record['Target'], [])
            record['Target'] = list(set(record['Target']))
        else:
            record['Target'] = []
    return json.dumps({"count": number_of_records, "data": records})
def parse_filter_to_query(received_filter):
    """Translate the UI filter list into a MongoDB query document.

    Each filter item {'field', ['field2'], 'predicate', 'value'} becomes
    one clause of an '$and' query.  '$exists' coerces the value to bool,
    '$wildcard' is rewritten to an anchored '$regex', and a 'field2'
    entry wraps the condition in '$elemMatch' on the nested field.
    Note: items of *received_filter* are modified in place.
    """
    clauses = []
    for item in received_filter:
        if item['predicate'] == '$exists':
            item['value'] = bool(item['value'])
        if item['predicate'] == '$wildcard':
            item['predicate'] = '$regex'
            item['value'] = parse_wildcard_to_regex(item['value'])
        condition = {item['predicate']: item['value']}
        if 'field2' in item:
            condition = {'$elemMatch': {item['field2']: condition}}
        clauses.append({item['field']: condition})
    return {'$and': clauses}
def parse_wildcard_to_regex(wild_card):
    """Convert an IP wildcard pattern (e.g. '192.168.*.*') into an anchored regex.

    Dots are escaped to match literally and each '*' matches one numeric
    group of up to three digits (an IPv4 octet).
    """
    # Use raw strings throughout: the original replacement literal '\.'
    # contains an invalid escape sequence, which raises a SyntaxWarning
    # on modern Python.  The produced value is identical.
    wild_card = re.sub(r'\.', r'\.', wild_card)
    wild_card = re.sub(r'\*', r'(?:[0-9]{1,3})', wild_card)
    return '^' + wild_card + '$'
def format_datetime(datetime_string):
    """Parse *datetime_string* and return it as 'YYYY-MM-DD HH:MM:SS'.

    dateutil accepts a wide range of input formats; the result drops any
    timezone information and sub-second precision for display.
    """
    datetime = dateutil.parser.parse(datetime_string)
    # (removed a dead statement: the original called strftime() and
    # discarded its return value)
    datetime = datetime.replace(tzinfo=None).replace(microsecond=0)
    return str(datetime)
| 32.442953 | 114 | 0.600331 |
acfa0dd5c6b2dd2b153b8cc0dfb5e8721a8d0215 | 20,346 | py | Python | docs/doxygen/doxyxml/generated/compound.py | cbdonohue/gr-equalizers | 3b5f86238afff7becfb9988dc6a27a35468d179f | [
"BSD-3-Clause"
] | 15 | 2022-01-26T23:09:23.000Z | 2022-02-17T15:50:32.000Z | docs/doxygen/doxyxml/generated/compound.py | cbdonohue/gr-equalizers | 3b5f86238afff7becfb9988dc6a27a35468d179f | [
"BSD-3-Clause"
] | 7 | 2020-02-06T11:18:58.000Z | 2021-02-05T13:20:05.000Z | docs/doxygen/doxyxml/generated/compound.py | cbdonohue/gr-equalizers | 3b5f86238afff7becfb9988dc6a27a35468d179f | [
"BSD-3-Clause"
] | 4 | 2020-01-21T14:47:10.000Z | 2022-03-09T08:39:06.000Z | #!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from xml.dom import minidom
from xml.dom import Node
import sys
from . import compoundsuper as supermod
from .compoundsuper import MixedContainer
class DoxygenTypeSub(supermod.DoxygenType):
    """Root element of a doxygen compound XML file; holds one compounddef."""
    def __init__(self, version=None, compounddef=None):
        supermod.DoxygenType.__init__(self, version, compounddef)
    def find(self, details):
        # Delegate the lookup (by details.refid) to the compound definition.
        return self.compounddef.find(details)
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class compounddefTypeSub(supermod.compounddefType):
    """<compounddef> subclass adding recursive lookup by reference id."""
    def __init__(self, kind=None, prot=None, id=None, compoundname='', title='', basecompoundref=None, derivedcompoundref=None, includes=None, includedby=None, incdepgraph=None, invincdepgraph=None, innerdir=None, innerfile=None, innerclass=None, innernamespace=None, innerpage=None, innergroup=None, templateparamlist=None, sectiondef=None, briefdescription=None, detaileddescription=None, inheritancegraph=None, collaborationgraph=None, programlisting=None, location=None, listofallmembers=None):
        supermod.compounddefType.__init__(self, kind, prot, id, compoundname, title, basecompoundref, derivedcompoundref, includes, includedby, incdepgraph, invincdepgraph, innerdir, innerfile, innerclass, innernamespace, innerpage, innergroup, templateparamlist, sectiondef, briefdescription, detaileddescription, inheritancegraph, collaborationgraph, programlisting, location, listofallmembers)
    def find(self, details):
        # Match this compound itself first, then search each section's members.
        if self.id == details.refid:
            return self
        for sectiondef in self.sectiondef:
            result = sectiondef.find(details)
            if result:
                return result
        # falls through to an implicit None when nothing matches
supermod.compounddefType.subclass = compounddefTypeSub
# end class compounddefTypeSub
class listofallmembersTypeSub(supermod.listofallmembersType):
def __init__(self, member=None):
supermod.listofallmembersType.__init__(self, member)
supermod.listofallmembersType.subclass = listofallmembersTypeSub
# end class listofallmembersTypeSub
class memberRefTypeSub(supermod.memberRefType):
def __init__(self, virt=None, prot=None, refid=None, ambiguityscope=None, scope='', name=''):
supermod.memberRefType.__init__(self, virt, prot, refid, ambiguityscope, scope, name)
supermod.memberRefType.subclass = memberRefTypeSub
# end class memberRefTypeSub
class compoundRefTypeSub(supermod.compoundRefType):
def __init__(self, virt=None, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.compoundRefType.__init__(self, mixedclass_, content_)
supermod.compoundRefType.subclass = compoundRefTypeSub
# end class compoundRefTypeSub
class reimplementTypeSub(supermod.reimplementType):
def __init__(self, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.reimplementType.__init__(self, mixedclass_, content_)
supermod.reimplementType.subclass = reimplementTypeSub
# end class reimplementTypeSub
class incTypeSub(supermod.incType):
def __init__(self, local=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.incType.__init__(self, mixedclass_, content_)
supermod.incType.subclass = incTypeSub
# end class incTypeSub
class refTypeSub(supermod.refType):
def __init__(self, prot=None, refid=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refType.__init__(self, mixedclass_, content_)
supermod.refType.subclass = refTypeSub
# end class refTypeSub
class refTextTypeSub(supermod.refTextType):
def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
supermod.refTextType.__init__(self, mixedclass_, content_)
supermod.refTextType.subclass = refTextTypeSub
# end class refTextTypeSub
class sectiondefTypeSub(supermod.sectiondefType):
    """<sectiondef> subclass; finds a contained memberdef by reference id."""
    def __init__(self, kind=None, header='', description=None, memberdef=None):
        supermod.sectiondefType.__init__(self, kind, header, description, memberdef)
    def find(self, details):
        for memberdef in self.memberdef:
            if memberdef.id == details.refid:
                return memberdef
        return None
supermod.sectiondefType.subclass = sectiondefTypeSub
# end class sectiondefTypeSub
class memberdefTypeSub(supermod.memberdefType):
def __init__(self, initonly=None, kind=None, volatile=None, const=None, raise_=None, virt=None, readable=None, prot=None, explicit=None, new=None, final=None, writable=None, add=None, static=None, remove=None, sealed=None, mutable=None, gettable=None, inline=None, settable=None, id=None, templateparamlist=None, type_=None, definition='', argsstring='', name='', read='', write='', bitfield='', reimplements=None, reimplementedby=None, param=None, enumvalue=None, initializer=None, exceptions=None, briefdescription=None, detaileddescription=None, inbodydescription=None, location=None, references=None, referencedby=None):
supermod.memberdefType.__init__(self, initonly, kind, volatile, const, raise_, virt, readable, prot, explicit, new, final, writable, add, static, remove, sealed, mutable, gettable, inline, settable, id, templateparamlist, type_, definition, argsstring, name, read, write, bitfield, reimplements, reimplementedby, param, enumvalue, initializer, exceptions, briefdescription, detaileddescription, inbodydescription, location, references, referencedby)
supermod.memberdefType.subclass = memberdefTypeSub
# end class memberdefTypeSub
class descriptionTypeSub(supermod.descriptionType):
def __init__(self, title='', para=None, sect1=None, internal=None, mixedclass_=None, content_=None):
supermod.descriptionType.__init__(self, mixedclass_, content_)
supermod.descriptionType.subclass = descriptionTypeSub
# end class descriptionTypeSub
class enumvalueTypeSub(supermod.enumvalueType):
def __init__(self, prot=None, id=None, name='', initializer=None, briefdescription=None, detaileddescription=None, mixedclass_=None, content_=None):
supermod.enumvalueType.__init__(self, mixedclass_, content_)
supermod.enumvalueType.subclass = enumvalueTypeSub
# end class enumvalueTypeSub
class templateparamlistTypeSub(supermod.templateparamlistType):
def __init__(self, param=None):
supermod.templateparamlistType.__init__(self, param)
supermod.templateparamlistType.subclass = templateparamlistTypeSub
# end class templateparamlistTypeSub
class paramTypeSub(supermod.paramType):
def __init__(self, type_=None, declname='', defname='', array='', defval=None, briefdescription=None):
supermod.paramType.__init__(self, type_, declname, defname, array, defval, briefdescription)
supermod.paramType.subclass = paramTypeSub
# end class paramTypeSub
class linkedTextTypeSub(supermod.linkedTextType):
def __init__(self, ref=None, mixedclass_=None, content_=None):
supermod.linkedTextType.__init__(self, mixedclass_, content_)
supermod.linkedTextType.subclass = linkedTextTypeSub
# end class linkedTextTypeSub
class graphTypeSub(supermod.graphType):
def __init__(self, node=None):
supermod.graphType.__init__(self, node)
supermod.graphType.subclass = graphTypeSub
# end class graphTypeSub
class nodeTypeSub(supermod.nodeType):
def __init__(self, id=None, label='', link=None, childnode=None):
supermod.nodeType.__init__(self, id, label, link, childnode)
supermod.nodeType.subclass = nodeTypeSub
# end class nodeTypeSub
class childnodeTypeSub(supermod.childnodeType):
def __init__(self, relation=None, refid=None, edgelabel=None):
supermod.childnodeType.__init__(self, relation, refid, edgelabel)
supermod.childnodeType.subclass = childnodeTypeSub
# end class childnodeTypeSub
class linkTypeSub(supermod.linkType):
def __init__(self, refid=None, external=None, valueOf_=''):
supermod.linkType.__init__(self, refid, external)
supermod.linkType.subclass = linkTypeSub
# end class linkTypeSub
class listingTypeSub(supermod.listingType):
def __init__(self, codeline=None):
supermod.listingType.__init__(self, codeline)
supermod.listingType.subclass = listingTypeSub
# end class listingTypeSub
class codelineTypeSub(supermod.codelineType):
def __init__(self, external=None, lineno=None, refkind=None, refid=None, highlight=None):
supermod.codelineType.__init__(self, external, lineno, refkind, refid, highlight)
supermod.codelineType.subclass = codelineTypeSub
# end class codelineTypeSub
class highlightTypeSub(supermod.highlightType):
def __init__(self, class_=None, sp=None, ref=None, mixedclass_=None, content_=None):
supermod.highlightType.__init__(self, mixedclass_, content_)
supermod.highlightType.subclass = highlightTypeSub
# end class highlightTypeSub
class referenceTypeSub(supermod.referenceType):
def __init__(self, endline=None, startline=None, refid=None, compoundref=None, valueOf_='', mixedclass_=None, content_=None):
supermod.referenceType.__init__(self, mixedclass_, content_)
supermod.referenceType.subclass = referenceTypeSub
# end class referenceTypeSub
# ---------------------------------------------------------------------------
# generateDS "*Sub" subclass hooks for generated doxygen-XML bindings.
# Each subclass keeps the full generated __init__ signature for API
# compatibility but forwards only the arguments its superclass constructor
# accepts; the remaining structural parameters (id, title, para, sect*,
# internal, valueOf_, ...) are dropped here — presumably the generated
# build() machinery fills them in from the XML (not visible in this file).
# The trailing ``supermod.<Type>.subclass = <Type>Sub`` assignment registers
# the subclass so the generated parser instantiates it instead of the base.
# ---------------------------------------------------------------------------
class locationTypeSub(supermod.locationType):
    # Forwards every location attribute except valueOf_ to the superclass.
    def __init__(self, bodystart=None, line=None, bodyend=None, bodyfile=None, file=None, valueOf_=''):
        supermod.locationType.__init__(self, bodystart, line, bodyend, bodyfile, file)
supermod.locationType.subclass = locationTypeSub
# end class locationTypeSub
class docSect1TypeSub(supermod.docSect1Type):
    # Only mixed-content arguments reach the superclass; id/title/para/sect2/
    # internal are accepted but not forwarded (same for the sect2-4 variants).
    def __init__(self, id=None, title='', para=None, sect2=None, internal=None, mixedclass_=None, content_=None):
        supermod.docSect1Type.__init__(self, mixedclass_, content_)
supermod.docSect1Type.subclass = docSect1TypeSub
# end class docSect1TypeSub
class docSect2TypeSub(supermod.docSect2Type):
    def __init__(self, id=None, title='', para=None, sect3=None, internal=None, mixedclass_=None, content_=None):
        supermod.docSect2Type.__init__(self, mixedclass_, content_)
supermod.docSect2Type.subclass = docSect2TypeSub
# end class docSect2TypeSub
class docSect3TypeSub(supermod.docSect3Type):
    def __init__(self, id=None, title='', para=None, sect4=None, internal=None, mixedclass_=None, content_=None):
        supermod.docSect3Type.__init__(self, mixedclass_, content_)
supermod.docSect3Type.subclass = docSect3TypeSub
# end class docSect3TypeSub
class docSect4TypeSub(supermod.docSect4Type):
    def __init__(self, id=None, title='', para=None, internal=None, mixedclass_=None, content_=None):
        supermod.docSect4Type.__init__(self, mixedclass_, content_)
supermod.docSect4Type.subclass = docSect4TypeSub
# end class docSect4TypeSub
class docInternalTypeSub(supermod.docInternalType):
    def __init__(self, para=None, sect1=None, mixedclass_=None, content_=None):
        supermod.docInternalType.__init__(self, mixedclass_, content_)
supermod.docInternalType.subclass = docInternalTypeSub
# end class docInternalTypeSub
class docInternalS1TypeSub(supermod.docInternalS1Type):
    def __init__(self, para=None, sect2=None, mixedclass_=None, content_=None):
        supermod.docInternalS1Type.__init__(self, mixedclass_, content_)
supermod.docInternalS1Type.subclass = docInternalS1TypeSub
# end class docInternalS1TypeSub
class docInternalS2TypeSub(supermod.docInternalS2Type):
    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        supermod.docInternalS2Type.__init__(self, mixedclass_, content_)
supermod.docInternalS2Type.subclass = docInternalS2TypeSub
# end class docInternalS2TypeSub
class docInternalS3TypeSub(supermod.docInternalS3Type):
    # NOTE(review): takes ``sect3`` where S2 also takes ``sect3`` — looks like
    # a generator quirk (S3 might be expected to take sect4); harmless since
    # the argument is dropped anyway.
    def __init__(self, para=None, sect3=None, mixedclass_=None, content_=None):
        supermod.docInternalS3Type.__init__(self, mixedclass_, content_)
supermod.docInternalS3Type.subclass = docInternalS3TypeSub
# end class docInternalS3TypeSub
class docInternalS4TypeSub(supermod.docInternalS4Type):
    def __init__(self, para=None, mixedclass_=None, content_=None):
        supermod.docInternalS4Type.__init__(self, mixedclass_, content_)
supermod.docInternalS4Type.subclass = docInternalS4TypeSub
# end class docInternalS4TypeSub
# ---------------------------------------------------------------------------
# generateDS "*Sub" subclasses for inline/markup and table/list doxygen
# elements.  Pattern: keep the full generated __init__ signature, forward
# only what the superclass constructor takes, then register the subclass via
# ``supermod.<Type>.subclass = <Type>Sub`` so the generated parser builds
# the subclass.  Mixed-content types forward (mixedclass_, content_) and
# drop their attribute parameters (url, id, refid, ...); plain container
# types forward all of their child parameters.
# ---------------------------------------------------------------------------
class docURLLinkSub(supermod.docURLLink):
    def __init__(self, url=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docURLLink.__init__(self, mixedclass_, content_)
supermod.docURLLink.subclass = docURLLinkSub
# end class docURLLinkSub
class docAnchorTypeSub(supermod.docAnchorType):
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docAnchorType.__init__(self, mixedclass_, content_)
supermod.docAnchorType.subclass = docAnchorTypeSub
# end class docAnchorTypeSub
class docFormulaTypeSub(supermod.docFormulaType):
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docFormulaType.__init__(self, mixedclass_, content_)
supermod.docFormulaType.subclass = docFormulaTypeSub
# end class docFormulaTypeSub
class docIndexEntryTypeSub(supermod.docIndexEntryType):
    # Container type: both fields are forwarded unchanged.
    def __init__(self, primaryie='', secondaryie=''):
        supermod.docIndexEntryType.__init__(self, primaryie, secondaryie)
supermod.docIndexEntryType.subclass = docIndexEntryTypeSub
# end class docIndexEntryTypeSub
class docListTypeSub(supermod.docListType):
    def __init__(self, listitem=None):
        supermod.docListType.__init__(self, listitem)
supermod.docListType.subclass = docListTypeSub
# end class docListTypeSub
class docListItemTypeSub(supermod.docListItemType):
    def __init__(self, para=None):
        supermod.docListItemType.__init__(self, para)
supermod.docListItemType.subclass = docListItemTypeSub
# end class docListItemTypeSub
class docSimpleSectTypeSub(supermod.docSimpleSectType):
    def __init__(self, kind=None, title=None, para=None):
        supermod.docSimpleSectType.__init__(self, kind, title, para)
supermod.docSimpleSectType.subclass = docSimpleSectTypeSub
# end class docSimpleSectTypeSub
class docVarListEntryTypeSub(supermod.docVarListEntryType):
    def __init__(self, term=None):
        supermod.docVarListEntryType.__init__(self, term)
supermod.docVarListEntryType.subclass = docVarListEntryTypeSub
# end class docVarListEntryTypeSub
class docRefTextTypeSub(supermod.docRefTextType):
    # refid/kindref/external/valueOf_ accepted but not forwarded.
    def __init__(self, refid=None, kindref=None, external=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docRefTextType.__init__(self, mixedclass_, content_)
supermod.docRefTextType.subclass = docRefTextTypeSub
# end class docRefTextTypeSub
class docTableTypeSub(supermod.docTableType):
    def __init__(self, rows=None, cols=None, row=None, caption=None):
        supermod.docTableType.__init__(self, rows, cols, row, caption)
supermod.docTableType.subclass = docTableTypeSub
# end class docTableTypeSub
class docRowTypeSub(supermod.docRowType):
    def __init__(self, entry=None):
        supermod.docRowType.__init__(self, entry)
supermod.docRowType.subclass = docRowTypeSub
# end class docRowTypeSub
class docEntryTypeSub(supermod.docEntryType):
    def __init__(self, thead=None, para=None):
        supermod.docEntryType.__init__(self, thead, para)
supermod.docEntryType.subclass = docEntryTypeSub
# end class docEntryTypeSub
class docHeadingTypeSub(supermod.docHeadingType):
    def __init__(self, level=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docHeadingType.__init__(self, mixedclass_, content_)
supermod.docHeadingType.subclass = docHeadingTypeSub
# end class docHeadingTypeSub
class docImageTypeSub(supermod.docImageType):
    def __init__(self, width=None, type_=None, name=None, height=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docImageType.__init__(self, mixedclass_, content_)
supermod.docImageType.subclass = docImageTypeSub
# end class docImageTypeSub
class docDotFileTypeSub(supermod.docDotFileType):
    def __init__(self, name=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docDotFileType.__init__(self, mixedclass_, content_)
supermod.docDotFileType.subclass = docDotFileTypeSub
# end class docDotFileTypeSub
class docTocItemTypeSub(supermod.docTocItemType):
    def __init__(self, id=None, valueOf_='', mixedclass_=None, content_=None):
        supermod.docTocItemType.__init__(self, mixedclass_, content_)
supermod.docTocItemType.subclass = docTocItemTypeSub
# end class docTocItemTypeSub
class docTocListTypeSub(supermod.docTocListType):
    def __init__(self, tocitem=None):
        supermod.docTocListType.__init__(self, tocitem)
supermod.docTocListType.subclass = docTocListTypeSub
# end class docTocListTypeSub
class docLanguageTypeSub(supermod.docLanguageType):
    def __init__(self, langid=None, para=None):
        supermod.docLanguageType.__init__(self, langid, para)
supermod.docLanguageType.subclass = docLanguageTypeSub
# end class docLanguageTypeSub
# ---------------------------------------------------------------------------
# generateDS "*Sub" subclasses for parameter lists, cross-references and
# character elements.  Same registration pattern: accept the generated
# __init__ signature, forward what the superclass constructor takes, and
# register via ``supermod.<Type>.subclass = <Type>Sub``.
# ---------------------------------------------------------------------------
class docParamListTypeSub(supermod.docParamListType):
    def __init__(self, kind=None, parameteritem=None):
        supermod.docParamListType.__init__(self, kind, parameteritem)
supermod.docParamListType.subclass = docParamListTypeSub
# end class docParamListTypeSub
class docParamListItemSub(supermod.docParamListItem):
    def __init__(self, parameternamelist=None, parameterdescription=None):
        supermod.docParamListItem.__init__(self, parameternamelist, parameterdescription)
supermod.docParamListItem.subclass = docParamListItemSub
# end class docParamListItemSub
class docParamNameListSub(supermod.docParamNameList):
    def __init__(self, parametername=None):
        supermod.docParamNameList.__init__(self, parametername)
supermod.docParamNameList.subclass = docParamNameListSub
# end class docParamNameListSub
class docParamNameSub(supermod.docParamName):
    # direction/ref are accepted but only mixed content is forwarded.
    def __init__(self, direction=None, ref=None, mixedclass_=None, content_=None):
        supermod.docParamName.__init__(self, mixedclass_, content_)
supermod.docParamName.subclass = docParamNameSub
# end class docParamNameSub
class docXRefSectTypeSub(supermod.docXRefSectType):
    def __init__(self, id=None, xreftitle=None, xrefdescription=None):
        supermod.docXRefSectType.__init__(self, id, xreftitle, xrefdescription)
supermod.docXRefSectType.subclass = docXRefSectTypeSub
# end class docXRefSectTypeSub
class docCopyTypeSub(supermod.docCopyType):
    def __init__(self, link=None, para=None, sect1=None, internal=None):
        supermod.docCopyType.__init__(self, link, para, sect1, internal)
supermod.docCopyType.subclass = docCopyTypeSub
# end class docCopyTypeSub
class docCharTypeSub(supermod.docCharType):
    # valueOf_ is accepted but dropped; only ``char`` is forwarded.
    def __init__(self, char=None, valueOf_=''):
        supermod.docCharType.__init__(self, char)
supermod.docCharType.subclass = docCharTypeSub
# end class docCharTypeSub
class docParaTypeSub(supermod.docParaType):
    """Paragraph subclass that additionally collects parameter lists,
    simple sections and mixed content into dedicated lists while the
    generated parser walks the paragraph's children."""
    def __init__(self, char=None, valueOf_=''):
        supermod.docParaType.__init__(self, char)
        # Extra accumulators filled by buildChildren below.
        self.parameterlist = []
        self.simplesects = []
        self.content = []
    def buildChildren(self, child_, nodeName_):
        # Let the generated superclass process the child first.
        # NOTE(review): the superclass may also record some of these
        # children — confirm there is no double-handling.
        supermod.docParaType.buildChildren(self, child_, nodeName_)
        if child_.nodeType == Node.TEXT_NODE:
            # Plain text: wrap it in a mixed-content container.
            obj_ = self.mixedclass_(MixedContainer.CategoryText,
                MixedContainer.TypeNone, '', child_.nodeValue)
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == "ref":
            # Cross-reference element goes into the mixed content stream.
            obj_ = supermod.docRefTextType.factory()
            obj_.build(child_)
            self.content.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'parameterlist':
            # Parameter documentation is collected separately.
            obj_ = supermod.docParamListType.factory()
            obj_.build(child_)
            self.parameterlist.append(obj_)
        elif child_.nodeType == Node.ELEMENT_NODE and \
            nodeName_ == 'simplesect':
            # @return/@see/... style sections are collected separately.
            obj_ = supermod.docSimpleSectType.factory()
            obj_.build(child_)
            self.simplesects.append(obj_)
supermod.docParaType.subclass = docParaTypeSub
# end class docParaTypeSub
def parse(inFilename):
    """Parse a doxygen XML file and return the populated DoxygenType root.

    :param inFilename: path (or file object) accepted by minidom.parse
    :returns: the DoxygenType instance built from the document element
    """
    root_obj = supermod.DoxygenType.factory()
    root_obj.build(minidom.parse(inFilename).documentElement)
    return root_obj
| 40.209486 | 628 | 0.774747 |
acfa0e55dc8f624ea7c1612098f3ff1b277f7322 | 213 | py | Python | 0-python-tutorial/11-tuples06.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/11-tuples06.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
] | null | null | null | 0-python-tutorial/11-tuples06.py | luis2ra/py3-00-w3schools | 6bb851837f8ef9520491d13fa2c909047c9b18cf | [
"MIT"
# Demo Python Tuples
'''
Tuple Length
To determine how many items a tuple has, use the built-in len() function.
'''
# Print the number of items in the tuple:
thistuple = ("apple", "banana", "cherry")
print(len(thistuple))
acfa0eb6e973c8b6e5567e07dfd5f79da911d77e | 16,940 | py | Python | cloud_info_provider/tests/test_static.py | maricaantonacci/cloud-info-provider-deep | 3b8dff97c536da0e49ea8b367be5c9717a721c85 | [
"Apache-2.0"
] | 1 | 2020-02-03T17:18:07.000Z | 2020-02-03T17:18:07.000Z | cloud_info_provider/tests/test_static.py | maricaantonacci/cloud-info-provider-deep | 3b8dff97c536da0e49ea8b367be5c9717a721c85 | [
"Apache-2.0"
] | 9 | 2018-10-16T12:47:06.000Z | 2021-01-07T11:59:00.000Z | cloud_info_provider/tests/test_static.py | maricaantonacci/cloud-info-provider-deep | 3b8dff97c536da0e49ea8b367be5c9717a721c85 | [
"Apache-2.0"
import os.path
import mock
import six
from cloud_info_provider import exceptions
from cloud_info_provider.providers import static as static_provider
from cloud_info_provider.tests import base
from cloud_info_provider.tests import data
DATA = data.DATA
class StaticProviderTest(base.TestCase):
    """Unit tests for the YAML-backed static cloud info provider.

    The provider under test is built in setUp from the shipped
    ``etc/sample.static.yaml``; individual tests often override
    ``self.provider.yaml`` with an inline dict to exercise a specific
    lookup path.

    NOTE(review): several tests mutate the shared ``DATA`` fixture dicts
    in place (``expected = DATA.compute_endpoints; expected.update(...)``)
    — confirm test isolation is not affected by run order.
    """
    def setUp(self):
        super(StaticProviderTest, self).setUp()
        # Minimal stand-in for the parsed CLI options object.
        class Opts(object):
            yaml_file = None
            site_in_suffix = False
            glite_site_info_static = "foo"
            debug = False
        # Point the provider at the sample YAML shipped with the package.
        cwd = os.path.dirname(__file__)
        yaml_file = os.path.join(cwd, "..", "..", "etc", "sample.static.yaml")
        self.opts = Opts()
        self.opts.yaml_file = yaml_file
        self.provider = static_provider.StaticProvider(self.opts)
    def test_get_fields(self):
        # Table-driven cases for _get_fields_and_prefix: missing fields come
        # back as None (or the supplied default) and keys get the prefix.
        cases = (
            (
                ['bar', 'baz', 'foobar'],  # fields
                '',  # prefix
                {'bar': 1, 'baz': 2, 'bazonk': 3},  # data
                {},  # yaml
                {},  # defaults
                {'bar': 1, 'foobar': None, 'baz': 2}  # expected
            ),
            (
                ['bar', 'baz', 'foobar'],
                'foo_',
                {'bar': 1, 'baz': 2, 'bazonk': 3},
                {},
                {},
                {'foo_bar': 1, 'foo_foobar': None, 'foo_baz': 2}
            ),
            (
                # data=None: values are taken from the provider's yaml.
                ['bar', 'baz', 'foobar'],
                'foo_',
                None,
                {'bar': 1, 'baz': 2, 'bazonk': 3},
                {},
                {'foo_bar': 1, 'foo_foobar': None, 'foo_baz': 2}
            ),
            (
                # defaults fill in fields missing from the data.
                ['bar', 'baz', 'foobar'],
                'foo_',
                {'bar': 1, 'baz': 2, 'bazonk': 3},
                {},
                {'foobar': 'barfoo'},
                {'foo_bar': 1, 'foo_foobar': 'barfoo', 'foo_baz': 2}
            ),
        )
        for fields, prefix, indata, yaml, defaults, expected in cases:
            self.provider.yaml = yaml
            ret = self.provider._get_fields_and_prefix(fields,
                                                       prefix,
                                                       indata,
                                                       defaults=defaults)
            self.assertEqual(expected, ret)
    def test_get_defaults_from_yaml(self):
        # _get_defaults_from_yaml reads yaml[what][which]['defaults'] and
        # optionally prefixes the resulting keys.
        cases = (
            (
                'foo',  # what
                'bar',  # which
                '',  # prefix
                {},  # yaml
                {},  # expected
            ),
            (
                # 'defaults: None' is treated like no defaults at all.
                'foo',
                'bar',
                '',
                {'foo': {'bar': {'defaults': None}}},
                {},
            ),
            (
                'foo',
                'bar',
                '',
                {'foo': {'bar': {'defaults': {'foobar': 'barfoo'}}}},
                {'foobar': 'barfoo'},
            ),
            (
                'foo',
                'bar',
                'brink_',
                {'foo': {'bar': {'defaults': {'foobar': 'barfoo'}}}},
                {'brink_foobar': 'barfoo'},
            ),
        )
        for what, which, prefix, yaml, expected in cases:
            self.provider.yaml = yaml
            ret = self.provider._get_defaults_from_yaml(what,
                                                        which,
                                                        prefix=prefix)
            self.assertEqual(expected, ret)
    def test_get_what(self):
        # _get_what extracts a 'which' sub-tree (with per-entry fields) and
        # optional global fields from yaml[what], prefixing keys with 'what_'.
        cases = (
            (
                'foo',  # What
                None,  # which
                None,  # g_fields
                None,  # fields
                None,  # prefix
                {},  # yaml
                {},  # expected
            ),
            (
                'foo',
                'bar',
                None,
                None,
                None,
                {'foo': {}},
                {'bar': {}},
            ),
            (
                'foo',
                'bar',
                None,
                ['bazonk'],
                None,
                {'foo': {'bar': {'baz': {'bazonk': 1}}}},
                {'bar': {'baz': {'foo_bazonk': 1}}}
            ),
            (
                'foo',
                'bar',
                ['bronk'],
                ['bazonk'],
                None,
                {'foo': {'bronk': 'brink', 'bar': {'baz': {'bazonk': 1}}}},
                {'bar': {'baz': {'foo_bazonk': 1}}, 'foo_bronk': 'brink'}
            ),
        )
        for what, which, g_fields, fields, prefix, yaml, expected in cases:
            self.provider.yaml = yaml
            ret = self.provider._get_what(what,
                                          which,
                                          g_fields,
                                          fields,
                                          prefix=prefix)
            self.assertEqual(expected, ret)
    def test_get_image_defaults(self):
        yaml = {'compute': {'images': {'defaults': {'foo': 'bar',
                                                    'baz': 'bazonk'}}}}
        self.provider.yaml = yaml
        self.assertEqual({'foo': 'bar', 'baz': 'bazonk'},
                         self.provider.get_image_defaults())
        # prefix=True prepends 'image_' to every key.
        self.assertEqual({'image_foo': 'bar', 'image_baz': 'bazonk'},
                         self.provider.get_image_defaults(prefix=True))
    def test_get_template_defaults(self):
        yaml = {'compute': {'templates': {'defaults': {'foo': 'bar',
                                                       'baz': 'bazonk'}}}}
        self.provider.yaml = yaml
        # network_out defaults to True even when absent from the YAML.
        self.assertEqual({'foo': 'bar', 'baz': 'bazonk', 'network_out': True},
                         self.provider.get_template_defaults())
        self.assertEqual({'template_foo': 'bar', 'template_baz': 'bazonk',
                          'template_network_out': True},
                         self.provider.get_template_defaults(prefix=True))
    def test_get_compute_endpoint_defaults(self):
        # Compute endpoint defaults merge the YAML-provided values with a
        # fixed set of built-in defaults (asserted below after popping the
        # YAML-provided keys out).
        yaml = {'compute': {'endpoints': {'defaults': {'foo': 'bar',
                                                       'baz': 'bazonk'}}}}
        self.provider.yaml = yaml
        unprefixed = self.provider.get_compute_endpoint_defaults()
        self.assertEqual(unprefixed.pop('foo'), 'bar')
        self.assertEqual(unprefixed.pop('baz'), 'bazonk')
        self.assertEqual({'api_authn_method': 'oidc',
                          'api_endpoint_technology': 'webservice',
                          'capabilities': [
                              'executionmanagement.dynamicvmdeploy',
                              'security.accounting'
                          ],
                          'failover': False,
                          'live_migration': False,
                          'max_dedicated_ram': 0,
                          'min_dedicated_ram': 0,
                          'production_level': 'production',
                          'service_capabilities': [
                              'executionmanagement.dynamicvmdeploy',
                              'security.accounting'
                          ],
                          'service_production_level': 'production',
                          'total_cores': 0,
                          'total_ram': 0,
                          'total_accelerators': 0,
                          'vm_backup_restore': False}, unprefixed)
        # Same check with 'compute_'-prefixed keys.
        prefixed = self.provider.get_compute_endpoint_defaults(prefix=True)
        self.assertEqual(prefixed.pop('compute_foo'), 'bar')
        self.assertEqual(prefixed.pop('compute_baz'), 'bazonk')
        self.assertEqual({'compute_api_authn_method': 'oidc',
                          'compute_api_endpoint_technology': 'webservice',
                          'compute_capabilities': [
                              'executionmanagement.dynamicvmdeploy',
                              'security.accounting'
                          ],
                          'compute_failover': False,
                          'compute_live_migration': False,
                          'compute_max_dedicated_ram': 0,
                          'compute_min_dedicated_ram': 0,
                          'compute_production_level': 'production',
                          'compute_service_capabilities': [
                              'executionmanagement.dynamicvmdeploy',
                              'security.accounting'
                          ],
                          'compute_service_production_level': 'production',
                          'compute_total_cores': 0,
                          'compute_total_ram': 0,
                          'compute_total_accelerators': 0,
                          'compute_vm_backup_restore': False}, prefixed)
    def test_get_storage_endpoint_defaults(self):
        yaml = {'storage': {'endpoints': {'defaults': {'foo': 'bar',
                                                       'baz': 'bazonk'}}}}
        self.provider.yaml = yaml
        self.assertEqual({'foo': 'bar', 'baz': 'bazonk'},
                         self.provider.get_storage_endpoint_defaults())
        self.assertEqual(
            {'storage_foo': 'bar', 'storage_baz': 'bazonk'},
            self.provider.get_storage_endpoint_defaults(prefix=True)
        )
    def test_get_empty_storage_endpoints(self):
        expected = {}
        self.provider.yaml = {}
        self.assertEqual(expected, self.provider.get_storage_endpoints())
    def test_get_empty_compute_endpoints(self):
        expected = {}
        self.provider.yaml = {}
        self.assertEqual(expected, self.provider.get_compute_endpoints())
    def test_get_default_storage_service_name(self):
        # With no name configured, the service name falls back to the FQDN.
        self.provider.yaml = {'storage': {'endpoints': {}}}
        with mock.patch('socket.getfqdn') as m_fqdn:
            m_fqdn.return_value = 'foo'
            ep = self.provider.get_storage_endpoints()
        self.assertEqual('foo', ep.get('storage_service_name'))
    # NOTE(review): disabled upstream — the compute counterpart of the
    # FQDN-fallback test above; left as-is intentionally.
    # def test_get_default_compute_service_name(self):
    #     self.provider.yaml = {'compute': {'endpoints': {}}}
    #     with mock.patch('socket.getfqdn') as m_fqdn:
    #         m_fqdn.return_value = 'foo'
    #         ep = self.provider.get_compute_endpoints()
    #         self.assertEqual('foo', ep.get('compute_service_name'))
    def test_get_storage_endpoints(self):
        expected = DATA.storage_endpoints
        expected.update({
            'storage_iam_enabled': None,
        })
        with mock.patch('socket.getfqdn') as m_fqdn:
            m_fqdn.return_value = 'example.org'
            self.assertEqual(expected, self.provider.get_storage_endpoints())
    def test_get_compute_endpoints(self):
        expected = DATA.compute_endpoints
        # fill in missing values
        expected.update({
            'compute_accelerators_virt_type': None,
            'compute_network_virt_type': None,
            'compute_cpu_virt_type': None,
            'compute_virtual_disk_formats': None,
            'compute_public_ip_assignable': None,
            'compute_iam_enabled': None,
        })
        # NOTE(review): FQDN patching disabled upstream — presumably the
        # compute path no longer consults socket.getfqdn; confirm.
        # with mock.patch('socket.getfqdn') as m_fqdn:
        #     m_fqdn.return_value = 'example.org'
        #     self.assertEqual(expected, self.provider.get_compute_endpoints())
        self.assertEqual(expected, self.provider.get_compute_endpoints())
    def test_no_site_name(self):
        # A bogus GLite static-info path must raise, not return junk.
        self.opts.glite_site_info_static = "This does not exist"
        self.assertRaises(exceptions.StaticProviderException,
                          self.provider.get_site_info)
    def test_get_suffix_default(self):
        site_info = {'site_name': 'SITE_NAME'}
        self.assertEqual("o=glue", self.provider._get_suffix(site_info))
    def test_get_suffix_site_in_suffix(self):
        # site_in_suffix=True inserts the GLUE2 domain into the LDAP suffix.
        site_info = {'site_name': 'SITE_NAME'}
        self.provider.opts.site_in_suffix = True
        self.assertEqual("GLUE2DomainID=SITE_NAME,o=glue",
                         self.provider._get_suffix(site_info))
    def test_get_site_info_no(self):
        # Replace the provider module's open() so get_site_info reads the
        # in-memory GLite static info instead of a real file.
        data = six.StringIO("SITE_NAME = SITE_NAME")
        expected = DATA.site_info
        with mock.patch('cloud_info_provider.providers.static.open',
                        create=True) as m_open:
            m_open.return_value.__enter__ = lambda x: data
            m_open.return_value.__exit__ = mock.Mock()
            self.assertEqual(expected, self.provider.get_site_info())
    def test_get_images(self):
        expected = DATA.compute_images
        # add undefined values
        for img in expected.values():
            for field in ['image_accel_type',
                          'image_access_info',
                          'image_context_format',
                          'image_description',
                          'image_id',
                          'image_minimal_accel',
                          'image_minimal_cpu',
                          'image_minimal_ram',
                          'image_native_id',
                          'image_recommended_accel',
                          'image_recommended_cpu',
                          'image_recommended_ram',
                          'image_software',
                          'image_traffic_in',
                          'image_traffic_out']:
                if field not in img:
                    img[field] = None
        self.assertEqual(expected, self.provider.get_images())
    def test_get_images_with_yaml(self):
        # Inline YAML: the 'defaults' platform applies where an image does
        # not override it (fooid inherits amd64, barid overrides with i686).
        yaml = {
            'compute': {
                'images': {
                    'defaults': {
                        'platform': 'amd64',
                    },
                    'os#fooid': {
                        'name': 'Foo Image',
                        'version': 1.0,
                        'marketplace_id': 'http://example.org/foo',
                        'os_family': 'linux',
                        'os_name': 'Cirros',
                        'os_version': 1.0,
                        'os_type': 'linux',
                        'architecture': 'amd64',
                    },
                    'os#barid': {
                        'name': 'Bar Image',
                        'version': 2.0,
                        'marketplace_id': 'http://example.org/bar',
                        'os_family': 'linux',
                        'os_name': 'Cirros',
                        'os_version': 2.0,
                        'os_type': 'linux',
                        'platform': 'i686',
                        'architecture': 'i686',
                    },
                }
            }
        }
        expected = {
            'os#barid': {
                'image_marketplace_id': 'http://example.org/bar',
                'image_name': 'Bar Image',
                'image_os_family': 'linux',
                'image_os_name': 'Cirros',
                'image_os_type': 'linux',
                'image_os_version': 2.0,
                'image_platform': 'i686',
                'image_architecture': 'i686',
                'image_version': 2.0
            },
            'os#fooid': {
                'image_marketplace_id': 'http://example.org/foo',
                'image_name': 'Foo Image',
                'image_os_family': 'linux',
                'image_os_name': 'Cirros',
                'image_os_type': 'linux',
                'image_os_version': 1.0,
                'image_platform': 'amd64',
                'image_architecture': 'amd64',
                'image_version': 1.0
            }
        }
        # Fields absent from the YAML come back as None.
        for img in expected.values():
            for field in ['image_accel_type',
                          'image_access_info',
                          'image_context_format',
                          'image_description',
                          'image_id',
                          'image_minimal_accel',
                          'image_minimal_cpu',
                          'image_minimal_ram',
                          'image_native_id',
                          'image_recommended_accel',
                          'image_recommended_cpu',
                          'image_recommended_ram',
                          'image_software',
                          'image_traffic_in',
                          'image_traffic_out']:
                if field not in img:
                    img[field] = None
        self.provider.yaml = yaml
        self.assertEqual(expected, self.provider.get_images())
    def test_get_templates(self):
        expected = DATA.compute_templates
        for tpl in expected.values():
            # default values from file
            tpl.update({
                'template_disk': None,
                'template_ephemeral': None,
                'template_network_in': 'undefined',
                'template_network_out': True,
            })
        self.assertEqual(expected, self.provider.get_templates())
| 39.95283 | 79 | 0.449705 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.