blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
13d80f56e85681da4140ed64b47d36026edbf212 | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2019_11_01/operations/_subscriptions_operations.py | 44a1d6b996b444a635fd1f5eca5b7f564867551d | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 10,390 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class SubscriptionsOperations(object):
    """SubscriptionsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.resource.subscriptions.v2019_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so callers can reach the generated model classes via the operation group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Store the shared pipeline client and (de)serializers used by every call.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list_locations(
        self,
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.LocationListResult"]
        """Gets all available geo-locations.
        This operation provides all the locations that are available for resource providers; however,
        each resource provider may support a subset of this list.
        :param subscription_id: The ID of the target subscription.
        :type subscription_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either LocationListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.subscriptions.v2019_11_01.models.LocationListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.LocationListResult"]
        # Map well-known HTTP failures to the richest matching azure-core error type.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build either the initial page request or a follow-up request to next_link.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_locations.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('LocationListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # The locations endpoint returns everything in a single page, so
            # there is never a continuation link (first tuple element is None).
            return None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_locations.metadata = {'url': '/subscriptions/{subscriptionId}/locations'}  # type: ignore
    def get(
        self,
        subscription_id,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.Subscription"
        """Gets details about a specified subscription.
        :param subscription_id: The ID of the target subscription.
        :type subscription_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Subscription, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.subscriptions.v2019_11_01.models.Subscription
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.Subscription"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("subscription_id", subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Single (non-paged) GET; non-200 responses surface as ARM-formatted errors.
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Subscription', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}'}  # type: ignore
    def list(
        self,
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.SubscriptionListResult"]
        """Gets all subscriptions for a tenant.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SubscriptionListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.subscriptions.v2019_11_01.models.SubscriptionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SubscriptionListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            deserialized = self._deserialize('SubscriptionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Unlike list_locations, this result is paged: follow the
            # server-supplied continuation link until it is exhausted.
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions'}  # type: ignore
| [
"noreply@github.com"
] | scbedd.noreply@github.com |
2614cf1f44792beeb55c2a2e4257282366b8da9c | f33b30743110532ddae286ba1b34993e61669ab7 | /869. Reordered Power of 2.py | 9fecfa97156c715493e1bcf2e58aab9b47cf8034 | [] | no_license | c940606/leetcode | fe9dcee7a5daa4d52999d5f53253dd6dd33c348b | 631df2ce6892a6fbb3e435f57e90d85f8200d125 | refs/heads/master | 2021-07-10T14:01:26.164966 | 2020-08-16T10:46:16 | 2020-08-16T10:46:16 | 186,588,449 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | class Solution:
def reorderedPowerOf2(self, N):
"""
:type N: int
:rtype: bool
"""
from collections import Counter
c = Counter(str(N))
return any(c == Counter(str(1 << i)) for i in range(32))
# Quick manual check: 16 is itself a power of two, so this prints True.
print(Solution().reorderedPowerOf2(16))
| [
"762307667@qq.com"
] | 762307667@qq.com |
3cc89fb96f0e655676a5d37c4ad739b14a702333 | 55e8d271134c0f6a77e1fe3416df06510b711309 | /build/lib/pypi_test/__main__.py | 6449cc35d8cdc3fcb31e95ba1437b577579357f4 | [] | no_license | stephendarling/pypi-test-sdarling | 9bc08445d1e4c3a156177a55755ad31d862f1c09 | 58b7305262df2f37e2047a24b0933273d1892c95 | refs/heads/master | 2020-04-15T10:25:08.959164 | 2019-01-08T10:23:41 | 2019-01-08T10:23:41 | 164,595,145 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 126 | py | import lib.utilities as utilities
def main():
    """Demo entry point: add two fixed values via the utilities helper."""
    total = utilities.add_two_values(1, 9)
    return total


if __name__ == "__main__":
    main()
"stephen.darling@outlook.com"
] | stephen.darling@outlook.com |
0e4d898d2098fc6455c2670e9ca36f48ea446dc7 | 04a0dd75d8c7c57dbe11f5d04169685ac99bc87c | /luizalabs/apps/employee/urls.py | e1656b053f207cc5298667a67fe38a5cc6c85852 | [] | no_license | glaucia86/employee-manager-app | 2ddfa3b3b1c5f115aa84e7d17e55466cca1782b1 | 99129f954ab2ccb27f187583d33689a1a8ff8bbb | refs/heads/master | 2021-01-13T08:22:33.352884 | 2017-02-12T04:11:49 | 2017-02-12T04:11:49 | 81,622,515 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | from django.conf.urls import url, include
from apps.employee.views import index
# Route the application root ("") to the employee index view.
urlpatterns = [
    url(r'^$', index),
]
| [
"glaucia_lemos86@hotmail.com"
] | glaucia_lemos86@hotmail.com |
02d557bd344732d3594ca85a6a9820e0e06a97dc | 980c52aad42bf878c12dc91ccf850114dce582d1 | /scripts/duplication_wide_to_long.py | ad93d7ef74f4113258ad585280616b8a3fef34d7 | [] | no_license | samebdon/samparse | b85bc538acdc988a8bd7d042a5a9f17f534c7d7f | 41216d182e461acc41c1dc5d354bfbfa23fbea2a | refs/heads/main | 2023-04-03T00:54:23.190651 | 2021-04-09T14:34:55 | 2021-04-09T14:34:55 | 356,230,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from docopt import docopt
from matplotlib import pyplot
from os.path import isfile
import sys
import re
import pysam
import numpy as np
import pandas as pd
# Load OrthoFinder's gene-duplication events table and dump it for inspection.
dup_df = pd.read_csv("orthofinder/Gene_Duplication_Events/Duplications.tsv", sep="\t")
# Columns: Orthogroup, species tree node, gene tree node, support, type, genes 1, genes 2
print(dup_df)
"samebdon@gmail.com"
] | samebdon@gmail.com |
88a1c28c43685c4337fb31b86dc9354426552f35 | f95d42e1ee37710bf4b5069dc078ee476be2639a | /movies/templatetags/movie_tag.py | 07095b8b2e06d07a34a8464d88ed686ada6d6e5e | [] | no_license | AristokratM/django-movie | 23b67a9e002314544aa9e9525f8224155440e8af | 5f593dba74f5c6a121d076f89ce601ec4f738d7b | refs/heads/main | 2023-02-12T07:15:56.690896 | 2021-01-17T19:29:52 | 2021-01-17T19:29:52 | 328,722,123 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | from django import template
from movies.models import Category, Movie
# Library instance that registers this module's template tags with Django.
register = template.Library()
@register.simple_tag()
def get_categories():
    """Return every movie category for use in templates."""
    categories = Category.objects.all()
    return categories
@register.inclusion_tag("movies/tags/last_movies.html")
def get_last_movies(count):
    """Render the `count` most recently added movies (highest ids first)."""
    latest = Movie.objects.all().order_by('-id')[:count]
    return {'last_movies': latest}
@register.inclusion_tag("movies/tags/comment_list.html")
def get_comment_list(movie):
    """Render the list of reviews attached to the given movie."""
    movie_reviews = movie.get_review()
    return {'reviews': movie_reviews}
@register.inclusion_tag("movies/tags/inner_reviews.html")
def get_inner_reviews(review):
    """Render the child reviews (replies) nested under a review."""
    replies = review.reviews_set.all()
    return {'inner_reviews': replies}
"61250102+AristokratM@users.noreply.github.com"
] | 61250102+AristokratM@users.noreply.github.com |
100195dfd715236cf3362301bc411a12a0be41c5 | 693567f042c6bd93ecdda41cb5db81c55ccf3158 | /List/swap two elements in a list (another approach).py | c013b3bb504c2f734752ab41623c4161c62e0bf9 | [] | no_license | supriyo-pal/Python-Practice | 5806e0045ebfeb04856246a245430e2ab7921ba9 | 2025369f0d23d603ad27eaff149500137e98dbcf | refs/heads/main | 2023-01-25T05:31:58.404283 | 2020-12-09T19:08:22 | 2020-12-09T19:08:22 | 317,021,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 21:57:25 2020
@author: Supriyo
"""
number=input("enter the numbers between 0-9:")
number_list=list()
length=0
for i in range(len(number)):
number_list.append(number[i])
length=len(number_list)
print("Choose one position between 0 to",length)
pos1=int(input())
print("Choose another position except ",pos1)
pos2=int(input())
def swapPositions(list, pos1, pos2):
# popping both the elements from list
first_ele = list.pop(pos1)
second_ele = list.pop(pos2-1)
# inserting in each others positions
list.insert(pos1, second_ele)
list.insert(pos2, first_ele)
return list
# Driver function
print(swapPositions(number_list, pos1-1, pos2-1)) | [
"noreply@github.com"
] | supriyo-pal.noreply@github.com |
70d720d10e659214310f4d9a51e83db0617d1d3b | 1edc7db0b0bcb7dbc763d65bf39f97fcdd2d639e | /docusign_esign/models/proof_service_view_link.py | 8f5520bd29a1fb840ffab4bd05c9c44fd46e32cf | [
"MIT"
] | permissive | harshaboggaram/docusign-esign-python-client | f59031c3e4ac30edb1306b587e555f3d1f683b2f | 45b896956369e913dce13a2cb39ce1487ddb3a8d | refs/heads/master | 2023-08-25T14:23:33.420231 | 2021-11-04T20:18:47 | 2021-11-04T20:18:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,596 | py | # coding: utf-8
"""
DocuSign REST API
The DocuSign REST API provides you with a powerful, convenient, and simple Web services API for interacting with DocuSign. # noqa: E501
OpenAPI spec version: v2.1
Contact: devcenter@docusign.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from docusign_esign.client.configuration import Configuration
class ProofServiceViewLink(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'view_link': 'str'
    }
    attribute_map = {
        'view_link': 'ViewLink'
    }
    def __init__(self, _configuration=None, **kwargs):  # noqa: E501
        """ProofServiceViewLink - a model defined in Swagger"""  # noqa: E501
        if _configuration is None:
            _configuration = Configuration()
        self._configuration = _configuration
        self._view_link = None
        self.discriminator = None
        # Populate the single model attribute from kwargs (defaults to None).
        setattr(self, "_{}".format('view_link'), kwargs.get('view_link', None))
    @property
    def view_link(self):
        """Gets the view_link of this ProofServiceViewLink.  # noqa: E501
          # noqa: E501
        :return: The view_link of this ProofServiceViewLink.  # noqa: E501
        :rtype: str
        """
        return self._view_link
    @view_link.setter
    def view_link(self, view_link):
        """Sets the view_link of this ProofServiceViewLink.
          # noqa: E501
        :param view_link: The view_link of this ProofServiceViewLink.  # noqa: E501
        :type: str
        """
        self._view_link = view_link
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested swagger models, lists and dicts.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # If the generated model subclasses dict, include its mapping items too.
        if issubclass(ProofServiceViewLink, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ProofServiceViewLink):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, ProofServiceViewLink):
            return True
        return self.to_dict() != other.to_dict()
"noreply@github.com"
] | harshaboggaram.noreply@github.com |
92dfb2370079d41cc4a85a012de1c4209c13bbe6 | 1fc12b97b47a35e7232cd85c20d1976d6b85a80d | /evanchow_submission.py | c87d8f76837ca0b55463311cab2ce294a0114500 | [] | no_license | richard512/capitalone | 6d416a767cca0b0fee4c3732f5f5aaa1f874a468 | 683949e3109221b44f0ceb1f54750ff2447726a0 | refs/heads/master | 2020-02-26T16:27:50.008117 | 2014-03-22T00:32:35 | 2014-03-22T00:32:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | ##############################################################
# Name: Evan Chow
# Email: echow@princeton.edu
# This script uses Pandas dataframes to find the metrics.
##############################################################
import pandas as pd
# Read the census data into a pandas DataFrame.  pd.read_csv accepts a file
# path directly, so there is no need to open() the file by hand (and binary
# mode handed to read_csv is unnecessary).
data = pd.read_csv('Metropolitan_Populations__2010-2012_.csv')
# Deliverable #1: of metros with at least 50k people in 2010, the five with
# the highest percentage population growth 2010 -> 2012.
# .loc replaces the long-deprecated .ix indexer (removed in pandas 1.0);
# range/print() replace the Python-2-only xrange/print-statement forms.
p50k = data.loc[data['2010 Population'] >= 50000].reset_index(drop=True)
del p50k['2011 Population']
city, p10, p12 = [p50k[p50k.columns[n]] for n in range(3)]
pChange = [(100. * (p12[i] - p10[i]) / p10[i], city[i]) for i in p50k.index]
pChange.sort(reverse=True)
print("Top five cities to target based on highest population growth: ")
for i in pChange[0:5]:
    print(i[1])
# Deliverable #2: the five metros with the steepest percentage decline.
pChange.sort()
print("\nTop 5 cities to avoid based on most shrinking population: ")
for i in pChange[0:5]:
    print(i[1])
# Deliverable #3: the five areas with the largest absolute (cumulative) growth.
data['Growth'] = data['2012 Population'] - data['2010 Population']
growth = [(data['Growth'][i], data['Geography'][i]) for i in data.index]
growth.sort(reverse=True)
print("\nTop five states with highest cumulative growth: ")
for i in growth[0:5]:
    print(i[1])
"evancchow@gmail.com"
] | evancchow@gmail.com |
7281be76556fd8d629e4800ad5a8fe24b6f8e645 | 2880ec0276330b0d3248ad21476769a4959a9f66 | /life_expectancy/views.py | 78b6b7477419a6eec68e53b376c7cb384fadaf8f | [
"MIT"
] | permissive | Cally99/Data-Labs-with-Python-Tableau | 5a13c3b4d241633848c7a16d83977e9ee9ca9599 | 6fe58cd45d6c2b058abe771b3ffcf108b8832a49 | refs/heads/master | 2020-07-06T00:47:27.801260 | 2019-07-03T06:50:27 | 2019-07-03T06:50:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | from django.shortcuts import render
def life_expectancy(request):
return render(request, "life_expectancy.html") | [
"lucasmalucha@gmail.com"
] | lucasmalucha@gmail.com |
39ec4bf8e6a7b867141864c990b093060312ebb6 | a82df8590bdcf2bb4e01e0b610a0765706dc0dbe | /python/ofile.py | 49b7d15b1658d5ec3c58e05371cceda0bb8ecf3a | [] | no_license | bryankuo/lab | db1923ff1479a0dfe3d4fe6b41fb47f97fb19b37 | 521865b25f632887d05a951365e2497c4c22631b | refs/heads/master | 2023-09-04T00:44:33.832607 | 2023-09-03T11:08:28 | 2023-09-03T11:08:28 | 64,229,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 495 | py | #!/usr/bin/env python3
import sys,csv
'''
def open_and_parse(fname):
f = open(fname, "r")
print(f.readline())
return 0
'''
def open_and_parse(fname):
    """Print every row of the CSV file `fname`, then the total row count.

    Returns 0 on success (shell-style status code).
    """
    # newline='' is the csv-module-recommended way to open input files.
    with open(fname, 'r', newline='') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',')
        line_count = 0
        for row in csv_reader:
            line_count += 1
            print(row)
        # BUG FIX: line_count is an int and must be converted before being
        # concatenated to a str (the original raised TypeError here).
        print("#row: " + str(line_count))
        return 0
def main():
    """Command-line entry point: dump the CSV file named by argv[1]."""
    open_and_parse(sys.argv[1])


if __name__ == "__main__":
    main()
| [
"cckuo.at.home@gmail.com"
] | cckuo.at.home@gmail.com |
919f62f59e07d172d3703788a539f9b1ddd5d54d | d6a0def07da332d6f2cc250f4fd59b314d8f1099 | /Cellgraphrt5.py | e89f547e0fbec255c050392c098741c5cc90c747 | [] | no_license | ghsalem/exadata_graphs | eb3cfa413a24e0e23b029cd0366a3cc0a47b87b1 | dbc788d3aa00b452cd0d05573d5a60d0723d995b | refs/heads/master | 2020-03-19T06:31:45.107716 | 2018-06-04T13:53:41 | 2018-06-04T13:53:41 | 136,029,662 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,009 | py | import subprocess
import os
import pandas as pd
import numpy as np
from PyQt5 import QtCore, QtWidgets, QtWidgets
from matplotlib import pyplot as plt
from matplotlib import dates as mdates
from matplotlib.pyplot import cm
import datetime as dt
from matplotlib import rcParams
from matplotlib.backends.backend_qt5 import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
import mplcursors
class CellGraphrtPage:
    """Real-time metric graph page for one Exadata storage cell.

    Embeds a matplotlib Figure plus navigation toolbar in a QWidget that is
    appended to the parent's ``ga_vert_layout``; ``redraw_events`` refreshes
    the plotted curves from ``parent.current_metrics``.
    """
    def __init__(self,obj, cell, avg):
        # obj : parent controller exposing ga_sa_contents, ga_vert_layout,
        #       metric, current_metrics and list_of_files.
        # cell: name of the storage cell this page plots.
        # avg : True -> one summed curve per disk *type*; False -> per disk.
        self.figure=""
        rcParams.update({'figure.autolayout': True})
        self.class_sql_text=""
        self.chosen_class=""
        self.lines=dict()
        self.graph=""
        self.avg=avg
        self.parent=obj
        # Widget that hosts the canvas + toolbar inside the parent scroll area.
        self.qfigwidget=QtWidgets.QWidget(self.parent.ga_sa_contents)
        winWidth = 383
        winHeight = 384
        self.dpi=100
        # Figure size is expressed in inches, hence the pixel / dpi division.
        self.figure=plt.Figure((winWidth/self.dpi, winHeight/self.dpi), dpi=self.dpi)#MplCanvas(self.parent.ga_sa_contents)#self.parent.GraphScrollArea)
        self.canvas=FigureCanvas(self.figure)
        self.canvas.setParent(self.qfigwidget)
        self.navi_toolbar = NavigationToolbar(self.canvas,self.qfigwidget)#self.parent.centralwidget)
        self.plotLayout = QtWidgets.QVBoxLayout()
        self.plotLayout.addWidget(self.canvas)
        self.plotLayout.addWidget(self.navi_toolbar)
        self.qfigwidget.setLayout(self.plotLayout)
        self.parent.ga_vert_layout.addWidget(self.qfigwidget)
        self.ax=None
        self.old_waits=""
        self.dpi=self.figure.dpi
        self.begin_date=None
        self.all_data=None
        self.sysdate=True
        self.metric_name=self.parent.metric
        self.first_time=True
        # NOTE(review): the '+01:00' offset is hard-coded — presumably the
        # server timezone; confirm before relying on it.
        self.starting_date=(dt.datetime.now()-dt.timedelta(minutes=5)).strftime(' %Y-%m-%dT%H:%M:%S+01:00')
        self.xfmt = mdates.DateFormatter('%H:%M:%S')
        self.canvas.setMinimumSize(self.canvas.size())
        self.cell=cell
        if self.parent.list_of_files is None:
            self.redraw_events()
    def remove_graph(self):
        """Detach this page's widget from the parent layout."""
        self.parent.ga_vert_layout.removeWidget(self.qfigwidget)
    def redraw_events(self):
        """Re-plot the cell's metric curves from parent.current_metrics.

        Expects current_metrics to provide the columns Cell, Disk, Type,
        Timestamp and Metric.  Series whose name starts with 'n' (plus
        'Flash') go on the left axis; everything else on the right twin axis.
        """
        self.metric="Metric"
        #print("metric ",self.metric)
        #self.all_data=self.parent.current_metrics[self.parent.current_metrics['Cell']==self.cell].copy(deep=True)
        seconds_diff=dict()
        if self.avg:
            # Aggregate mode: sum the metric per (cell, disk-type, timestamp).
            col='Type'
            self.all_data=self.parent.current_metrics[self.parent.current_metrics['Cell']==self.cell].groupby(['Cell','Type','Timestamp']).sum().reset_index()
            list_disks_u=list(self.all_data.Type.unique())
            lab_add='(Sum of '+self.metric+' per type)'
        else:
            # Detail mode: keep one series per individual disk.
            col='Disk'
            self.all_data=self.parent.current_metrics[self.parent.current_metrics['Cell']==self.cell].copy(deep=True)
            list_disks_u=list(self.all_data.Disk.unique())
            lab_add='('+self.metric_name+' per device)'
        #print (current_metrics)
        #print(self.metric)
        if self.ax is None:
            # Lazily create the axes pair on the first draw.
            self.ax=self.figure.add_axes([.06,.15,.75,.80])
            self.ax2=self.ax.twinx()
            self.ax.set_facecolor('w')
        xticklabels=list(self.all_data['Timestamp'])
        #print(list_cells_u)
        xtickmin=xticklabels[0]
        xtickmax=xticklabels[len(xticklabels)-1]
        #print(@neticks)
        xticks=np.arange(len(xticklabels))
        #print("TS: ",len(xticks))
        # One distinct rainbow colour per plotted series.
        color=iter(cm.rainbow(np.linspace(0,1,len(list_disks_u))))
        self.ax.cla()
        self.ax2.cla()
        stacked_data=[]
        nbr_lines1=0
        nbr_lines2=0
        for n,i in enumerate(list_disks_u):
            #print("i=",i," col=",col)
            # Skip series that never rise above zero.
            if self.all_data[self.all_data[col]==i].Metric.max()>0:
                color_n=next(color)
                # mindelta=self.all_data[self.all_data['Disk']==i]['DELTA'][1:].min()
                xticklabels=list(self.all_data[self.all_data[col]==i]['Timestamp'])
                # if mindelta<0:
                total_waits=self.all_data[self.all_data[col]==i][self.metric]
                total_waits=pd.Series(total_waits)
                # NOTE(review): rolling().mean() returns a new series and the
                # result is discarded here, so no smoothing is actually applied.
                if len(total_waits)>20:
                    total_waits.rolling(window=5,center=True).mean()
                if i[0]=='n' or i=='Flash':
                    l1=self.ax.plot(xticklabels,total_waits, color=color_n,label=i)
                    mplcursors.cursor(l1)
                    nbr_lines1+=1
                else:
                    l2=self.ax2.plot(xticklabels,total_waits, color=color_n,label=i)
                    mplcursors.cursor(l2)
                    nbr_lines2+=1
        if nbr_lines1+nbr_lines2>0:
            self.ax.xaxis.set_major_formatter(self.xfmt)
            #self.ax.set_xticks(xticklabels)
            #print(xtickmin,xtickmax)
            # self.ax.set_xticklabels(xticklabels, rotation=45, fontsize=8, ha='center')
            self.ax.set_xlim([xtickmin,xtickmax])
            for tick in self.ax.xaxis.get_major_ticks():
                tick.label.set_fontsize(8)
                tick.label.set_rotation(40)
            for tick in self.ax.yaxis.get_major_ticks():
                tick.label.set_fontsize(8)
            # self.ax.yaxis.tick_right() #params(axis='y', direction='in')
            if nbr_lines1>0:
                self.ax.legend(loc='center right', bbox_to_anchor=(0.1,.8),
                        fontsize=8, ncol=1,fancybox=True, shadow=True)
            if nbr_lines2>0:
                self.ax2.legend(loc='center left', bbox_to_anchor=(1.1,.5),
                        fontsize=8, ncol=1,fancybox=True, shadow=True)
            self.ax.set_title(self.cell+lab_add, fontsize=8)
            # mplcursors.cursor()
            self.canvas.draw()
| [
"salem.ghassan@gmail.com"
] | salem.ghassan@gmail.com |
f735e541e2852a473ab392064cf9429ac1a90ffc | 0db19410e9751790af8ce4a0a9332293e379c02f | /configs/body_2d_keypoint/rtmpose/humanart/rtmpose-s_8xb256-420e_humanart-256x192.py | b4263f25e741e25a0ec5b85900ff1b2587d2805d | [
"Apache-2.0"
] | permissive | open-mmlab/mmpose | 2c9986521d35eee35d822fb255e8e68486026d94 | 537bd8e543ab463fb55120d5caaa1ae22d6aaf06 | refs/heads/main | 2023-08-30T19:44:21.349410 | 2023-07-04T13:18:22 | 2023-07-04T13:18:22 | 278,003,645 | 4,037 | 1,171 | Apache-2.0 | 2023-09-14T09:44:55 | 2020-07-08T06:02:55 | Python | UTF-8 | Python | false | false | 6,656 | py | _base_ = ['../../../_base_/default_runtime.py']
# runtime
max_epochs = 420
stage2_num_epochs = 30  # length of the final stage (pipeline switched near the end)
base_lr = 4e-3
# Train for 420 epochs, validating every 10 epochs.
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)
# optimizer
optim_wrapper = dict(
    type='OptimWrapper',
    optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.),
    # Exclude norm layers and biases from weight decay.
    paramwise_cfg=dict(
        norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
# learning rate
param_scheduler = [
    # Linear warm-up over the first 1000 iterations.
    dict(
        type='LinearLR',
        start_factor=1.0e-5,
        by_epoch=False,
        begin=0,
        end=1000),
    dict(
        # use cosine lr from 210 to 420 epoch
        type='CosineAnnealingLR',
        eta_min=base_lr * 0.05,
        begin=max_epochs // 2,
        end=max_epochs,
        T_max=max_epochs // 2,
        by_epoch=True,
        convert_to_iter_based=True),
]
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=1024)
# codec settings
# SimCC encodes each keypoint as two 1-D classification targets (x and y)
# for a 192x256 (w x h) input.
codec = dict(
    type='SimCCLabel',
    input_size=(192, 256),
    sigma=(4.9, 5.66),
    simcc_split_ratio=2.0,
    normalize=False,
    use_dark=False)
# model settings
# Top-down RTMPose-S: CSPNeXt backbone (widen_factor=0.5) initialized from
# the UDP-pretrained checkpoint below, with an RTMCC (SimCC + GAU) head
# predicting 17 COCO-style keypoints.
model = dict(
    type='TopdownPoseEstimator',
    data_preprocessor=dict(
        type='PoseDataPreprocessor',
        mean=[123.675, 116.28, 103.53],
        std=[58.395, 57.12, 57.375],
        bgr_to_rgb=True),
    backbone=dict(
        _scope_='mmdet',
        type='CSPNeXt',
        arch='P5',
        expand_ratio=0.5,
        deepen_factor=0.33,
        widen_factor=0.5,
        out_indices=(4, ),
        channel_attention=True,
        norm_cfg=dict(type='SyncBN'),
        act_cfg=dict(type='SiLU'),
        init_cfg=dict(
            type='Pretrained',
            prefix='backbone.',
            checkpoint='https://download.openmmlab.com/mmpose/v1/projects/'
            'rtmpose/cspnext-s_udp-aic-coco_210e-256x192-92f5a029_20230130.pth'  # noqa
        )),
    head=dict(
        type='RTMCCHead',
        in_channels=512,
        out_channels=17,
        input_size=codec['input_size'],
        in_featuremap_size=(6, 8),
        simcc_split_ratio=codec['simcc_split_ratio'],
        final_layer_kernel_size=7,
        gau_cfg=dict(
            hidden_dims=256,
            s=128,
            expansion_factor=2,
            dropout_rate=0.,
            drop_path=0.,
            act_fn='SiLU',
            use_rel_bias=False,
            pos_enc=False),
        loss=dict(
            type='KLDiscretLoss',
            use_target_weight=True,
            beta=10.,
            label_softmax=True),
        decoder=codec),
    test_cfg=dict(flip_test=True))
# base dataset settings
dataset_type = 'HumanArtDataset'
data_mode = 'topdown'
data_root = 'data/'
# Read annotations/images from the local filesystem; the commented block
# below is the petrel (S3) alternative.
backend_args = dict(backend='local')
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/',
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/'
#     }))
# pipelines
# Stage-1 training pipeline: strong augmentation (wide scale/rotate ranges,
# HSV jitter, blur and CoarseDropout applied with p=1).
train_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=1.),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
# Validation pipeline: deterministic load + affine only.
val_pipeline = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='PackPoseInputs')
]
# Stage-2 training pipeline: milder augmentation for the final epochs
# (no shift, narrower scale/rotate, CoarseDropout at p=0.5).
train_pipeline_stage2 = [
    dict(type='LoadImage', backend_args=backend_args),
    dict(type='GetBBoxCenterScale'),
    dict(type='RandomFlip', direction='horizontal'),
    dict(type='RandomHalfBody'),
    dict(
        type='RandomBBoxTransform',
        shift_factor=0.,
        scale_factor=[0.75, 1.25],
        rotate_factor=60),
    dict(type='TopdownAffine', input_size=codec['input_size']),
    dict(type='mmdet.YOLOXHSVRandomAug'),
    dict(
        type='Albumentation',
        transforms=[
            dict(type='Blur', p=0.1),
            dict(type='MedianBlur', p=0.1),
            dict(
                type='CoarseDropout',
                max_holes=1,
                max_height=0.4,
                max_width=0.4,
                min_holes=1,
                min_height=0.2,
                min_width=0.2,
                p=0.5),
        ]),
    dict(type='GenerateTarget', encoder=codec),
    dict(type='PackPoseInputs')
]
# data loaders
# 256 images per GPU for training, 64 for validation; validation reuses the
# same settings for testing.
train_dataloader = dict(
    batch_size=256,
    num_workers=10,
    persistent_workers=True,
    sampler=dict(type='DefaultSampler', shuffle=True),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='HumanArt/annotations/training_humanart_coco.json',
        data_prefix=dict(img=''),
        pipeline=train_pipeline,
    ))
val_dataloader = dict(
    batch_size=64,
    num_workers=10,
    persistent_workers=True,
    drop_last=False,
    sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
    dataset=dict(
        type=dataset_type,
        data_root=data_root,
        data_mode=data_mode,
        ann_file='HumanArt/annotations/validation_humanart.json',
        # bbox_file=f'{data_root}HumanArt/person_detection_results/'
        # 'HumanArt_validation_detections_AP_H_56_person.json',
        data_prefix=dict(img=''),
        test_mode=True,
        pipeline=val_pipeline,
    ))
test_dataloader = val_dataloader
# hooks
# Keep only the single best checkpoint by COCO AP.
default_hooks = dict(
    checkpoint=dict(save_best='coco/AP', rule='greater', max_keep_ckpts=1))
custom_hooks = [
    # Exponential-momentum EMA of the model weights.
    dict(
        type='EMAHook',
        ema_type='ExpMomentumEMA',
        momentum=0.0002,
        update_buffers=True,
        priority=49),
    # Switch to the milder stage-2 pipeline for the last stage2_num_epochs.
    dict(
        type='mmdet.PipelineSwitchHook',
        switch_epoch=max_epochs - stage2_num_epochs,
        switch_pipeline=train_pipeline_stage2)
]
# evaluators
# COCO-style keypoint AP on the HumanArt validation annotations.
val_evaluator = dict(
    type='CocoMetric',
    ann_file=data_root + 'HumanArt/annotations/validation_humanart.json')
test_evaluator = val_evaluator
| [
"noreply@github.com"
] | open-mmlab.noreply@github.com |
297c1220c8a3ea20bfe4798b655f8387e404042f | 5ceac3827bfae3c2c5634efcdac35e5235dfd75f | /authentication/urls.py | 040c02edf1761c8a0cc62bb979287496d4f2b276 | [] | no_license | jabirjas/jwt-toturial | 132d13a5842d68bbb6732fec1d41a0c0400f79c8 | e4d2bee71e27d01ec75def9e1910e25266ba8eee | refs/heads/master | 2023-02-06T21:53:09.648845 | 2020-12-29T04:03:53 | 2020-12-29T04:03:53 | 325,178,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | from django.urls import path
# from rest_framework_simplejwt.views import (
# TokenObtainPairView,
# TokenRefreshView,
# )
from rest_framework_simplejwt.views import TokenRefreshView
from authentication.views import UserTokenObtainPairView
# JWT endpoints: obtain a token pair (project's custom claims view) and refresh.
urlpatterns = [
    # path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/', UserTokenObtainPairView.as_view(), name='user_token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
"jabir@cankado.com"
] | jabir@cankado.com |
7944718fd7fe7c269fefe3472a850144761174d5 | 951036196e1124ae7ea5adaa713b2ca6c14d50a8 | /bb102finals/elephant/models.py | c5cae4d28f10737f728b808b3c9187dc1eec197d | [] | no_license | UnCarter25le/python_Django | fadce2b05bb6a994624d9a615b0b4b888f6da137 | c5771b37db849c3b8dafd11043867f6fea160f38 | refs/heads/master | 2021-07-11T04:33:40.272529 | 2017-10-07T13:26:28 | 2017-10-07T13:26:28 | 106,099,356 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,763 | py | from django.db import models
from django.utils import timezone
# Create your models here.
class maplist(models.Model):
    # Point-of-interest record: display name, description, coordinates
    # (stored as text), phone number and street address.
    mapName = models.CharField(max_length=60, null=False)
    mapDesc = models.TextField(null=False)
    mapLat = models.CharField(max_length=20, null=False)  # latitude as text
    mapLng = models.CharField(max_length=20, null=False)  # longitude as text
    mapTel = models.CharField(max_length=20, null=False)
    mapAddr = models.CharField(max_length=60, null=False)
    def __str__(self):
        return self.mapName
class daanburglar(models.Model):
    # Location record (presumably burglary incidents, per the class name —
    # TODO confirm).  `id` is an explicit integer primary key, suggesting
    # rows are imported from an external data source.
    mapName = models.CharField(max_length=60, null=False)
    # mapDesc = models.TextField(null=False)
    mapLat = models.CharField(max_length=20, null=False)
    mapLng = models.CharField(max_length=20, null=False)
    id = models.IntegerField(null=False,primary_key=True)
    mapAddr = models.CharField(max_length=60, null=False)
    mapcityID = models.CharField(max_length=20, null=False)
    def __str__(self):
        return self.mapName
class daanpets(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daangogoro(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daannoise(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daantemple(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daannarrowroadway(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daanpolice(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daanfiredepart(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daanfuneral(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daanmarkets(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
class daangas(models.Model):
mapName = models.CharField(max_length=60, null=False)
# mapDesc = models.TextField(null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
id = models.IntegerField(null=False,primary_key=True)
mapAddress = models.CharField(max_length=60, null=False)
mapcityID = models.CharField(max_length=20, null=False)
def __str__(self):
return self.mapName
# mapUrl varchar,\
# mapName varchar,\
# mapAddress varchar,\
# mapLat varchar,\
# mapLng varchar,\
# mapcityID varchar,\
# mapLabel varchar,\
# mapRent varchar,\
# mapLandlord varchar,\
# mapSpace varchar,\
# mapPet varchar,\
# mapSex varchar,\
# mapCook varchar,\
# mapSmoke varchar,\
class daanpets10000(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapAvgonfood = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName
class daanpetsall(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapAvgonfood = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName
class daanroomtest(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName
class tucheng5km10000(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapAvgonfood = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName
class tucheng3km10000(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapAvgonfood = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName
class tucheng5km(models.Model):
id = models.IntegerField(null=False,primary_key=True)
mapUrl = models.CharField(max_length=60, null=False)
mapName = models.CharField(max_length=60, null=False)
mapAddress = models.CharField(max_length=60, null=False)
mapLat = models.CharField(max_length=20, null=False)
mapLng = models.CharField(max_length=20, null=False)
mapcityID = models.CharField(max_length=5, null=False)
mapLabel = models.CharField(max_length=5, null=False)
mapRent = models.CharField(max_length=10, null=False)
mapLandlord = models.CharField(max_length=60, null=False)
mapSpace = models.CharField(max_length=10, null=False)
mapPet = models.CharField(max_length=5, null=False)
mapSex = models.CharField(max_length=5, null=False)
mapCook = models.CharField(max_length=5, null=False)
mapAvgonfood = models.CharField(max_length=5, null=False)
mapSmoke = models.CharField(max_length=5, null=False)
def __str__(self):
return self.mapName | [
"UnCarter25le@gmail.com"
] | UnCarter25le@gmail.com |
399bd0b1cb94fcc4791e6ffda5196a46fa644e85 | f0706c139ec7e5e30965786481928be8a5a90505 | /pipeline/recognition/tests.py | 4a90d94504f908cb4a4eba4ec0789f872f569918 | [] | no_license | carlosb1/upc-aidl-19-team4 | b17c3ce03233179a1ba2103718f161dba3044b51 | 4e8e681183452019da001191ac2b67e0ff9a09d2 | refs/heads/master | 2022-12-13T06:31:09.292596 | 2019-07-06T14:03:56 | 2019-07-06T14:03:56 | 195,061,097 | 1 | 1 | null | 2022-11-22T04:07:13 | 2019-07-03T13:39:53 | Python | UTF-8 | Python | false | false | 2,840 | py | from params import BuilderTrain, MODE_DATA_SARA, SIMPLE_TRANSFORM, OPTIMIZER_ADAM, NORMAL_TRANSFORM, MODEL_SIAMESE2, MODEL_DECISION_LINEAR, MODEL_DECISION, MODEL_SIAMESE1
from train import run
import sys
if len(sys.argv) > 1 and sys.argv[1] == "aws":
PATH_DATASET = '/root/data/cfp-dataset'
else:
PATH_DATASET = '/home/carlosb/python-workspace/upc-aidl-19-team4/datasets/cfp-dataset'
num_epochs = 30
batch_size = 20
perc_data = 1.0
MODELS_TEST = [
MODEL_SIAMESE1, MODEL_SIAMESE2, MODEL_DECISION_LINEAR, MODEL_DECISION
]
for model in MODELS_TEST:
# small test for dataset sara
print("\n")
print("------------------TESTS for " + model + "-------------------------")
print(".- testing sara dataset... OPTIMIZER_SGD")
params = BuilderTrain(PATH_DATASET).model(model).dataset(
MODE_DATA_SARA, perc_data=perc_data,
batch_size=batch_size).num_epochs(num_epochs).transform(
SIMPLE_TRANSFORM).name_run(model + "_sara_sgd").build()
run(params)
print("------")
print(".- testing sara dataset... OPTIMIZER_ADAM")
params = BuilderTrain(PATH_DATASET).model(model).dataset(
MODE_DATA_SARA, perc_data=perc_data,
batch_size=batch_size).num_epochs(num_epochs).transform(
SIMPLE_TRANSFORM).name_run(model + "_sara_adam").optimizer(
OPTIMIZER_ADAM, lr=1e-3, weight_decay=0.).build()
run(params)
print("------")
# print(".- testing sara dataset... OPTIMIZER_SGD NORMAL_TRANSFORM v2")
# params = BuilderTrain(PATH_DATASET).model(model).dataset(MODE_DATA_SARA, perc_data=perc_data, batch_size=batch_size).num_epochs(num_epochs).transform(NORMAL_TRANSFORM).name_run(model+"_sara_sgd_normtrans_v2").build()
# run(params)
# print("------")
print(".- testing sara dataset... OPTIMIZER_ADAM NORMAL_TRANSFORM v2")
params = BuilderTrain(PATH_DATASET).model(model).dataset(
MODE_DATA_SARA, perc_data=perc_data,
batch_size=batch_size).num_epochs(num_epochs).transform(
NORMAL_TRANSFORM).name_run(model +
"_sara_adam_normtrans_v2").optimizer(
OPTIMIZER_ADAM,
lr=1e-3,
weight_decay=0.).build()
run(params)
print("------")
print(
".- testing sara dataset... OPTIMIZER_ADAM NORMAL_TRANSFORM LR 5e-4 v2"
)
params = BuilderTrain(PATH_DATASET).model(model).dataset(
MODE_DATA_SARA, perc_data=perc_data, batch_size=batch_size).num_epochs(
num_epochs).transform(NORMAL_TRANSFORM).name_run(
model + "_sara_adam_normtrans_lr5e-4_v2").optimizer(
OPTIMIZER_ADAM, lr=5e-4, weight_decay=0.).build()
run(params)
print("------")
| [
"carlos.baezruiz@gmail.com"
] | carlos.baezruiz@gmail.com |
00f660e0f272fcb8cb93a3a9f9780f5e4e343eaa | e82034a344076bb39182dd9083f887859332cfbc | /applications/users/admin.py | 3fe16af878f119bcbc5c5126b3e4f81f99fc05c0 | [] | no_license | ergusto/shoppinglist.website | e9eb47ab1c4426d388b5389a79ec979f4ff6433e | 4836a1a6cc61312b5b9be9249f109a071ba1381d | refs/heads/master | 2022-12-22T19:47:39.078618 | 2019-11-16T12:59:23 | 2019-11-16T12:59:23 | 94,458,073 | 2 | 0 | null | 2022-05-24T23:40:52 | 2017-06-15T16:24:03 | CSS | UTF-8 | Python | false | false | 911 | py | from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from .forms import UserChangeForm, UserCreationForm
from .models import User
class UserAdmin(BaseUserAdmin):
    """Admin configuration for the custom email-based ``User`` model."""
    form = UserChangeForm  # form used on the change page
    add_form = UserCreationForm  # form used on the add page
    list_display = ('email', 'is_admin', 'is_active')
    list_filter = ('is_admin',)
    fieldsets = (
        (None, {'fields': ('email', 'password')}),
        ('Permissions', {'fields': ('is_active', 'is_admin',)}),
        ('Important dates', {'fields': ('last_login', 'date_joined')}),
    )
    # Fields shown when creating a user from the admin "add" page.
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('email', 'password1', 'password2')}
        ),
    )
    search_fields = ('email',)
    ordering = ('email',)
    filter_horizontal = ()
admin.site.register(User, UserAdmin)
# This project does not use Django's Group model in the admin.
admin.site.unregister(Group)
| [
"fergus.ruston@gmail.com"
] | fergus.ruston@gmail.com |
94729ef5c13805a548fadb8522ab0fe5e05564b9 | e16130e2c7396c9be5bc02570519671f0aa4e314 | /Python/Python_OO.py | 4208bcc4a4c78793e7512f86f9707fab01f34c10 | [] | no_license | MpRonald/TKinter | 713ba746ac438e76407c611e14149bedce273b4e | 8bfce7809cbcb35f86941510a37bf376ff45afdc | refs/heads/master | 2023-06-20T03:24:58.628117 | 2021-07-20T15:35:43 | 2021-07-20T15:35:43 | 387,835,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,398 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 2 17:30:19 2020
Curso Python OO - Parte II
"""
class Programa:
    """Base class for watchable programs (movies, series) with like counting."""

    def __init__(self, nome, ano):
        self._nome = nome.title()  # store a title-cased display name
        self.ano = ano
        self._likes = 0

    @property
    def likes(self):
        """Number of likes received so far."""
        return self._likes

    def dar_likes(self):
        """Register one like."""
        self._likes += 1

    @property
    def nome(self):
        return self._nome

    @nome.setter
    def nome(self, nome):
        self._nome = nome

    def __str__(self):
        # Bug fix: this previously returned an empty f-string (f''), so
        # printing a base Programa instance showed nothing.
        return f'{self._nome} - {self.ano} - {self._likes} likes.'
class Filme(Programa):
    """A movie: a Programa with a duration in minutes."""

    def __init__(self, nome, ano, duracao):
        # Delegate the shared initialisation (title-casing, likes counter)
        # to Programa instead of duplicating it by hand.
        super().__init__(nome, ano)
        self.duracao = duracao

    def __str__(self):
        return(f'{self._nome} - {self.ano} - {self.duracao} - {self._likes} likes.')
class Serie(Programa):
    """A TV series: a Programa with a number of seasons."""

    def __init__(self, nome, ano, temporadas):
        # Delegate the shared initialisation (title-casing, likes counter)
        # to Programa instead of duplicating it by hand.
        super().__init__(nome, ano)
        self.temporadas = temporadas

    def __str__(self):
        return(f'{self._nome} - {self.ano} - {self.temporadas} - {self._likes} likes.')
vingadores = Filme('vingadores - guerra infinita', 2018, 160)
vingadores.dar_likes()
cabana = Filme('A Cabana', 2018, 135)
cabana.dar_likes()
cabana.dar_likes()
cabana.dar_likes()
community = Serie('Community', 2009, 6)
community.dar_likes()
community.dar_likes()
atlanta = Serie('atlanta', 2018, 2)
atlanta.dar_likes()
atlanta.dar_likes()
class Playlist:
    """A named, indexable collection of programs.

    Implements ``__getitem__``/``__len__`` so instances support indexing,
    slicing and plain ``for`` iteration over the wrapped program list.
    """

    def __init__(self, nome, programas):
        self.nome = nome
        self._programas = programas

    @property
    def listagem(self):
        """The underlying list of programs."""
        return self._programas

    def __getitem__(self, item):
        # Delegate indexing (and slicing) to the wrapped list.
        return self.listagem[item]

    def __len__(self):
        return len(self.listagem)
print(f'Nome: {atlanta.nome} - Ano: {atlanta.ano}')
filmes_e_series = [vingadores, atlanta, cabana, community]
playlist_fds = Playlist('Fim de Semana', filmes_e_series)
len(playlist_fds)
for programa in playlist_fds.listagem:
print(programa)
print(f'Tamanho do playlist: {len(playlist_fds)}')
print(playlist_fds[0])
# =============================================================================
# Aula 5
# =============================================================================
class Funcionario:
def __init__(self, nome):
self.nome = nome
def registra_horas(self, horas):
print('Horas registradas...')
def mostra_tarefas(self):
print('Fez muita coisa...')
class Caelum(Funcionario):
def mostra_tarefas(self):
print('Fez muita coisa, Caelumer')
def busca_curso_mes(self, mes=None):
print(f'Mostrando cursos - {mes}' if mes else 'Mostrando curso desse mês')
class Alura(Funcionario):
def mostrar_tarefas(self):
print('Fez muita coisa, Alurete!')
def busca_pergunta_sem_resposta(self):
print('Mostrando perguntas não respondidas no fórum.')
class Hipster:
def __str__(self):
return f'Hipster, {self.nome}'
class Junior(Alura):
pass
class Pleno(Alura, Caelum):
pass
class Senior(Alura, Caelum, Hipster):
pass
jose = Junior()
jose.busca_pergunta_sem_resposta()
luan = Pleno()
luan.busca_pergunta_sem_resposta()
luan.busca_curso_mes
luan.mostrar_tarefas()
luan = Senior('Luan')
print(luan)
| [
"ronald.mp85@gmail.com"
] | ronald.mp85@gmail.com |
38f1848e54986b2542f7d3d133d0e42d9309c0ab | 794b110a87c7cb5b2a92bf9606aa61e9b1ea8e7a | /staging/membership/migrations/0015_auto_20151207_2251.py | 0e1681391dd359b2e41caf78ff084f1598c4f01e | [] | no_license | Ritaotao/MyFirstDashboard | 620707dbe84bc54a0e3bedd2f9539e153f33b3ab | 5bd4c4bcbe8e6a7f0d55b2a66c3cf193e0a2f6fd | refs/heads/master | 2021-04-28T22:11:13.148257 | 2018-03-01T20:33:52 | 2018-03-01T20:33:52 | 77,756,385 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make ``completesubscription.payment_token`` blank-able.

    Auto-generated by Django.  The one-off default of ``' '`` only
    back-fills existing rows during the migration
    (``preserve_default=False``), it is not kept on the field.
    """

    dependencies = [
        ('membership', '0014_auto_20151207_1400'),
    ]

    operations = [
        migrations.AlterField(
            model_name='completesubscription',
            name='payment_token',
            field=models.CharField(default=' ', max_length=200, blank=True),
            preserve_default=False,
        ),
    ]
| [
"ritaotao28@gmail.com"
] | ritaotao28@gmail.com |
1cc3c3e0a40e800b3eca55bc1f2adf1f5bbcee2a | 0fb867b48b5a0bd88f9fefb5cdcad0b4abe720b6 | /calculator.spec | 646bfee63aa626e392df8c449574bd17d9edbe61 | [] | no_license | sparshjaincs/Simple-Calculator | c010181d0ad0bc09719f813e6d91f7b87d990d5d | 76c597c2e59a806c8d8a93ad8b798288639e7da1 | refs/heads/master | 2020-06-21T17:10:16.012591 | 2019-07-18T19:15:22 | 2019-07-18T19:15:22 | 197,511,687 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 817 | spec | # -*- mode: python -*-
block_cipher = None
a = Analysis(['calculator.py'],
pathex=['G:\\Projects\\Calculator'],
binaries=[],
datas=[],
hiddenimports=[],
hookspath=[],
runtime_hooks=[],
excludes=[],
win_no_prefer_redirects=False,
win_private_assemblies=False,
cipher=block_cipher,
noarchive=False)
pyz = PYZ(a.pure, a.zipped_data,
cipher=block_cipher)
exe = EXE(pyz,
a.scripts,
a.binaries,
a.zipfiles,
a.datas,
[],
name='calculator',
debug=False,
bootloader_ignore_signals=False,
strip=False,
upx=True,
runtime_tmpdir=None,
console=True )
| [
"sparshjaincs@gmail.com"
] | sparshjaincs@gmail.com |
c748b44b12e37ab2d37052f32e197c038c8526bc | 048f9e77449891794adf9d9d19362038cd44a9f9 | /tillfalle1/solutions/uppg3.py | e1cbfc723e675c1cb73a2684f121d76cafb86d32 | [] | no_license | mrkickling/pythonkurs | 9af03381b7fc5924eca13fc21c6db6ee8a5b6cc2 | 1ac8064cf6e055a4774b47c38efdda480f2b806c | refs/heads/master | 2021-12-25T14:57:57.779951 | 2021-12-16T19:51:22 | 2021-12-16T19:51:22 | 217,728,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # Uppgift 4: Skapa ett program (greeting.py) som tar in ett förnamn
# och ett efternamn och åldern och printar ut en hälsning som använder
# sig av den informationen, t.ex. “Hej Förnamn Efternamn, du är 18 år”
# Ber om förnamn och lagrar det i variabeln fornamn (kommer att vara en sträng)
fornamn = input("Förnamn: ")
# Ber om efternamn och lagrar det i variabeln efternamn (kommer att vara en sträng)
efternamn = input("Efternamn: ")
# Ber om ålder och lagrar i age (ok att det är en sträng, då vi bara ska printa det)
age = input("Ålder: ")
print("Hej " + fornamn + " " + efternamn + "! Du är " + age + " år.")
# Går också med print("Hej", fornamn, efternamn, " du är", age, "år.")
| [
"loxdal@kth.se"
] | loxdal@kth.se |
f57edfd9043ca819fd75eb58c33096ebcf3207b2 | c5e025c124847d816696f93a609a229e60f2a79c | /reconf.py | b55d1c3fe8a8c81addba27238b60a31f96597fed | [] | no_license | abcdnned/pyws | 2902fa8ed2fa3383b67d33575767ebdd51edb873 | 700208d0875ce3b26d289caf5f46bb7d78feaf80 | refs/heads/main | 2023-08-04T09:38:02.980158 | 2023-07-21T01:49:43 | 2023-07-21T01:49:43 | 70,564,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | import os
import os.path
rootdir='D:\\gitrepo\\dp-benchmarks\\acceptance'
pf='acceptance/'
def reconf(d, f):
    """Rewrite the path segment that follows 'dp-benchmarks/' in file *f*.

    The first path component after 'dp-benchmarks/' on each matching line
    is replaced (everywhere it occurs on that line) with ``pf + d``.
    Files that are missing or unreadable are skipped silently, preserving
    the original best-effort behaviour -- but only I/O errors are now
    swallowed instead of *every* exception (the old bare ``except:`` hid
    real bugs too).
    """
    try:
        with open(f) as src:  # context managers close handles on all paths
            lines = src.readlines()
        new_lines = []
        for line in lines:
            if 'dp-benchmarks/' in line:
                start = line.find('dp-benchmarks/') + len('dp-benchmarks/')
                end = line.find('/', start)
                print(line[start:end])
                line = line.replace(line[start:end], pf + d)
            new_lines.append(line)
        with open(f, 'w') as dst:
            dst.writelines(new_lines)
    except (IOError, OSError):
        # Some directories legitimately lack conf.json / template files.
        pass
for root,dirs,files in os.walk(rootdir):
for d in dirs:
reconf(d,os.path.join(root,d,'conf.json'))
reconf(d,os.path.join(root,d,'templates','file.vm'))
reconf(d,os.path.join(root,d,'templates','file.xml'))
| [
"tom.yang01@sap.com"
] | tom.yang01@sap.com |
dcd630b6a717b1d970ab25b98c828b29ab2968fb | c0bdf1de6b6fae87b12a1815b76c5589b76474d8 | /recapp | d06c0af82f05994b04c4a59788fc008c3f342a37 | [
"Apache-2.0"
] | permissive | Doctor-Gandalf/RecApp | e0ce4af13d21b37ef3b9b223a93671679481333d | 268ff3304c8eb5e77e15e6dd2eec3782437d2a0b | refs/heads/master | 2021-01-16T19:02:43.147855 | 2016-02-27T16:30:06 | 2016-02-27T16:30:06 | 40,322,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | #!/Applications/Kivy.app/Contents/Resources/python
__author__ = 'Kellan Childers'
from kivy.app import App
class RecApp(App):
    # Kivy application entry point; the UI layout is loaded from kv_file.
    title = 'RecApp'
    kv_file = 'userinterface/mainscreen.kv'

if __name__ == "__main__":
    RecApp().run()
| [
"Kellan Childers@gmail.com"
] | Kellan Childers@gmail.com | |
43db25ac408cb01f6cd4ceceb5b478064ab72b2e | ecc350977f5960822d49874952719efa030e1680 | /modules.py | 2e16762111944de5eac0d1b9a709c9e2621639cc | [
"Unlicense"
] | permissive | Hazmatius/project_KID | 899b8b981fedab67f8c4b8a47dca9bd64baa2ccb | 8df14df6b9d87fd4701da17f97265bae9030955d | refs/heads/master | 2020-07-07T04:46:23.127007 | 2019-09-11T01:23:49 | 2019-09-11T01:23:49 | 203,254,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,899 | py | import torch
import torch.nn as nn
from collections import OrderedDict
import os
import numpy as np
import utils as utils
import torch.nn.functional as F
def downsample(x):
    """Nearest-neighbour 2x downsample that keeps the spatial size.

    Every 2x2 pixel block of the (N, C, H, W) tensor is overwritten, in
    place, with that block's top-left value.  Assumes H and W are even
    -- TODO confirm callers guarantee this.  Returns the same tensor.
    """
    top_left = x[:, :, ::2, ::2]  # view of the values to be broadcast
    targets = (
        (slice(1, None, 2), slice(None, None, 2)),   # odd rows, even cols
        (slice(None, None, 2), slice(1, None, 2)),   # even rows, odd cols
        (slice(1, None, 2), slice(1, None, 2)),      # odd rows, odd cols
    )
    for rows, cols in targets:
        x[:, :, rows, cols] = top_left
    return x
def get_batch_params(x):
    """Per-feature mean and epsilon-stabilised biased std over the batch dim.

    torch.var returns the unbiased estimate, so it is rescaled by
    (B - 1) / B to the biased one (matching batch-norm statistics), then
    1e-05 is added before the square root for numerical stability.
    """
    n = x.shape[0]
    mean = x.mean(dim=0)
    biased_var = x.var(dim=0) * ((n - 1) / n)
    std = torch.sqrt(biased_var + 1e-05)
    return mean, std
# x is the 'source' of downplaying, y is the 'target' of downplaying
def downplay(x, y, factor):
idxs = (torch.sum(x, dim=1, keepdim=True) == 0).repeat(1,x.shape[1],1,1)
y[idxs] = y[idxs] / factor
return y
# sensory model
# responsible for compressing an image of a finger into a 3d vector
class SensoryModel(nn.Module):
    """Denoising convolutional + fully-connected autoencoder.

    encode() maps a 3-channel image to a 3-dimensional code
    (conv_encode then fc_encode); decode() reconstructs the image.
    NOTE(review): fc_decode() relies on self.shape cached by the most
    recent fc_encode() call, so encode() must run before decode() --
    TODO confirm callers respect this ordering.
    """
    def __init__(self, **kwargs):
        super(SensoryModel, self).__init__()
        self.config = kwargs  # raw construction kwargs, kept for bookkeeping
        self.start_epoch = 0
        self.shape = None  # conv-feature shape cached by fc_encode for fc_decode
        self.noise_level = 0  # amplitude of the uniform input noise
        # Convolutional encoder: 3 -> 8 -> 16 -> 32 -> 32 channels.
        self.conv1 = nn.Conv2d(3, 8, kernel_size=3, padding=0, stride=1, dilation=1)
        self.bn1 = nn.BatchNorm2d(8, affine=False)
        self.conv2 = nn.Conv2d(8, 16, kernel_size=5, padding=0, stride=2, dilation=1)
        self.bn2 = nn.BatchNorm2d(16, affine=False)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        self.bn3 = nn.BatchNorm2d(32, affine=False)
        self.conv4 = nn.Conv2d(32, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        self.bn4 = nn.BatchNorm2d(32, affine=False)
        # Convolutional decoder (mirror of the encoder, no batch norm).
        self.deconv4 = nn.ConvTranspose2d(32, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv3 = nn.ConvTranspose2d(32, 16, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv2 = nn.ConvTranspose2d(16, 8, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv1 = nn.ConvTranspose2d(8, 3, kernel_size=3, padding=0, stride=1, dilation=1)
        # Fully-connected encoder: 512 -> 384 -> 258 -> 131 -> 3.
        self.fc1 = nn.Linear(512, 384)
        self.bn5 = nn.BatchNorm1d(384, affine=False)
        self.fc2 = nn.Linear(384, 258)
        self.bn6 = nn.BatchNorm1d(258, affine=False)
        self.fc3 = nn.Linear(258, 131)
        self.bn7 = nn.BatchNorm1d(131, affine=False)
        self.fc4 = nn.Linear(131, 3)
        self.bn8 = nn.BatchNorm1d(3, affine=False)
        # Fully-connected decoder (mirror, no batch norm).
        self.defc4 = nn.Linear(3, 131)
        self.defc3 = nn.Linear(131, 258)
        self.defc2 = nn.Linear(258, 384)
        self.defc1 = nn.Linear(384, 512)
        self.prelu = nn.PReLU()
        self.tanh = nn.Tanh()
    def conv_encode(self, x):
        """Add uniform noise in [-noise_level, noise_level], then conv-encode."""
        noise = (2*torch.rand_like(x)-1) * self.noise_level
        x = x + noise
        y = self.prelu(self.bn1(self.conv1(x)))
        y = self.prelu(self.bn2(self.conv2(y)))
        y = self.prelu(self.bn3(self.conv3(y)))
        y = self.prelu(self.bn4(self.conv4(y)))
        return y
    def conv_decode(self, y):
        """Invert conv_encode via transposed convolutions (PReLU throughout)."""
        _x = self.prelu(self.deconv4(y))
        _x = self.prelu(self.deconv3(_x))
        _x = self.prelu(self.deconv2(_x))
        _x = self.prelu(self.deconv1(_x))
        return _x
    def fc_encode(self, x):
        """Flatten conv features and squash to a 3-d tanh code.

        Side effect: caches x.shape in self.shape for fc_decode().
        """
        self.shape = x.shape
        y = x.flatten(1)
        y = self.tanh(self.bn5(self.fc1(y)))
        y = self.tanh(self.bn6(self.fc2(y)))
        y = self.tanh(self.bn7(self.fc3(y)))
        y = self.tanh(self.bn8(self.fc4(y)))
        return y
    def fc_decode(self, y):
        """Expand a 3-d code back to the cached conv-feature shape."""
        x = self.prelu(self.defc4(y))
        x = self.prelu(self.defc3(x))
        x = self.prelu(self.defc2(x))
        x = self.prelu(self.defc1(x))
        x = x.view(self.shape)
        return x
    def encode(self, x):
        """Image -> 3-d representation."""
        return self.fc_encode(self.conv_encode(x))
    def decode(self, y):
        """3-d representation -> reconstructed image."""
        # y = y + (2*torch.rand_like(y)-1)*self.noise_level
        return self.conv_decode(self.fc_decode(y))
    def forward(self, s):
        """Return (representation r, reconstruction _s_) for sensory input s."""
        r = self.encode(s)
        _s_ = self.decode(r)
        return r, _s_
# perceptual model
class PerceptualModel(nn.Module):
    """Denoising autoencoder over (r_0, r_1, m_0) transition triples.

    encode() compresses previous state, current state and previous action
    (3 + 3 + 3 = 9 values) into a 9-d percept p; decode() reconstructs
    the current state r_1 (3 values) from p.
    """
    def __init__(self, **kwargs):
        super(PerceptualModel, self).__init__()
        self.config = kwargs  # raw construction kwargs, kept for bookkeeping
        self.start_epoch = 0
        self.noise_level = 0  # amplitude of the uniform noise added in encode()
        # Encoder: 9 -> 18 -> 36 -> 72 -> 9.
        self.fc1 = nn.Linear(9, 18)
        self.bn1 = nn.BatchNorm1d(18, affine=False)
        self.fc2 = nn.Linear(18, 36)
        self.bn2 = nn.BatchNorm1d(36, affine=False)
        self.fc3 = nn.Linear(36, 72)
        self.bn3 = nn.BatchNorm1d(72, affine=False)
        self.fc4 = nn.Linear(72, 9)
        self.bn4 = nn.BatchNorm1d(9, affine=False)
        # Decoder: 9 -> 72 -> 36 -> 18 -> 3 (reconstructs r_1 only).
        self.defc4 = nn.Linear(9, 72)
        self.debn4 = nn.BatchNorm1d(72, affine=False)
        self.defc3 = nn.Linear(72, 36)
        self.debn3 = nn.BatchNorm1d(36, affine=False)
        self.defc2 = nn.Linear(36, 18)
        self.debn2 = nn.BatchNorm1d(18, affine=False)
        self.defc1 = nn.Linear(18, 3)
        self.prelu = nn.PReLU()
        self.tanh = nn.Tanh()
    def encode(self, r_0, r_1, m_0):
        """Concatenate (r_0, r_1, m_0), add noise, and encode to percept p."""
        # r_0 is previous state, r_1 is current state, m_0 is previous action
        p = torch.cat([r_0, r_1, m_0], 1)
        noise = (2*torch.rand_like(p)-1) * self.noise_level
        p = p + noise
        p = self.prelu(self.bn1(self.fc1(p)))
        p = self.prelu(self.bn2(self.fc2(p)))
        p = self.prelu(self.bn3(self.fc3(p)))
        p = self.tanh(self.bn4(self.fc4(p)))  # tanh-squashed 9-d percept
        return p
    def decode(self, p):
        """Reconstruct the current state r_1 from percept p."""
        # p = p + (2*torch.rand_like(p)-1) * self.noise_level
        _r_1 = self.prelu(self.debn4(self.defc4(p)))
        _r_1 = self.prelu(self.debn3(self.defc3(_r_1)))
        _r_1 = self.prelu(self.debn2(self.defc2(_r_1)))
        _r_1 = self.defc1(_r_1)  # final layer left linear (no squash)
        return _r_1
    def forward(self, r_0, r_1, m_0):
        """Return (percept p, reconstruction of r_1)."""
        p = self.encode(r_0, r_1, m_0)
        _r_1 = self.decode(p)
        return p, _r_1
# forward kinematics model
# takes in a perceptual state and an action, returning a predicted next state
class ForwardKinematicsModel(nn.Module):
    """MLP that predicts the next percept from (current percept, action)."""
    def __init__(self, **kwargs):
        super(ForwardKinematicsModel, self).__init__()
        self.config = kwargs  # raw construction kwargs, kept for bookkeeping
        self.start_epoch = 0
        # MLP: (9-d percept + 3-d action) -> 18 -> 18 -> 9.
        self.fc1 = nn.Linear(9+3, 18)
        self.bn1 = nn.BatchNorm1d(18, affine=False)
        self.fc2 = nn.Linear(18, 18)
        self.bn2 = nn.BatchNorm1d(18, affine=False)
        self.fc3 = nn.Linear(18, 9)
        self.bn3 = nn.BatchNorm1d(9, affine=False)
        self.prelu = nn.PReLU()
        self.tanh = nn.Tanh()
    def forward(self, p_0, m_0):
        """Return the predicted next percept _p_1 (tanh-squashed, 9-d)."""
        x = torch.cat([p_0, m_0], 1)
        _p_1 = self.prelu(self.bn1(self.fc1(x)))
        _p_1 = self.prelu(self.bn2(self.fc2(_p_1)))
        _p_1 = self.tanh(self.bn3(self.fc3(_p_1)))
        return _p_1
# inverse kinematics model
# takes in a current perceptual state and a previous perceptual state,
# and predicts the action that caused the transition
class InverseKinematicsModel(nn.Module):
    """Regresses the action m_0 from (p_0, p_1, dt).

    Input is the 9-d previous percept, the 9-d current percept and a
    scalar time delta (19 values).  fc_skip is a residual path from the
    input straight to the pre-tanh 3-d action estimate.
    """
    def __init__(self, **kwargs):
        super(InverseKinematicsModel, self).__init__()
        self.config = kwargs  # raw construction kwargs, kept for bookkeeping
        self.start_epoch = 0
        self.indices = ['i-1', 'i', 'i+1']  # NOTE(review): appears unused here
        # Main path: 19 -> 36 -> 30 -> 20 -> 10 -> 3 -> 3.
        self.fc1 = nn.Linear(9+9+1, 36)
        self.fc_skip = nn.Linear(9+9+1, 3)  # input-to-output skip projection
        self.bn1 = nn.BatchNorm1d(36, affine=False)
        self.fc2 = nn.Linear(36, 30)
        self.bn2 = nn.BatchNorm1d(30, affine=False)
        self.fc3 = nn.Linear(30, 20)
        self.bn3 = nn.BatchNorm1d(20, affine=False)
        self.fc4 = nn.Linear(20, 10)
        self.bn4 = nn.BatchNorm1d(10, affine=False)
        self.fc5 = nn.Linear(10, 3)
        self.bn5 = nn.BatchNorm1d(3, affine=False)
        self.fc6 = nn.Linear(3, 3)
        self.prelu = nn.PReLU()
        self.tanh = nn.Tanh()
    def forward(self, p_0, p_1, dt):
        """Predict the action _m_0 that moved percept p_0 to p_1 over dt."""
        x = torch.cat([p_0, p_1, dt], 1)
        _m_0 = self.prelu(self.bn1(self.fc1(x)))
        _m_0 = self.prelu(self.bn2(self.fc2(_m_0)))
        _m_0 = self.prelu(self.bn3(self.fc3(_m_0)))
        _m_0 = self.prelu(self.bn4(self.fc4(_m_0)))
        # Residual: add the skip projection of the raw input before the tanh.
        _m_0 = self.tanh(self.bn5(self.fc5(_m_0) + self.fc_skip(x)))
        _m_0 = self.fc6(_m_0)  # final linear layer leaves the output unbounded
        return _m_0
class Mind_of_KID(nn.Module):
    """Composite agent model wiring sensory, perceptual and kinematics nets.

    Works on a dict of "model variables" (``mvars``) keyed as
    ``'<var>_<index>'`` where index is one of 'i-1', 'i', 'i+1' (three
    consecutive time steps).  Keys prefixed with '~' hold reconstructions or
    predictions produced by the sub-models.
    NOTE(review): mvars is threaded through via **kwargs even though keys
    like '~s_i-1' are not valid identifiers; CPython permits arbitrary
    string keys in ** expansion, which this relies on.
    """
    def __init__(self, **kwargs):
        super(Mind_of_KID, self).__init__()
        self.config = kwargs
        self.start_epoch = 0
        # suffixes for the three relative time steps handled per forward pass
        self.indices = ['i-1', 'i', 'i+1']
        # sub-models share the same config kwargs
        self.sensory_model = SensoryModel(**kwargs)
        self.perceptual_model = PerceptualModel(**kwargs)
        self.forward_kinematics_model = ForwardKinematicsModel(**kwargs)
        self.inverse_kinematics_model = InverseKinematicsModel(**kwargs)
    def set_noise_level(self, noise_level):
        """Propagate a noise level to the sub-models that inject noise."""
        self.sensory_model.noise_level = noise_level
        self.perceptual_model.noise_level = noise_level
    def index(self, var, i):
        """Build the mvars key for variable *var* at relative step i in {-1, 0, 1}."""
        return var + '_' + self.indices[i+1]
    def sensory_encode(self, i, **mvars):
        """s -> (r, ~s): sensory code and reconstruction for step i."""
        s = mvars[self.index('s', i)]
        r, _s = self.sensory_model.forward(s)
        mvars[self.index('r', i)] = r
        mvars[self.index('~s', i)] = _s
        return mvars
    def perceptual_encode(self, i, **mvars):
        """(r_{i-1}, r_i, m_{i-1}) -> (p_i, ~r_i): perceptual code + reconstruction."""
        r_0 = mvars[self.index('r', i-1)]
        r_1 = mvars[self.index('r', i)]
        m_0 = mvars[self.index('m', i-1)]
        p, _r_1 = self.perceptual_model.forward(r_0, r_1, m_0)
        mvars[self.index('p', i)] = p
        mvars[self.index('~r', i)] = _r_1
        return mvars
    def predict(self, i, **mvars):
        """Forward model: predict ~p_i from (p_{i-1}, m_{i-1})."""
        p_0 = mvars[self.index('p', i-1)]
        m_0 = mvars[self.index('m', i-1)]
        _p_1 = self.forward_kinematics_model.forward(p_0, m_0)
        mvars[self.index('~p', i)] = _p_1
        return mvars
    def postdict(self, i, **mvars):
        """Inverse model: infer ~m_i from (p_i, p_{i+1}, dt_{i+1})."""
        dt = mvars[self.index('dt', i+1)]
        p_0 = mvars[self.index('p', i)]
        p_1 = mvars[self.index('p', i+1)]
        _m_0 = self.inverse_kinematics_model.forward(p_0, p_1, dt)
        mvars[self.index('~m', i)] = _m_0
        return mvars
    def forward(self, **mvars):
        # encode sensory states
        mvars = self.sensory_encode(-1, **mvars)
        mvars = self.sensory_encode(0, **mvars)
        mvars = self.sensory_encode(1, **mvars)
        # perceptual codes need step i-1, so only steps 0 and 1 can be encoded
        mvars = self.perceptual_encode(0, **mvars)
        mvars = self.perceptual_encode(1, **mvars)
        # predict step 1 forward, then postdict the action at step 0
        mvars = self.predict(1, **mvars)
        mvars = self.postdict(0, **mvars)
        return mvars
    def save(self, apath, file='model_latest.pt'):
        """Save only the state_dict under apath/file."""
        save_dirs = [os.path.join(apath, file)]
        for s in save_dirs:
            torch.save(self.state_dict(), s)
    def save_model(self, path, filename):
        """Save a self-describing checkpoint (class, config, weights)."""
        model = {
            'model': Mind_of_KID,
            'config': self.config,
            'state_dict': self.state_dict(),
        }
        torch.save(model, path + filename)
    def load(self, apath, file='model_latest.pt', resume=-1):
        """Load weights saved by save(); only resume == -1 triggers a load."""
        load_from = None
        kwargs = {}
        if resume == -1:
            load_from = torch.load(os.path.join(apath, file), **kwargs)
        if load_from:
            self.load_state_dict(load_from, strict=False)
    @staticmethod
    def load_model(path, filename):
        """Rebuild a model from a save_model() checkpoint, mapping to the
        available device (CUDA if present, else CPU)."""
        if torch.cuda.is_available():
            checkpoint = torch.load(path + filename, map_location='cuda')
        else:
            checkpoint = torch.load(path + filename, map_location='cpu')
        model = checkpoint['model'](**checkpoint['config'])
        model.load_state_dict(checkpoint['state_dict'])
        return model
# class KID_Mover(nn.Module):
# def __init__(self, **kwargs):
# super(KID_Mover, self).__init__()
#
# self.config = kwargs
# self.start_epoch = 0
#
#
class KID_Eye(nn.Module):
    """Convolutional autoencoder over RGB frames.

    forward() takes the image batch under keyword 's_t' and returns a dict
    with the input ('x'), the bottleneck feature map ('z') and the
    reconstruction ('_x_').  Because the conv/deconv stack uses no padding,
    the reconstruction's spatial size may differ slightly from the input's.
    """

    def __init__(self, **kwargs):
        super(KID_Eye, self).__init__()
        self.config = kwargs
        self.start_epoch = 0
        # encoder: 3 -> 8 -> 16 -> 32 -> 32 channels, stride-2 downsampling
        self.conv1 = nn.Conv2d(3, 8, kernel_size=3, padding=0, stride=1, dilation=1)
        self.conv2 = nn.Conv2d(8, 16, kernel_size=5, padding=0, stride=2, dilation=1)
        self.conv3 = nn.Conv2d(16, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        self.conv4 = nn.Conv2d(32, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        # decoder mirrors the encoder with transposed convolutions
        self.deconv4 = nn.ConvTranspose2d(32, 32, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv3 = nn.ConvTranspose2d(32, 16, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv2 = nn.ConvTranspose2d(16, 8, kernel_size=5, padding=0, stride=2, dilation=1)
        self.deconv1 = nn.ConvTranspose2d(8, 3, kernel_size=3, padding=0, stride=1, dilation=1)
        self.nonlin = nn.PReLU()

    def forward(self, **kwargs):
        """Encode kwargs['s_t'] into a latent map z and decode it back."""
        x = kwargs['s_t']
        z = self.nonlin(self.conv1(x))
        z = self.nonlin(self.conv2(z))
        z = self.nonlin(self.conv3(z))
        z = self.nonlin(self.conv4(z))
        _x_ = self.nonlin(self.deconv4(z))
        _x_ = self.nonlin(self.deconv3(_x_))
        _x_ = self.nonlin(self.deconv2(_x_))
        _x_ = self.nonlin(self.deconv1(_x_))
        output = {
            'x': x,
            'z': z,
            '_x_': _x_
        }
        return output

    def save(self, apath, file='model_latest.pt'):
        """Save only the state_dict under apath/file."""
        save_dirs = [os.path.join(apath, file)]
        for s in save_dirs:
            torch.save(self.state_dict(), s)

    def save_model(self, path, filename):
        """Save a self-describing checkpoint (class, config, weights)."""
        model = {
            'model': KID_Eye,
            'config': self.config,
            'state_dict': self.state_dict(),
        }
        torch.save(model, path + filename)

    def load(self, apath, file='model_latest.pt', resume=-1):
        """Load weights saved by save(); only resume == -1 triggers a load."""
        load_from = None
        kwargs = {}
        if resume == -1:
            load_from = torch.load(os.path.join(apath, file), **kwargs)
        if load_from:
            self.load_state_dict(load_from, strict=False)

    @staticmethod
    def load_model(path, filename):
        """Rebuild a model from a save_model() checkpoint.

        BUGFIX: pass map_location (mirroring Mind_of_KID.load_model) so that
        checkpoints saved on a GPU machine remain loadable on CPU-only hosts;
        the previous version called torch.load without map_location and
        crashed in that scenario.
        """
        if torch.cuda.is_available():
            checkpoint = torch.load(path + filename, map_location='cuda')
        else:
            checkpoint = torch.load(path + filename, map_location='cpu')
        model = checkpoint['model'](**checkpoint['config'])
        model.load_state_dict(checkpoint['state_dict'])
        return model
| [
"alexbaranski@gmail.com"
] | alexbaranski@gmail.com |
eb07d2a3f8f793245785b8e732d7d785d49671b6 | 282d0a84b45b12359b96bbf0b1d7ca9ee0cb5d19 | /Malware1/venv/Lib/site-packages/scipy/interpolate/fitpack2.py | 0f14d84f30435f315adac039526c16ae5d5cd92f | [] | no_license | sameerakhtar/CyberSecurity | 9cfe58df98495eac6e4e2708e34e70b7e4c055d3 | 594973df27b4e1a43f8faba0140ce7d6c6618f93 | refs/heads/master | 2022-12-11T11:53:40.875462 | 2020-09-07T23:13:22 | 2020-09-07T23:13:22 | 293,598,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:422178a9adf00766a95a781c4d8e1837d120bc65542ddd936c9f14decc375ae8
size 62749
| [
"46763165+sameerakhtar@users.noreply.github.com"
] | 46763165+sameerakhtar@users.noreply.github.com |
8cc677a48f04b190927b1c3641a43c3ee1fa6f66 | f6bb9607669ebddde78700774fbc3d1c75c5978a | /cbsqt/main.py | e8d4c84f0f8f60ebf886a58c2985a2c2d6cbfedd | [] | no_license | Applelovic/cbs_qt | d2d0adf9400f3188a0e04a0584f76e91f3e2cd32 | 98df70fe56215eedf3b13a12b173137a631dacd6 | refs/heads/master | 2020-05-27T17:00:58.020525 | 2019-06-10T10:18:58 | 2019-06-10T10:18:58 | 188,713,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | from cbsqt.config import TimeAxis
from cbsqt.job import JobBook
# from multiprocessing import Pool
def run(start, end, jobs=JobBook):
    """Drive every job in *jobs* over the time axis from *start* to *end*.

    For each tick, every job is advanced to that time and its snapshot is
    executed when the job signals it should run; afterwards each job gets a
    final termination call.
    NOTE: a multiprocessing.Pool variant existed here but is disabled —
    snapshots currently run sequentially.
    """
    axis = TimeAxis(start, end)
    for tick in axis.elements:
        for job in jobs:
            job.set_time(tick)
            if job.run_singal():
                job.run_snapshot()
    for job in jobs:
        job.termination()
run('2018-01-01', '2018-01-31')
print('aaaa')
| [
"applelovic@gmail.com"
] | applelovic@gmail.com |
70274af4500e590ef677311dc84936adacc1a325 | f8032f2c85291232a51397176df2b88c76415144 | /ProjectFiles/project_1/complex_heuristics/assignment.py | ec78ba72284e7512ff8de214e97e1b17b751701e | [] | no_license | jbaker8935/analyzing-the-new-york-subway-dataset | 34ba61cbf7381ab7fc72e36c61c170fbb093bb9f | c221715b2848bff6ee2409a9b43b7c7c477a438c | refs/heads/master | 2021-01-23T07:21:17.308806 | 2015-04-06T17:15:05 | 2015-04-06T17:15:05 | 33,494,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,871 | py | from __future__ import division
import numpy
import pandas
import statsmodels.api as sm
import sys
def complex_heuristic(file_path):
    """Predict Titanic survival for each passenger in the CSV at *file_path*.

    Heuristic: a passenger is predicted to survive (1) when female, or when
    under 18 years old AND travelling in first class (Pclass == 1);
    otherwise they are predicted to perish (0).

    Returns a dict mapping PassengerId -> 1 (survived) / 0 (perished).
    """
    df = pandas.read_csv(file_path)
    predictions = {}
    for _, passenger in df.iterrows():
        survived = (passenger['Sex'] == 'female'
                    or (passenger['Age'] < 18 and passenger['Pclass'] == 1))
        predictions[passenger['PassengerId']] = 1 if survived else 0
    return predictions
def check_accuracy(file_name):
    """Return the fraction of rows whose heuristic prediction matches the
    CSV's 'Survived' column (a float in [0, 1])."""
    df = pandas.read_csv(file_name)
    predictions = complex_heuristic(file_name)
    total_count = 0
    correct_count = 0
    for _, row in df.iterrows():
        total_count += 1
        if predictions[row['PassengerId']] == row['Survived']:
            correct_count += 1
    return correct_count / total_count
if __name__ == "__main__":
complex_heuristic_success_rate = check_accuracy('titanic_data.csv')
print (complex_heuristic_success_rate)
| [
"farmerjohn1958@comcast.net"
] | farmerjohn1958@comcast.net |
994f2c89af3d7a810699ce6e774d15218202a251 | a07ab09632549e87c3b08dc486fb77d3bfc32a2d | /Micropython/blynklib_mp.py | 42677ca7426a8a2f4047479b92f3b1372c975c20 | [] | no_license | ryszard-raby/IoT | c89a193d79c8043e1d14f818ff59bded4d14a705 | cd92950be433ba7ea2f8e040a726b1eee0497343 | refs/heads/master | 2023-08-11T09:04:56.430429 | 2023-08-05T18:24:23 | 2023-08-05T18:24:23 | 123,737,095 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,304 | py | # Copyright (c) 2019-2020 Anton Morozenko
# Copyright (c) 2015-2019 Volodymyr Shymanskyy.
# See the file LICENSE for copying permission.
__version__ = '0.2.6'
import usocket as socket
import utime as time
import ustruct as struct
import uselect as select
from micropython import const
# MicroPython aliases: millisecond tick clock and sleep from utime, and map
# the OSError name onto IOError so socket failures can be caught under
# either name throughout this module.
ticks_ms = time.ticks_ms
sleep_ms = time.sleep_ms
IOError = OSError
LOGO = """
___ __ __
/ _ )/ /_ _____ / /__
/ _ / / // / _ \\/ '_/
/____/_/\\_, /_//_/_/\\_\\
/___/ for Python v{}\n""".format(__version__)
def stub_log(*args):
    """No-op logger: accepts any arguments and does nothing.

    Used as the default ``log`` callback when the caller supplies none.
    """
    return None
class BlynkError(Exception):
    """Base error for Blynk protocol and connection failures."""
    pass
class RedirectError(Exception):
    """Raised when the server answers with MSG_REDIRECT, carrying the new
    server/port the client should reconnect to."""

    def __init__(self, server, port):
        self.port = port
        self.server = server
class Protocol(object):
    """Blynk wire protocol: packing outgoing and parsing incoming messages.

    Every message is framed as a 5-byte big-endian header
    ``(type:uint8, msg_id:uint16, length_or_status:uint16)`` followed by a
    NUL-separated UTF-8 argument body.
    """

    # message type codes
    MSG_RSP = const(0)
    MSG_LOGIN = const(2)
    MSG_PING = const(6)
    MSG_TWEET = const(12)
    MSG_EMAIL = const(13)
    MSG_NOTIFY = const(14)
    MSG_BRIDGE = const(15)
    MSG_HW_SYNC = const(16)
    MSG_INTERNAL = const(17)
    MSG_PROPERTY = const(19)
    MSG_HW = const(20)
    MSG_REDIRECT = const(41)
    MSG_HEAD_LEN = const(5)
    # response status codes
    STATUS_INVALID_TOKEN = const(9)
    STATUS_OK = const(200)
    VPIN_MAX_NUM = const(32)
    _msg_id = 1

    def _get_msg_id(self, **kwargs):
        """Return an explicit 'msg_id' override, else the next id in 1..0xFFFF."""
        if 'msg_id' in kwargs:
            return kwargs['msg_id']
        self._msg_id += const(1)
        if self._msg_id > const(0xFFFF):
            # BUGFIX: reset the counter on wrap-around. The previous code
            # returned 1 but left self._msg_id growing past 0xFFFF, so every
            # subsequent message reused id 1 instead of cycling 1..0xFFFF.
            self._msg_id = const(1)
        return self._msg_id

    def _pack_msg(self, msg_type, *args, **kwargs):
        """Frame *args* (stringified, NUL-joined, UTF-8) under a msg_type header."""
        data = ('\0'.join([str(curr_arg) for curr_arg in args])).encode('utf-8')
        return struct.pack('!BHH', msg_type, self._get_msg_id(**kwargs), len(data)) + data

    def parse_response(self, rsp_data, msg_buffer):
        """Parse one incoming frame.

        Returns ``(msg_type, msg_id, h_data, msg_args)`` where h_data is a
        status code for header-only messages or the body length otherwise,
        and msg_args is the decoded NUL-split body for message types that
        carry one.  Raises BlynkError on malformed, oversized or unknown
        messages.
        """
        msg_args = []
        try:
            msg_type, msg_id, h_data = struct.unpack('!BHH', rsp_data[:self.MSG_HEAD_LEN])
        except Exception as p_err:
            raise BlynkError('Message parse error: {}'.format(p_err))
        if msg_id == 0:
            raise BlynkError('invalid msg_id == 0')
        elif h_data >= msg_buffer:
            raise BlynkError('Command too long. Length = {}'.format(h_data))
        elif msg_type in (self.MSG_RSP, self.MSG_PING):
            # header-only messages: h_data carries the status, no body follows
            pass
        elif msg_type in (self.MSG_HW, self.MSG_BRIDGE, self.MSG_INTERNAL, self.MSG_REDIRECT):
            msg_body = rsp_data[self.MSG_HEAD_LEN: self.MSG_HEAD_LEN + h_data]
            msg_args = [itm.decode('utf-8') for itm in msg_body.split(b'\0')]
        else:
            raise BlynkError("Unknown message type: '{}'".format(msg_type))
        return msg_type, msg_id, h_data, msg_args

    def heartbeat_msg(self, heartbeat, rcv_buffer):
        """Internal message advertising client version, buffer and heartbeat."""
        return self._pack_msg(self.MSG_INTERNAL, 'ver', __version__, 'buff-in', rcv_buffer, 'h-beat', heartbeat,
                              'dev', 'mpython')

    def login_msg(self, token):
        """Authentication message carrying the device token."""
        return self._pack_msg(self.MSG_LOGIN, token)

    def ping_msg(self):
        """Keep-alive ping (header only)."""
        return self._pack_msg(self.MSG_PING)

    def response_msg(self, *args, **kwargs):
        """Generic response; pass msg_id= to answer a specific request."""
        return self._pack_msg(self.MSG_RSP, *args, **kwargs)

    def virtual_write_msg(self, v_pin, *val):
        """Write value(s) to a virtual pin."""
        return self._pack_msg(self.MSG_HW, 'vw', v_pin, *val)

    def virtual_sync_msg(self, *pins):
        """Request the server to re-send the state of the given virtual pins."""
        return self._pack_msg(self.MSG_HW_SYNC, 'vr', *pins)

    def email_msg(self, to, subject, body):
        """Email notification message."""
        return self._pack_msg(self.MSG_EMAIL, to, subject, body)

    def tweet_msg(self, msg):
        """Tweet notification message."""
        return self._pack_msg(self.MSG_TWEET, msg)

    def notify_msg(self, msg):
        """Push notification message."""
        return self._pack_msg(self.MSG_NOTIFY, msg)

    def set_property_msg(self, pin, prop, *val):
        """Set a widget property (e.g. color, label) on a virtual pin."""
        return self._pack_msg(self.MSG_PROPERTY, pin, prop, *val)

    def internal_msg(self, *args):
        """Arbitrary internal command message."""
        return self._pack_msg(self.MSG_INTERNAL, *args)
class Connection(Protocol):
    """Socket transport for the Blynk protocol: connect, authenticate,
    heartbeat, retry-aware send and timeout-aware receive."""
    SOCK_MAX_TIMEOUT = const(5)  # seconds, for auth/heartbeat waits
    SOCK_TIMEOUT = 0.05  # seconds, default read-poll interval
    # errno values treated as "no data available yet" rather than failures
    EAGAIN = const(11)
    ETIMEDOUT = const(60)
    RETRIES_TX_DELAY = const(2)  # ms between send retries
    RETRIES_TX_MAX_NUM = const(3)
    RECONNECT_SLEEP = const(1)  # seconds to pause after closing a connection
    TASK_PERIOD_RES = const(50)  # ms
    # connection state-machine values
    DISCONNECTED = const(0)
    CONNECTING = const(1)
    AUTHENTICATING = const(2)
    AUTHENTICATED = const(3)
    _state = None
    _socket = None
    # last-activity timestamps (ms ticks) driving the heartbeat logic
    _last_rcv_time = 0
    _last_ping_time = 0
    _last_send_time = 0
    def __init__(self, token, server='blynk-cloud.com', port=80, heartbeat=10, rcv_buffer=1024, log=stub_log):
        """Store connection parameters; no network activity happens here."""
        self.token = token
        self.server = server
        self.port = port
        self.heartbeat = heartbeat  # seconds
        self.rcv_buffer = rcv_buffer  # max inbound message size, bytes
        self.log = log
    def _set_socket_timeout(self, timeout):
        # Bound blocking reads by polling the socket for up to *timeout*
        # seconds (MicroPython sockets lack a settimeout on some ports).
        p = select.poll()
        p.register(self._socket)
        p.poll(int(timeout * const(1000)))
    def send(self, data):
        """Send raw bytes with a few retries; returns the byte count from the
        socket, or None if every retry failed."""
        retries = self.RETRIES_TX_MAX_NUM
        while retries > 0:
            try:
                retries -= 1
                self._last_send_time = ticks_ms()
                return self._socket.send(data)
            except (IOError, OSError):
                sleep_ms(self.RETRIES_TX_DELAY)
    def receive(self, length, timeout):
        """Read up to *length* bytes; returns b'' when nothing arrives in time.

        EAGAIN/ETIMEDOUT (matched by errno substring in the error text) are
        treated as "no data yet"; any other socket error propagates.
        """
        d_buff = b''
        try:
            self._set_socket_timeout(timeout)
            d_buff += self._socket.recv(length)
            if len(d_buff) >= length:
                d_buff = d_buff[:length]
            return d_buff
        except (IOError, OSError) as err:
            if str(err) == 'timed out':
                return b''
            if str(self.EAGAIN) in str(err) or str(self.ETIMEDOUT) in str(err):
                return b''
            raise
    def is_server_alive(self):
        """Heartbeat bookkeeping: returns False if nothing was received for
        1.5x the heartbeat period; otherwise pings the server when idle."""
        now = ticks_ms()
        h_beat_ms = self.heartbeat * const(1000)
        rcv_delta = now - self._last_rcv_time
        ping_delta = now - self._last_ping_time
        send_delta = now - self._last_send_time
        if rcv_delta > h_beat_ms + (h_beat_ms // const(2)):
            return False
        if (ping_delta > h_beat_ms // const(10)) and (send_delta > h_beat_ms or rcv_delta > h_beat_ms):
            self.send(self.ping_msg())
            self.log('Heartbeat time: {}'.format(now))
            self._last_ping_time = now
        return True
    def _get_socket(self):
        """Open the TCP connection to the Blynk server."""
        try:
            self._state = self.CONNECTING
            self._socket = socket.socket()
            self._socket.connect(socket.getaddrinfo(self.server, self.port)[0][-1])
            self._set_socket_timeout(self.SOCK_TIMEOUT)
            self.log('Connected to server')
        except Exception as g_exc:
            raise BlynkError('Server connection failed: {}'.format(g_exc))
    def _authenticate(self):
        """Send the auth token and validate the server's response.

        Raises RedirectError when the server points us to another host, and
        BlynkError on timeout, bad token, or any non-OK status.
        """
        self.log('Authenticating device...')
        self._state = self.AUTHENTICATING
        self.send(self.login_msg(self.token))
        rsp_data = self.receive(self.rcv_buffer, self.SOCK_MAX_TIMEOUT)
        if not rsp_data:
            raise BlynkError('Auth stage timeout')
        msg_type, _, status, args = self.parse_response(rsp_data, self.rcv_buffer)
        if status != self.STATUS_OK:
            if status == self.STATUS_INVALID_TOKEN:
                raise BlynkError('Invalid Auth Token')
            if msg_type == self.MSG_REDIRECT:
                raise RedirectError(*args)
            raise BlynkError('Auth stage failed. Status={}'.format(status))
        self._state = self.AUTHENTICATED
        self.log('Access granted')
    def _set_heartbeat(self):
        """Announce heartbeat/buffer parameters and require an OK status."""
        self.send(self.heartbeat_msg(self.heartbeat, self.rcv_buffer))
        rcv_data = self.receive(self.rcv_buffer, self.SOCK_MAX_TIMEOUT)
        if not rcv_data:
            raise BlynkError('Heartbeat stage timeout')
        _, _, status, _ = self.parse_response(rcv_data, self.rcv_buffer)
        if status != self.STATUS_OK:
            raise BlynkError('Set heartbeat returned code={}'.format(status))
        self.log('Heartbeat = {} sec. MaxCmdBuffer = {} bytes'.format(self.heartbeat, self.rcv_buffer))
    def connected(self):
        """True once authentication has completed."""
        return True if self._state == self.AUTHENTICATED else False
class Blynk(Connection):
    """High-level Blynk client: event registration/dispatch and the main
    connect/read/heartbeat loop (call run() repeatedly from your main loop)."""
    _CONNECT_TIMEOUT = const(30)  # 30sec
    # event-name building blocks; handler keys are lowercase strings like
    # 'read v5', 'write v3', 'internal_rtc', 'connect', 'disconnect'
    _VPIN_WILDCARD = '*'
    _VPIN_READ = 'read v'
    _VPIN_WRITE = 'write v'
    _INTERNAL = 'internal_'
    _CONNECT = 'connect'
    _DISCONNECT = 'disconnect'
    _VPIN_READ_ALL = '{}{}'.format(_VPIN_READ, _VPIN_WILDCARD)
    _VPIN_WRITE_ALL = '{}{}'.format(_VPIN_WRITE, _VPIN_WILDCARD)
    # NOTE(review): class-level dict — handlers are shared across all Blynk
    # instances in one process.
    _events = {}
    def __init__(self, token, **kwargs):
        Connection.__init__(self, token, **kwargs)
        self._start_time = ticks_ms()
        self._last_rcv_time = ticks_ms()
        self._last_send_time = ticks_ms()
        self._last_ping_time = ticks_ms()
        self._state = self.DISCONNECTED
        print(LOGO)
    def connect(self, timeout=_CONNECT_TIMEOUT):
        """Try to connect+authenticate until success or *timeout* seconds.

        Fires the 'connect' handler and returns True on success; returns
        False when the deadline passes.  Follows server redirects.
        """
        end_time = time.time() + timeout
        while not self.connected():
            if self._state == self.DISCONNECTED:
                try:
                    self._get_socket()
                    self._authenticate()
                    self._set_heartbeat()
                    self._last_rcv_time = ticks_ms()
                    self.log('Registered events: {}\n'.format(list(self._events.keys())))
                    self.call_handler(self._CONNECT)
                    return True
                except BlynkError as b_err:
                    self.disconnect(b_err)
                    sleep_ms(self.TASK_PERIOD_RES)
                except RedirectError as r_err:
                    # reconnect to the server the redirect pointed at
                    self.disconnect()
                    self.server = r_err.server
                    self.port = r_err.port
                    sleep_ms(self.TASK_PERIOD_RES)
            if time.time() >= end_time:
                return False
    def disconnect(self, err_msg=None):
        """Fire the 'disconnect' handler, close the socket and pause briefly."""
        self.call_handler(self._DISCONNECT)
        if self._socket:
            self._socket.close()
        self._state = self.DISCONNECTED
        if err_msg:
            self.log('[ERROR]: {}\nConnection closed'.format(err_msg))
        time.sleep(self.RECONNECT_SLEEP)
    def virtual_write(self, v_pin, *val):
        """Push value(s) to virtual pin *v_pin* on the server."""
        return self.send(self.virtual_write_msg(v_pin, *val))
    def virtual_sync(self, *v_pin):
        """Ask the server to re-send the state of the given virtual pins."""
        return self.send(self.virtual_sync_msg(*v_pin))
    def email(self, to, subject, body):
        """Send an email notification via the server."""
        return self.send(self.email_msg(to, subject, body))
    def tweet(self, msg):
        """Send a tweet notification via the server."""
        return self.send(self.tweet_msg(msg))
    def notify(self, msg):
        """Send a push notification via the server."""
        return self.send(self.notify_msg(msg))
    def set_property(self, v_pin, property_name, *val):
        """Set a widget property (color, label, ...) on a virtual pin."""
        return self.send(self.set_property_msg(v_pin, property_name, *val))
    def internal(self, *args):
        """Send an internal command to the server."""
        return self.send(self.internal_msg(*args))
    def handle_event(blynk, event_name):
        # Decorator factory: used as @blynk.handle_event('write V3') etc.,
        # hence the first parameter is the Blynk instance, not 'self'.
        class Deco(object):
            def __init__(self, func):
                self.func = func
                # wildcard 'read V*' and 'write V*' events handling
                if str(event_name).lower() in (blynk._VPIN_READ_ALL, blynk._VPIN_WRITE_ALL):
                    event_base_name = str(event_name).split(blynk._VPIN_WILDCARD)[0]
                    for i in range(blynk.VPIN_MAX_NUM + 1):
                        blynk._events['{}{}'.format(event_base_name.lower(), i)] = func
                else:
                    blynk._events[str(event_name).lower()] = func
            def __call__(self):
                return self.func()
        return Deco
    def call_handler(self, event, *args, **kwargs):
        """Invoke the registered handler for *event*, if any."""
        if event in self._events.keys():
            self.log("Event: ['{}'] -> {}".format(event, args))
            self._events[event](*args, **kwargs)
    def process(self, msg_type, msg_id, msg_len, msg_args):
        """Dispatch one parsed message: answer pings, route hw/internal
        messages to their registered handlers."""
        if msg_type == self.MSG_RSP:
            self.log('Response status: {}'.format(msg_len))
        elif msg_type == self.MSG_PING:
            self.send(self.response_msg(self.STATUS_OK, msg_id=msg_id))
        elif msg_type in (self.MSG_HW, self.MSG_BRIDGE, self.MSG_INTERNAL):
            if msg_type == self.MSG_INTERNAL:
                self.call_handler("{}{}".format(self._INTERNAL, msg_args[0]), msg_args[1:])
            elif len(msg_args) >= const(3) and msg_args[0] == 'vw':
                self.call_handler("{}{}".format(self._VPIN_WRITE, msg_args[1]), int(msg_args[1]), msg_args[2:])
            elif len(msg_args) == const(2) and msg_args[0] == 'vr':
                self.call_handler("{}{}".format(self._VPIN_READ, msg_args[1]), int(msg_args[1]))
    def read_response(self, timeout=0.5):
        """Read and process incoming messages for up to *timeout* seconds."""
        end_time = time.time() + timeout
        while time.time() <= end_time:
            rsp_data = self.receive(self.rcv_buffer, self.SOCK_TIMEOUT)
            if rsp_data:
                self._last_rcv_time = ticks_ms()
                msg_type, msg_id, h_data, msg_args = self.parse_response(rsp_data, self.rcv_buffer)
                self.process(msg_type, msg_id, h_data, msg_args)
    def run(self):
        """One iteration of the client loop: (re)connect if needed, otherwise
        pump incoming messages and keep the heartbeat alive."""
        if not self.connected():
            self.connect()
        else:
            try:
                self.read_response(timeout=self.SOCK_TIMEOUT)
                if not self.is_server_alive():
                    self.disconnect('Server is offline')
            except KeyboardInterrupt:
                raise
            except BlynkError as b_err:
                self.log(b_err)
                self.disconnect()
            except Exception as g_exc:
                self.log(g_exc)
"ryszard.raby@gmail.com"
] | ryszard.raby@gmail.com |
f046f12d7b3f16ea03cc78bebd1b08607193f082 | c086a38a366b0724d7339ae94d6bfb489413d2f4 | /PythonEnv/Lib/site-packages/pythonwin/pywin/framework/editor/frame.py | 9e74114d102460a9401b98c1320ac20636a4a733 | [] | no_license | FlowkoHinti/Dionysos | 2dc06651a4fc9b4c8c90d264b2f820f34d736650 | d9f8fbf3bb0713527dc33383a7f3e135b2041638 | refs/heads/master | 2021-03-02T01:14:18.622703 | 2020-06-09T08:28:44 | 2020-06-09T08:28:44 | 245,826,041 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,164 | py | # frame.py - The MDI frame window for an editor.
import pywin.framework.window
import win32ui
import win32con
import afxres
from . import ModuleBrowser
class EditorFrame(pywin.framework.window.MDIChildWnd):
    """MDI child frame hosting an editor document: a module-browser pane on
    the left and two stacked editor views on the right."""
    def OnCreateClient(self, cp, context):
        """Build the splitter layout and attach the three views."""
        # Create the default view as specified by the template (ie, the editor view)
        view = context.template.MakeView(context.doc)
        # Create the browser view.
        browserView = ModuleBrowser.BrowserView(context.doc)
        view2 = context.template.MakeView(context.doc)
        splitter = win32ui.CreateSplitter()
        style = win32con.WS_CHILD | win32con.WS_VISIBLE
        # outer splitter: 1 row x 2 columns (browser | editors)
        splitter.CreateStatic(self, 1, 2, style, win32ui.AFX_IDW_PANE_FIRST)
        sub_splitter = self.sub_splitter = win32ui.CreateSplitter()
        # inner splitter: 2 rows x 1 column for the two editor views
        sub_splitter.CreateStatic(splitter, 2, 1, style, win32ui.AFX_IDW_PANE_FIRST + 1)
        # Note we must add the default view first, so that doc.GetFirstView() returns the editor view.
        sub_splitter.CreateView(view, 1, 0, (0, 0))
        splitter.CreateView(browserView, 0, 0, (0, 0))
        sub_splitter.CreateView(view2, 0, 0, (0, 0))
        ## print "First view is", context.doc.GetFirstView()
        ## print "Views are", view, view2, browserView
        ## print "Parents are", view.GetParent(), view2.GetParent(), browserView.GetParent()
        ## print "Splitter is", splitter
        ## print "sub splitter is", sub_splitter
        ## Old
        ## splitter.CreateStatic (self, 1, 2)
        ## splitter.CreateView(view, 0, 1, (0,0)) # size ignored.
        ## splitter.CreateView (browserView, 0, 0, (0, 0))
        # Restrict the size of the browser splitter (and we can avoid filling
        # it until it is shown)
        splitter.SetColumnInfo(0, 10, 20)
        # And the active view is our default view (so it gets initial focus)
        self.SetActiveView(view)
    def GetEditorView(self):
        """Return the taller of the two editor (scintilla) views."""
        # In a multi-view (eg, splitter) environment, get
        # an editor (ie, scintilla) view
        # Look for the splitter opened the most!
        if self.sub_splitter is None:
            return self.GetDlgItem(win32ui.AFX_IDW_PANE_FIRST)
        v1 = self.sub_splitter.GetPane(0, 0)
        v2 = self.sub_splitter.GetPane(1, 0)
        r1 = v1.GetWindowRect()
        r2 = v2.GetWindowRect()
        # rects are (left, top, right, bottom): compare heights
        if r1[3] - r1[1] > r2[3] - r2[1]:
            return v1
        return v2
    def GetBrowserView(self):
        """Return the module-browser view attached to the active document."""
        # XXX - should fix this :-)
        return self.GetActiveDocument().GetAllViews()[1]
    def OnClose(self):
        """Prompt to save; cancel aborts the close, otherwise tear down the
        browser and splitter before delegating to the base implementation."""
        doc = self.GetActiveDocument()
        if not doc.SaveModified():
            ## Cancel button selected from Save dialog, do not actually close
            ## print 'close cancelled'
            return 0
        ## So the 'Save' dialog doesn't come up twice
        doc._obj_.SetModifiedFlag(False)
        # Must force the module browser to close itself here (OnDestroy for the view itself is too late!)
        self.sub_splitter = None  # ensure no circles!
        self.GetBrowserView().DestroyBrowser()
        return self._obj_.OnClose()
| [
"="
] | = |
8a5b3b98db7d4273488bcd017c8b483873753764 | 91d28b7c2b4e8df88217614453b47e4609720b86 | /practical 2/prac2git1.py | 53216e95d289fb82a50851e1de4331415ad85817 | [] | no_license | adamsut/practical2 | 16039e805e1ce12f21383cf7f9786534698477ea | 1801b8f3ee91219c73b79cfe948e6cb0ba2c57de | refs/heads/master | 2016-08-12T08:04:56.750593 | 2016-04-18T13:05:35 | 2016-04-18T13:05:35 | 51,300,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | import platform as p
print (p.win32_ver())
print (p.machine())
print (p.platform())
| [
"user@193.1.165.232"
] | user@193.1.165.232 |
1c10b542571142609fd929c7bb5db2a96ef660cd | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/H3C-PORTAL-MIB.py | 64fd13746f0c6c4162f4bf5d9b6313b40e2af202 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 30,326 | py | #
# PySNMP MIB module H3C-PORTAL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/H3C-PORTAL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:10:13 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsIntersection, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsIntersection", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion")
h3cCommon, = mibBuilder.importSymbols("HUAWEI-3COM-OID-MIB", "h3cCommon")
ifIndex, InterfaceIndex = mibBuilder.importSymbols("IF-MIB", "ifIndex", "InterfaceIndex")
InetAddressType, InetAddressPrefixLength, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddressPrefixLength", "InetAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
NotificationType, Bits, TimeTicks, iso, IpAddress, MibIdentifier, Counter64, Unsigned32, Counter32, ModuleIdentity, Integer32, Gauge32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn = mibBuilder.importSymbols("SNMPv2-SMI", "NotificationType", "Bits", "TimeTicks", "iso", "IpAddress", "MibIdentifier", "Counter64", "Unsigned32", "Counter32", "ModuleIdentity", "Integer32", "Gauge32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn")
TruthValue, TextualConvention, DisplayString, MacAddress, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString", "MacAddress", "RowStatus")
# --- Module identity and scalar configuration (OID subtree ...2011.10.2.99.1) ---
# Global portal settings: user-count limits/state, enable flag, and NAS-Id.
h3cPortal = ModuleIdentity((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99))
if mibBuilder.loadTexts: h3cPortal.setLastUpdated('201111080000Z')
if mibBuilder.loadTexts: h3cPortal.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
h3cPortalConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1))
h3cPortalMaxUserNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1, 1), Integer32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cPortalMaxUserNumber.setStatus('current')
h3cPortalCurrentUserNumber = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalCurrentUserNumber.setStatus('current')
# Read-only enabled(1)/disabled(2) enumeration.
h3cPortalStatus = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatus.setStatus('current')
h3cPortalUserNumberUpperLimit = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalUserNumberUpperLimit.setStatus('current')
h3cPortalNasId = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 1, 5), OctetString()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cPortalNasId.setStatus('current')
# --- Table definitions (subtree ...99.2) ---
h3cPortalTables = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2))
# Portal server table (...99.2.1), indexed by server name: URL and port.
h3cPortalServerTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 1), )
if mibBuilder.loadTexts: h3cPortalServerTable.setStatus('current')
h3cPortalServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 1, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalServerName"))
if mibBuilder.loadTexts: h3cPortalServerEntry.setStatus('current')
h3cPortalServerName = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 1, 1, 1), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cPortalServerName.setStatus('current')
h3cPortalServerUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 1, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 127))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cPortalServerUrl.setStatus('current')
h3cPortalServerPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65534))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: h3cPortalServerPort.setStatus('current')
# Per-interface authentication counters (...99.2.2), indexed by IF-MIB ifIndex.
h3cPortalIfInfoTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 2), )
if mibBuilder.loadTexts: h3cPortalIfInfoTable.setStatus('current')
h3cPortalIfInfoEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: h3cPortalIfInfoEntry.setStatus('current')
h3cPortalAuthReqNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalAuthReqNumber.setStatus('current')
h3cPortalAuthSuccNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 2, 1, 2), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalAuthSuccNumber.setStatus('current')
h3cPortalAuthFailNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 2, 1, 3), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalAuthFailNumber.setStatus('current')
# Per-interface server URL table (...99.2.3), read-create rows.
h3cPortalIfServerTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 3), )
if mibBuilder.loadTexts: h3cPortalIfServerTable.setStatus('current')
h3cPortalIfServerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 3, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalIfServerIndex"))
if mibBuilder.loadTexts: h3cPortalIfServerEntry.setStatus('current')
h3cPortalIfServerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cPortalIfServerIndex.setStatus('current')
h3cPortalIfServerUrl = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 3, 1, 2), OctetString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalIfServerUrl.setStatus('current')
h3cPortalIfServerRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 3, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalIfServerRowStatus.setStatus('current')
# Interface+VLAN to NAS-Id mapping table (...99.2.4), read-only NAS-Id column.
h3cPortalIfVlanNasIDTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 4), )
if mibBuilder.loadTexts: h3cPortalIfVlanNasIDTable.setStatus('current')
h3cPortalIfVlanNasIDEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 4, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalIfVlanNasIDIfIndex"), (0, "H3C-PORTAL-MIB", "h3cPortalIfVlanNasIDVlanID"))
if mibBuilder.loadTexts: h3cPortalIfVlanNasIDEntry.setStatus('current')
h3cPortalIfVlanNasIDIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 4, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: h3cPortalIfVlanNasIDIfIndex.setStatus('current')
h3cPortalIfVlanNasIDVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647)))
if mibBuilder.loadTexts: h3cPortalIfVlanNasIDVlanID.setStatus('current')
h3cPortalIfVlanNasIDNasID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalIfVlanNasIDNasID.setStatus('current')
# SSID-based authentication-free rule table (...99.2.5), read-create rows.
h3cPortalSSIDFreeRuleTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 5), )
if mibBuilder.loadTexts: h3cPortalSSIDFreeRuleTable.setStatus('current')
h3cPortalSSIDFreeRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 5, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalSSIDFreeRuleIndex"))
if mibBuilder.loadTexts: h3cPortalSSIDFreeRuleEntry.setStatus('current')
h3cPortalSSIDFreeRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cPortalSSIDFreeRuleIndex.setStatus('current')
h3cPortalSSIDFreeRuleSrcSSID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(1, 128))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalSSIDFreeRuleSrcSSID.setStatus('current')
h3cPortalSSIDFreeRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalSSIDFreeRuleRowStatus.setStatus('current')
# MAC-trigger server table (...99.2.6): server address type/address/port.
h3cPortalMacTriggerSrvTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6), )
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvTable.setStatus('current')
h3cPortalMacTriggerSrvEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalMacTriggerSrvIndex"))
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvEntry.setStatus('current')
h3cPortalMacTriggerSrvIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvIndex.setStatus('current')
h3cPortalMacTriggerSrvIPAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1, 2), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvIPAddrType.setStatus('current')
h3cPortalMacTriggerSrvIP = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1, 3), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvIP.setStatus('current')
h3cPortalMacTriggerSrvPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65534))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvPort.setStatus('current')
h3cPortalMacTriggerSrvRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 6, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerSrvRowStatus.setStatus('current')
# Per-interface MAC-trigger settings (...99.2.7): detect period and flow threshold.
h3cPortalMacTriggerOnIfTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7), )
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfTable.setStatus('current')
h3cPortalMacTriggerOnIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalMacTriggerOnIfIfIndex"))
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfEntry.setStatus('current')
h3cPortalMacTriggerOnIfIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7, 1, 1), InterfaceIndex())
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfIfIndex.setStatus('current')
h3cPortalMacTriggerOnIfDetctFlowPeriod = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7, 1, 2), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfDetctFlowPeriod.setStatus('current')
h3cPortalMacTriggerOnIfThresholdFlow = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7, 1, 3), Unsigned32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfThresholdFlow.setStatus('current')
h3cPortalMacTriggerOnIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 7, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalMacTriggerOnIfRowStatus.setStatus('current')
# Authentication-free rule table (...99.2.8): traffic matching these
# source/destination/protocol criteria bypasses portal authentication.
h3cPortalFreeRuleTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8), )
if mibBuilder.loadTexts: h3cPortalFreeRuleTable.setStatus('current')
h3cPortalFreeRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalFreeRuleIndex"))
if mibBuilder.loadTexts: h3cPortalFreeRuleEntry.setStatus('current')
h3cPortalFreeRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cPortalFreeRuleIndex.setStatus('current')
h3cPortalFreeRuleSrcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 2), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcIfIndex.setStatus('current')
h3cPortalFreeRuleSrcVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcVlanID.setStatus('current')
h3cPortalFreeRuleSrcMac = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 4), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcMac.setStatus('current')
h3cPortalFreeRuleAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 5), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleAddrType.setStatus('current')
h3cPortalFreeRuleSrcAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 6), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcAddr.setStatus('current')
h3cPortalFreeRuleSrcPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 7), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcPrefix.setStatus('current')
h3cPortalFreeRuleDstAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 8), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleDstAddr.setStatus('current')
h3cPortalFreeRuleDstPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 9), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleDstPrefix.setStatus('current')
# Protocol column: invalid(0)/tcp(6)/udp(17) -- standard IP protocol numbers.
h3cPortalFreeRuleProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 6, 17))).clone(namedValues=NamedValues(("invalid", 0), ("tcp", 6), ("udp", 17)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleProtocol.setStatus('current')
h3cPortalFreeRuleSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 11), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleSrcPort.setStatus('current')
h3cPortalFreeRuleDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 12), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleDstPort.setStatus('current')
h3cPortalFreeRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 8, 1, 13), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalFreeRuleRowStatus.setStatus('current')
# Forbidden rule table (...99.2.9): same column layout as the free-rule table.
h3cPortalForbiddenRuleTable = MibTable((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9), )
if mibBuilder.loadTexts: h3cPortalForbiddenRuleTable.setStatus('current')
h3cPortalForbiddenRuleEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1), ).setIndexNames((0, "H3C-PORTAL-MIB", "h3cPortalForbiddenRuleIndex"))
if mibBuilder.loadTexts: h3cPortalForbiddenRuleEntry.setStatus('current')
h3cPortalForbiddenRuleIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 2147483647)))
if mibBuilder.loadTexts: h3cPortalForbiddenRuleIndex.setStatus('current')
h3cPortalForbiddenRuleSrcIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 2), InterfaceIndex()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcIfIndex.setStatus('current')
h3cPortalForbiddenRuleSrcVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 3), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcVlanID.setStatus('current')
h3cPortalForbiddenRuleSrcMac = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 4), MacAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcMac.setStatus('current')
h3cPortalForbiddenRuleAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 5), InetAddressType()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleAddrType.setStatus('current')
h3cPortalForbiddenRuleSrcAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 6), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcAddr.setStatus('current')
h3cPortalForbiddenRuleSrcPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 7), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcPrefix.setStatus('current')
h3cPortalForbiddenRuleDstAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 8), InetAddress()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleDstAddr.setStatus('current')
h3cPortalForbiddenRuleDstPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 9), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleDstPrefix.setStatus('current')
h3cPortalForbiddenRuleProtocol = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 10), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(0, 6, 17))).clone(namedValues=NamedValues(("invalid", 0), ("tcp", 6), ("udp", 17)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleProtocol.setStatus('current')
h3cPortalForbiddenRuleSrcPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 11), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleSrcPort.setStatus('current')
h3cPortalForbiddenRuleDstPort = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 12), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleDstPort.setStatus('current')
h3cPortalForbiddenRuleRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 2, 9, 1, 13), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: h3cPortalForbiddenRuleRowStatus.setStatus('current')
# --- Notifications (...99.3): server lost/regained traps and their varbinds ---
h3cPortalTraps = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3))
h3cPortalTrapPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3, 0))
h3cPortalServerLost = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3, 0, 1)).setObjects(("H3C-PORTAL-MIB", "h3cPortalServerName"), ("H3C-PORTAL-MIB", "h3cPortalFirstTrapTime"))
if mibBuilder.loadTexts: h3cPortalServerLost.setStatus('current')
h3cPortalServerGet = NotificationType((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3, 0, 2)).setObjects(("H3C-PORTAL-MIB", "h3cPortalServerName"), ("H3C-PORTAL-MIB", "h3cPortalFirstTrapTime"))
if mibBuilder.loadTexts: h3cPortalServerGet.setStatus('current')
h3cPortalTrapVarObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3, 1))
h3cPortalFirstTrapTime = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 3, 1, 1), TimeTicks()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: h3cPortalFirstTrapTime.setStatus('current')
# --- Authentication statistics (...99.4): read-only 64-bit counters ---
h3cPortalStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4))
h3cPortalStatAuthReq = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthReq.setStatus('current')
h3cPortalStatAckLogout = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAckLogout.setStatus('current')
h3cPortalStatNotifyLogout = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatNotifyLogout.setStatus('current')
h3cPortalStatChallengeTimeout = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatChallengeTimeout.setStatus('current')
h3cPortalStatChallengeBusy = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatChallengeBusy.setStatus('current')
h3cPortalStatChallengeFail = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatChallengeFail.setStatus('current')
h3cPortalStatAuthTimeout = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthTimeout.setStatus('current')
h3cPortalStatAuthFail = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthFail.setStatus('current')
h3cPortalStatPwdError = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatPwdError.setStatus('current')
h3cPortalStatAuthBusy = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthBusy.setStatus('current')
h3cPortalStatAuthDisordered = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthDisordered.setStatus('current')
h3cPortalStatAuthUnknownError = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthUnknownError.setStatus('current')
h3cPortalStatAuthResp = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 13), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatAuthResp.setStatus('current')
h3cPortalStatChallengeReq = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 14), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatChallengeReq.setStatus('current')
h3cPortalStatChallengeResp = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 4, 15), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalStatChallengeResp.setStatus('current')
# --- Packet statistics (...99.5): read-only 64-bit counters ---
h3cPortalPktStatistic = MibIdentifier((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5))
h3cPortalPktStaReqAuthNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 1), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaReqAuthNum.setStatus('current')
h3cPortalPktStaAckAuthSuccess = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 2), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckAuthSuccess.setStatus('current')
h3cPortalPktStaAckAuthReject = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckAuthReject.setStatus('current')
h3cPortalPktStaAckAuthEstablish = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 4), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckAuthEstablish.setStatus('current')
h3cPortalPktStaAckAuthBusy = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 5), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckAuthBusy.setStatus('current')
h3cPortalPktStaAckAuthAuthFail = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 6), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckAuthAuthFail.setStatus('current')
h3cPortalPktStaReqChallengeNum = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 7), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaReqChallengeNum.setStatus('current')
h3cPortalPktStaAckChallengeSuccess = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 8), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckChallengeSuccess.setStatus('current')
h3cPortalPktStaAckChallengeReject = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 9), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckChallengeReject.setStatus('current')
h3cPortalPktStaAckChallengeEstablish = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 10), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckChallengeEstablish.setStatus('current')
h3cPortalPktStaAckChallengeBusy = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 11), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckChallengeBusy.setStatus('current')
h3cPortalPktStaAckChallengeAuthFail = MibScalar((1, 3, 6, 1, 4, 1, 2011, 10, 2, 99, 5, 12), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: h3cPortalPktStaAckChallengeAuthFail.setStatus('current')
# Register every symbol defined above with the MIB builder under the
# "H3C-PORTAL-MIB" module name (pysmi-generated; keep in sync with the
# definitions above if regenerated).
mibBuilder.exportSymbols("H3C-PORTAL-MIB", h3cPortalStatistic=h3cPortalStatistic, h3cPortalTables=h3cPortalTables, h3cPortalMacTriggerSrvEntry=h3cPortalMacTriggerSrvEntry, h3cPortalForbiddenRuleSrcPort=h3cPortalForbiddenRuleSrcPort, h3cPortalFirstTrapTime=h3cPortalFirstTrapTime, h3cPortalForbiddenRuleDstPrefix=h3cPortalForbiddenRuleDstPrefix, h3cPortalFreeRuleEntry=h3cPortalFreeRuleEntry, h3cPortalForbiddenRuleRowStatus=h3cPortalForbiddenRuleRowStatus, h3cPortalPktStaAckAuthEstablish=h3cPortalPktStaAckAuthEstablish, h3cPortalStatAuthTimeout=h3cPortalStatAuthTimeout, h3cPortalForbiddenRuleDstAddr=h3cPortalForbiddenRuleDstAddr, h3cPortalPktStaAckAuthAuthFail=h3cPortalPktStaAckAuthAuthFail, h3cPortalForbiddenRuleSrcIfIndex=h3cPortalForbiddenRuleSrcIfIndex, h3cPortalServerLost=h3cPortalServerLost, h3cPortalStatAuthReq=h3cPortalStatAuthReq, h3cPortalPktStaReqChallengeNum=h3cPortalPktStaReqChallengeNum, h3cPortalPktStaAckChallengeBusy=h3cPortalPktStaAckChallengeBusy, h3cPortalFreeRuleAddrType=h3cPortalFreeRuleAddrType, h3cPortalFreeRuleRowStatus=h3cPortalFreeRuleRowStatus, h3cPortalFreeRuleDstPort=h3cPortalFreeRuleDstPort, h3cPortalForbiddenRuleProtocol=h3cPortalForbiddenRuleProtocol, h3cPortalMacTriggerOnIfRowStatus=h3cPortalMacTriggerOnIfRowStatus, h3cPortalStatAuthFail=h3cPortalStatAuthFail, h3cPortalIfVlanNasIDIfIndex=h3cPortalIfVlanNasIDIfIndex, h3cPortalIfInfoTable=h3cPortalIfInfoTable, h3cPortalForbiddenRuleAddrType=h3cPortalForbiddenRuleAddrType, h3cPortalIfVlanNasIDEntry=h3cPortalIfVlanNasIDEntry, h3cPortalMacTriggerOnIfTable=h3cPortalMacTriggerOnIfTable, h3cPortalServerTable=h3cPortalServerTable, h3cPortalFreeRuleDstAddr=h3cPortalFreeRuleDstAddr, h3cPortalMacTriggerSrvRowStatus=h3cPortalMacTriggerSrvRowStatus, h3cPortalFreeRuleSrcPort=h3cPortalFreeRuleSrcPort, h3cPortalFreeRuleTable=h3cPortalFreeRuleTable, h3cPortalIfServerIndex=h3cPortalIfServerIndex, h3cPortalNasId=h3cPortalNasId, h3cPortalIfInfoEntry=h3cPortalIfInfoEntry, 
h3cPortalForbiddenRuleSrcVlanID=h3cPortalForbiddenRuleSrcVlanID, h3cPortalMacTriggerSrvTable=h3cPortalMacTriggerSrvTable, h3cPortalMacTriggerSrvIP=h3cPortalMacTriggerSrvIP, h3cPortalFreeRuleDstPrefix=h3cPortalFreeRuleDstPrefix, h3cPortalPktStaAckAuthSuccess=h3cPortalPktStaAckAuthSuccess, h3cPortalStatAuthResp=h3cPortalStatAuthResp, h3cPortalTrapPrefix=h3cPortalTrapPrefix, h3cPortalStatAuthUnknownError=h3cPortalStatAuthUnknownError, h3cPortalFreeRuleProtocol=h3cPortalFreeRuleProtocol, h3cPortalStatAuthBusy=h3cPortalStatAuthBusy, h3cPortalFreeRuleSrcIfIndex=h3cPortalFreeRuleSrcIfIndex, h3cPortalStatChallengeBusy=h3cPortalStatChallengeBusy, h3cPortalStatAuthDisordered=h3cPortalStatAuthDisordered, h3cPortalServerGet=h3cPortalServerGet, h3cPortalAuthFailNumber=h3cPortalAuthFailNumber, h3cPortalMacTriggerOnIfEntry=h3cPortalMacTriggerOnIfEntry, h3cPortalIfServerTable=h3cPortalIfServerTable, h3cPortalIfVlanNasIDNasID=h3cPortalIfVlanNasIDNasID, h3cPortalCurrentUserNumber=h3cPortalCurrentUserNumber, h3cPortalIfVlanNasIDVlanID=h3cPortalIfVlanNasIDVlanID, h3cPortalPktStaReqAuthNum=h3cPortalPktStaReqAuthNum, PYSNMP_MODULE_ID=h3cPortal, h3cPortalPktStaAckChallengeAuthFail=h3cPortalPktStaAckChallengeAuthFail, h3cPortalTraps=h3cPortalTraps, h3cPortalSSIDFreeRuleEntry=h3cPortalSSIDFreeRuleEntry, h3cPortalSSIDFreeRuleSrcSSID=h3cPortalSSIDFreeRuleSrcSSID, h3cPortalIfServerRowStatus=h3cPortalIfServerRowStatus, h3cPortalPktStaAckAuthReject=h3cPortalPktStaAckAuthReject, h3cPortalPktStaAckAuthBusy=h3cPortalPktStaAckAuthBusy, h3cPortalForbiddenRuleTable=h3cPortalForbiddenRuleTable, h3cPortalFreeRuleSrcVlanID=h3cPortalFreeRuleSrcVlanID, h3cPortalTrapVarObjects=h3cPortalTrapVarObjects, h3cPortalMacTriggerOnIfThresholdFlow=h3cPortalMacTriggerOnIfThresholdFlow, h3cPortalPktStaAckChallengeEstablish=h3cPortalPktStaAckChallengeEstablish, h3cPortalStatAckLogout=h3cPortalStatAckLogout, h3cPortalStatChallengeFail=h3cPortalStatChallengeFail, h3cPortalAuthReqNumber=h3cPortalAuthReqNumber, 
h3cPortalServerEntry=h3cPortalServerEntry, h3cPortalForbiddenRuleSrcPrefix=h3cPortalForbiddenRuleSrcPrefix, h3cPortalForbiddenRuleSrcMac=h3cPortalForbiddenRuleSrcMac, h3cPortalForbiddenRuleIndex=h3cPortalForbiddenRuleIndex, h3cPortalForbiddenRuleSrcAddr=h3cPortalForbiddenRuleSrcAddr, h3cPortalStatPwdError=h3cPortalStatPwdError, h3cPortalFreeRuleSrcAddr=h3cPortalFreeRuleSrcAddr, h3cPortalStatus=h3cPortalStatus, h3cPortalMaxUserNumber=h3cPortalMaxUserNumber, h3cPortalStatChallengeResp=h3cPortalStatChallengeResp, h3cPortalFreeRuleSrcPrefix=h3cPortalFreeRuleSrcPrefix, h3cPortalStatNotifyLogout=h3cPortalStatNotifyLogout, h3cPortalIfServerUrl=h3cPortalIfServerUrl, h3cPortal=h3cPortal, h3cPortalPktStaAckChallengeSuccess=h3cPortalPktStaAckChallengeSuccess, h3cPortalForbiddenRuleEntry=h3cPortalForbiddenRuleEntry, h3cPortalSSIDFreeRuleTable=h3cPortalSSIDFreeRuleTable, h3cPortalMacTriggerSrvIPAddrType=h3cPortalMacTriggerSrvIPAddrType, h3cPortalMacTriggerOnIfIfIndex=h3cPortalMacTriggerOnIfIfIndex, h3cPortalFreeRuleIndex=h3cPortalFreeRuleIndex, h3cPortalFreeRuleSrcMac=h3cPortalFreeRuleSrcMac, h3cPortalSSIDFreeRuleIndex=h3cPortalSSIDFreeRuleIndex, h3cPortalStatChallengeTimeout=h3cPortalStatChallengeTimeout, h3cPortalMacTriggerSrvPort=h3cPortalMacTriggerSrvPort, h3cPortalAuthSuccNumber=h3cPortalAuthSuccNumber, h3cPortalPktStaAckChallengeReject=h3cPortalPktStaAckChallengeReject, h3cPortalUserNumberUpperLimit=h3cPortalUserNumberUpperLimit, h3cPortalServerPort=h3cPortalServerPort, h3cPortalStatChallengeReq=h3cPortalStatChallengeReq, h3cPortalPktStatistic=h3cPortalPktStatistic, h3cPortalServerUrl=h3cPortalServerUrl, h3cPortalIfVlanNasIDTable=h3cPortalIfVlanNasIDTable, h3cPortalForbiddenRuleDstPort=h3cPortalForbiddenRuleDstPort, h3cPortalMacTriggerOnIfDetctFlowPeriod=h3cPortalMacTriggerOnIfDetctFlowPeriod, h3cPortalServerName=h3cPortalServerName, h3cPortalIfServerEntry=h3cPortalIfServerEntry, h3cPortalMacTriggerSrvIndex=h3cPortalMacTriggerSrvIndex, h3cPortalConfig=h3cPortalConfig, 
h3cPortalSSIDFreeRuleRowStatus=h3cPortalSSIDFreeRuleRowStatus)
| [
"dcwangmit01@gmail.com"
] | dcwangmit01@gmail.com |
dc2ac1d722a3ed8080215db7b308155a9db3095a | 1316187f7b5456c5c43832a45794efdb836cd668 | /ENV/bin/alembic | 91da4d9b074617ed5362139508d1265e512c233b | [] | no_license | miaoihan/qulook_flask | daedfb425076835e0cf51fd906602603bb7f3826 | f95fc8feb615e8a5eb6280d9ed86c13aac6bd5f8 | refs/heads/master | 2021-01-10T06:28:37.808712 | 2016-04-13T12:37:13 | 2016-04-13T12:37:13 | 54,899,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | #!/home/han/PycharmProjects/qulook/ENV/bin/python2
# EASY-INSTALL-ENTRY-SCRIPT: 'alembic==0.8.5','console_scripts','alembic'
__requires__ = 'alembic==0.8.5'
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated console-script shim: resolve and invoke the `alembic`
    # entry point declared by the pinned alembic 0.8.5 distribution.
    sys.exit(
        load_entry_point('alembic==0.8.5', 'console_scripts', 'alembic')()
    )
| [
"izhanghan@outlook.com"
] | izhanghan@outlook.com | |
66359d09f6399c8788b19dd11a0b6191beaf75ae | 0be06b34542b3ad7cc76ec4a0607f53cdeff6ae5 | /test.py | 54dcc6cc25323658b6ff5046cf42246a9f5589f0 | [] | no_license | radhesh-kunnath/covid19-data-extraction | 10b82c7bbe929e6bba0da3361af1a4790626a1e7 | d9884792e490cb3f3119c9a0d8c6e8266bd3b008 | refs/heads/master | 2022-12-18T20:58:41.188478 | 2020-09-15T17:48:38 | 2020-09-15T17:48:38 | 295,804,571 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | import requests
import pymysql
import schedule
import time
from bs4 import BeautifulSoup
def mycode():
    """Fetch the Karnataka covid-19 dashboard page and print its prettified HTML.

    Database persistence and table extraction are left disabled, as in the
    original draft.
    """
    # dbcon=pymysql.connect(host='localhost',user='root',passwd='',db='scrap')
    # cursor=dbcon.cursor()
    dashboard_url = "http://covid19.karnataka.gov.in/covid-dashboard/dashboard.html"
    response = requests.get(dashboard_url)
    parsed = BeautifulSoup(response.content, "html.parser")
    print(parsed.prettify())
    # results = parsed.find_all('table',class_="table table-sm table-striped table-hover table-bordered sortable")
# Run a single scrape immediately; the triple-quoted string below is a
# disabled scheduler loop kept for reference (would re-run mycode every 5 s).
mycode()
"""
schedule.every(5).seconds.do(mycode)
while(1):
    schedule.run_pending()
    print("wait")
    time.sleep(1)
"""
"noreply@github.com"
] | radhesh-kunnath.noreply@github.com |
3044b5f1e87199524e827333216ebc4e1128f089 | d12195d2f35c59236fed72ec3af99c2cb102423a | /click_weight.py | 8a12f5e690e2c179d0a284c9992b6b40837acc59 | [] | no_license | anshika01/Machine-Learning-project | 270e08122af991e9a83f348fe76751ee815ab9c3 | fe61993c86b20c42779a8622672bdb760a6b2cf3 | refs/heads/master | 2021-01-19T14:22:34.994527 | 2017-04-06T17:21:27 | 2017-04-06T17:21:27 | 75,592,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,570 | py | import pandas as pd
import numpy as np
from sklearn.cross_validation import KFold
def most_relevant(group, n_max=5):
    """Return up to *n_max* hotel clusters of *group*, most relevant first.

    *group* must carry 'relevance' and 'hotel_cluster' columns; clusters are
    ordered by descending relevance score.
    """
    order = np.argsort(group['relevance'].values)[::-1]
    return group['hotel_cluster'].values[order][:n_max]
filename = "training_2014_sub.csv"
train_data = pd.read_csv(filename)
for click in range(5,70,5):
click1=click/100.0
print click1
kf = KFold(len(train_data), n_folds=3, shuffle = True)
for train_index, test_index in kf:
train= train_data.iloc[train_index,:]
test= train_data.iloc[test_index,:]
grp_agg = train.groupby(['srch_destination_id','hotel_cluster'])['is_booking'].grp_agg(['sum','count'])
grp_agg.reset_index(inplace=True)
grp_agg = grp_agg.groupby(['srch_destination_id','hotel_cluster']).sum().reset_index()
grp_agg['count'] -= grp_agg['sum']
grp_agg = grp_agg.rename(columns={'sum':'bookings','count':'clicks'})
grp_agg['relevance'] = grp_agg['bookings'] + click1 * grp_agg['clicks']
most_rel = grp_agg.groupby(['srch_destination_id']).apply(most_relevant)
most_rel = pd.DataFrame(most_rel).rename(columns={0:'hotel_cluster'})
test = test.merge(most_rel, how='left',left_on=['srch_destination_id'],right_index=True)
test=test.dropna()
preds=[]
for index, row in test.iterrows():
preds.append(row['hotel_cluster_y'])
target = [[l] for l in test["hotel_cluster_x"]]
print metrics.mapk(target, preds, k=5)
| [
"noreply@github.com"
] | anshika01.noreply@github.com |
a2a2518930512317c83f34ef6273bff3efd67fe4 | 88a54c5e2cf3d16e5288261a37840428bf6c4834 | /src/article_loader.py | 8654a31e9a45bdbf8fdbf8d3c4253eac3d4185af | [] | no_license | VitalyRomanov/document-clustering | f2fa1c617ef8f4e2ba69ba0c152d80c919361b25 | 412a21b857b79a644f77b728b8798dda9e854e29 | refs/heads/master | 2022-04-07T22:04:30.804892 | 2018-02-03T18:50:25 | 2018-02-03T18:50:25 | 104,849,940 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,666 | py | import json
import os
# import pickle as p
import joblib as p
from datetime import datetime
import urllib.request
import numpy as np
def date2int(date):
    """Convert a 'YYYY-MM-DD HH:MM:SS' string to an integer Unix timestamp.

    The conversion uses the local timezone, mirroring datetime.timestamp()
    on a naive datetime.
    """
    parsed = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    return int(parsed.timestamp())
def get_date(ts):
    """Format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS' in local time.

    Accepts anything int() accepts (int, numpy integer, numeric float).  The
    original round-tripped through int(repr(ts)), which was redundant for
    ints and raised ValueError for float inputs such as 1509031277.0.
    """
    return datetime.fromtimestamp(int(ts)).strftime('%Y-%m-%d %H:%M:%S')
def load_latest():
    """Load the cached article dump from disk.

    Returns the deserialized article list, or an empty list when no dump
    file exists yet.  `p` is the joblib module imported at file level.
    """
    dump_file = "articles_dump.dat"
    # (removed an unused `l_time` constant left over from earlier code)
    if os.path.isfile(dump_file):
        # Use a context manager so the file handle is closed deterministically.
        with open(dump_file, "rb") as fh:
            return p.load(fh)
    return []
# def retreive_articles(l_time):
# data = json.load(open('1509031277.json'))
# # retreive articles' dates
# dates = list(map(date2int, map(lambda x: x['public_date'], data)))
# # sort articles by date
# s_ind = sorted(range(len(dates)), key=lambda k: dates[k])
# s_data = [data[ind] for ind in s_ind]
# return s_data
def retreive_articles_url(time):
    """
    :param time: the last available record, encodes time as integer
    :return: list of article records (dicts) sorted by ascending 'public_date'
    """
    # NOTE(review): "retreive" is a typo for "retrieve", but the name is this
    # module's public interface, so it is left unchanged.
    url_addr = "https://www.business-gazeta.ru/index/monitoring/timestamp/%d" % time
    data = None
    with urllib.request.urlopen(url_addr) as url:
        data = json.loads(url.read().decode())
    # Extract each article's publication date as an integer timestamp.
    dates = list(map(date2int, map(lambda x: x['public_date'], data)))
    # sort articles by date
    s_ind = sorted(range(len(dates)), key=lambda k: dates[k])
    s_data = [data[ind] for ind in s_ind]
    return s_data
def post_json(data_json):
    """POST *data_json* (a JSON string) to the similar-articles endpoint.

    The payload is UTF-8 encoded and sent with a JSON content-type header;
    the server's reply is printed for diagnostics.
    """
    url_addr = "https://www.business-gazeta.ru/index/similar"
    enc_json = data_json.encode('utf-8')
    req = urllib.request.Request(url_addr, data=enc_json,
                                 headers={'content-type': 'application/json'})
    response = urllib.request.urlopen(req)
    print(response.read())
# def get_last_time(articles):
# return articles[-1] if len(articles) != 0 else 0
# latest = 0
# for article in articles:
# candidate = date2int(article['public_date'])
# if candidate > latest:
# latest = candidate
# return latest
def get_sections(s_data):
    """Split article records into parallel lists.

    Returns (ids, titles, content, dates, links); dates are converted to
    integer timestamps via date2int.
    """
    ids = [record['id'] for record in s_data]
    titles = [record['title'] for record in s_data]
    content = [record['content'] for record in s_data]
    dates = [date2int(record['public_date']) for record in s_data]
    links = [record['link'] for record in s_data]
    return ids, titles, content, dates, links
class AData:
    """In-memory article store backed by the on-disk dump.

    Articles are held as index-aligned parallel lists (ids, titles, content,
    dates, links), kept in ascending publication-date order.  `self.new`
    tracks how many records the last fetch added and `self._latest` holds
    the newest known timestamp.
    """
    ids = None
    titles = None
    content = None
    dates = None
    links = None

    _TWO_DAYS = 60 * 60 * 24 * 2  # sec*min*hr*2d

    def __init__(self):
        self.ids = []
        self.titles = []
        self.content = []
        self.dates = []
        self.links = []
        # Seed the store from the cached dump, if any.
        articles_data = get_sections(load_latest())
        self.join_sections(articles_data)
        self._latest = self.get_last_time()
        self.new = len(self.ids)

    def load_new(self):
        """Fetch articles newer than the latest stored one and append them."""
        self._latest = self.get_last_time()
        self.new = len(self.ids)
        print("Retreiving after %s" % get_date(self._latest), end=": ")
        new_articles = retreive_articles_url(self._latest)
        articles_data = get_sections(new_articles)
        self.join_sections(articles_data)
        self.new = len(new_articles)
        if self.new == 0:
            print("Nothing new")
        else:
            print("%d added" % self.new)

    def join_sections(self, articles_data):
        """Append a (ids, titles, content, dates, links) tuple to the store."""
        ids, titles, content, dates, links = articles_data
        self.ids += ids
        self.titles += titles
        self.content += content
        self.dates += dates
        self.links += links

    def get_article(self, a_id):
        """Return the body of the article at positional index *a_id*."""
        return self.content[a_id]

    def get_last_time(self):
        """Timestamp of the newest stored article (fixed epoch when empty)."""
        return self.dates[-1] if len(self.dates) > 0 else 1509031277

    def two_days_range(self, id1, id2):
        """True when the two articles were published within two days of each other."""
        return True if abs(self.dates[id1] - self.dates[id2]) < self._TWO_DAYS else False

    def get_last_two_days(self, a_id):
        """Positional indices of articles up to two days older than article *a_id*."""
        begin_with = self.ids.index(a_id)
        ids = []
        # Walk backwards from a_id while still inside the two-day window.
        for i in range(begin_with, -1, -1):
            if self.two_days_range(begin_with, i):
                ids.append(i)
            else:
                break
        return np.array(ids)

    def make_json(self, doc_id, similar_id):
        """JSON payload linking one article id to its similar-article ids."""
        return json.dumps({"article_id": self.ids[doc_id],
                           "similar_id": [self.ids[s_id] for s_id in similar_id]},
                          indent=4)

    def get_latest(self, last_id, content_type='titles', filter_bl = True):
        """
        Input:  last_id - the id in self.ids.
                content_type - optional. Specifies whether to return titles or articles'
                                body
                filter_bl - specifies whether to apply blacklist filtering or not
        Returns: all documents and ids that appear after the doc with last_id
        """
        try:
            last_pos = self.ids.index(last_id)
        except:
            # last_id == -1 means "from the beginning"; anything else is an error.
            if last_id != -1:
                raise Exception("No document with such id")
            last_pos = last_id

        if content_type == 'titles':
            content_source = self.titles
        elif content_type == 'content':
            content_source = self.content
        else:
            # NOTE(review): this raises the NotImplemented constant rather than
            # NotImplementedError, which is a TypeError at runtime on Python 3.
            raise NotImplemented

        latest_ids = []
        latest_content = []
        for i in range(last_pos + 1, len(self.ids)):
            if filter_bl and self.is_blacklisted(i):
                continue
            latest_ids.append(self.ids[i])
            latest_content.append(content_source[i])
        return {'ids': latest_ids, 'docs': latest_content}

    def get_titles(self, last_n=-1):
        """
        :param last_n: the number of latest titles to return (-1 means all)
        :return: dictionary that contains ids and the content of titles
        """
        titles_total = len(self.titles)
        if last_n == -1:
            titles_range = range(titles_total)
        else:
            titles_range = range(max(titles_total - last_n, 0), titles_total)
        titles_ids = []
        titles_content = []
        for i in titles_range:
            if not self.is_blacklisted(i):
                titles_ids.append(self.ids[i])
                titles_content.append(self.titles[i])
        return {'ids': titles_ids, 'titles': titles_content}

    def is_blacklisted(self, ind: int) -> bool:
        """True when article *ind* comes from a blacklisted source domain."""
        black_list = ['realnoevremya.ru', 'tatcenter.ru']
        # links look like scheme://host/...; element 2 of the split is the host.
        url = self.links[ind].split("/")[2]
        return url in black_list
def load(path):
    # NOTE(review): `load`/`save` mirror AData persistence and `save` takes a
    # `self` parameter, which suggests both were meant to be AData methods --
    # confirm intended indentation/ownership against the original repository.
    return p.load(open(path, "rb"))

def save(self,path):
    # Serialize *self* to *path* with joblib (module alias `p`).
    p.dump(self, open(path, "wb"))
| [
"mortiv16@gmail.com"
] | mortiv16@gmail.com |
f4b2e300b62275ca716a10c4df1c4a5ce8822b52 | b286fbf7f9c00664a009cf57a8aa7a671222c61a | /setup.py | ec186450b5af55dcc10a265c5d514d1c5688f00e | [
"MIT"
] | permissive | xiaorancs/feature-select | 860b8ff936018473eeac28ecc0042bb77c5ae43f | 14e216e742542272c40c0249a648fac3df925815 | refs/heads/master | 2020-03-10T00:18:04.577852 | 2019-03-04T02:52:41 | 2019-03-04T02:52:41 | 129,078,564 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | import io
from setuptools import find_packages, setup
def long_description():
    """Read and return the package README as UTF-8 text."""
    with io.open('README.md', 'r', encoding='utf-8') as fh:
        return fh.read()
# Distribution metadata for the featselector package.
setup(name='featselector',
      version='0.1.0',
      description='Pythonic Features Selector by Statistics and Model',
      long_description=long_description(),
      long_description_content_type="text/markdown",
      url='https://github.com/xiaorancs/feature-select',
      author='Ran Xiao',
      author_email="xiaoranone@gmail.com",
      license='MIT',
      packages=find_packages(),
      platforms = "Linux",
      classifiers=[
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
      ],
      # NOTE(review): 'requires' looks like a placeholder, not a real runtime
      # dependency -- confirm the intended requirement list before release.
      install_requires = ['requires']
      )
| [
"xiaoranone@gmail.com"
] | xiaoranone@gmail.com |
2da083f0f04cbb665f2e9de9010e8d809f839b9a | 79863574ca9d0ad9ca1a4a992d5991bed22120ac | /code/default/php_proxy/local/web_control.py | bbee9de2799f802b2738fb005c58fb2d0a201241 | [
"BSD-2-Clause"
] | permissive | kuanghy/Docker-XX-Net | 447c05030edd992b49190710388159b366e37012 | 91ddaee6be3391ecac289eee16be72c6a5edc949 | refs/heads/master | 2016-09-12T13:13:53.392017 | 2016-05-07T03:04:37 | 2016-05-07T03:04:37 | 58,246,621 | 7 | 5 | null | null | null | null | UTF-8 | Python | false | false | 9,442 | py | #!/usr/bin/env python
# coding:utf-8
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
if sys.platform == "win32":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'win32'))
sys.path.append(win32_lib)
elif sys.platform == "linux" or sys.platform == "linux2":
win32_lib = os.path.abspath( os.path.join(python_path, 'lib', 'linux'))
sys.path.append(win32_lib)
import platform
import BaseHTTPServer
import urlparse
import json
import os
import re
import subprocess
import cgi
import urllib2
import sys
import logging
import ConfigParser
os.environ['HTTPS_PROXY'] = ''
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = os.path.abspath(os.path.join(current_path, os.pardir, os.pardir))
class User_config(object):
    """php_proxy user settings, persisted to data/php_proxy/config.ini.

    Class attributes hold the defaults; load() overrides them from the ini
    file and save() writes the current values back.
    """
    php_enable = '1'
    php_password = '123456'
    php_server = ''

    proxy_enable = "0"
    proxy_host = ""
    proxy_port = ""
    proxy_user = ""
    proxy_passwd = ""

    CONFIG_USER_FILENAME = os.path.abspath( os.path.join(root_path, os.pardir, os.pardir, 'data', 'php_proxy', 'config.ini'))

    def __init__(self):
        self.load()

    def load(self):
        """Read settings from CONFIG_USER_FILENAME, keeping defaults on failure."""
        # Relax ConfigParser's option regex so only '=' separates key/value.
        ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\s][^=]*)\s*(?P<vi>[=])\s*(?P<value>.*)$')
        CONFIG = ConfigParser.ConfigParser()
        try:
            if os.path.isfile(self.CONFIG_USER_FILENAME):
                CONFIG.read(self.CONFIG_USER_FILENAME)
            else:
                return

            # The [php] section is optional; ignore it when absent.
            try:
                self.php_enable = CONFIG.get('php', 'enable')
                self.php_password = CONFIG.get('php', 'password')
                self.php_server = CONFIG.get('php', 'fetchserver')
            except:
                pass

            self.proxy_enable = CONFIG.get('proxy', 'enable')
            self.proxy_host = CONFIG.get('proxy', 'host')
            self.proxy_port = CONFIG.get('proxy', 'port')
            self.proxy_user = CONFIG.get('proxy', 'username')
            self.proxy_passwd = CONFIG.get('proxy', 'password')
        except Exception as e:
            logging.warn("User_config.load except:%s", e)

    def save(self):
        """Write the current settings back to CONFIG_USER_FILENAME."""
        try:
            # fix: use a context manager so the handle is closed even when a
            # write fails part-way (the old code leaked `f` on exception).
            with open(self.CONFIG_USER_FILENAME, 'w') as f:
                f.write("[php]\n")
                f.write("enable = %s\n" % self.php_enable)
                f.write("password = %s\n" % self.php_password)
                f.write("fetchserver = %s\n\n" % self.php_server)

                f.write("[proxy]\n")
                f.write("enable = %s\n" % self.proxy_enable)
                f.write("host = %s\n" % self.proxy_host)
                f.write("port = %s\n" % self.proxy_port)
                f.write("username = %s\n" % self.proxy_user)
                f.write("password = %s\n" % self.proxy_passwd)
        except:
            logging.exception("PHP config save user config fail:%s", self.CONFIG_USER_FILENAME)
user_config = User_config()
def http_request(url, method="GET"):
    """Fire a GET at *url* without a proxy, discarding the response.

    Used to poke local control endpoints; failures are logged and swallowed.
    *method* is accepted for interface compatibility but unused, as before.
    """
    proxy_handler = urllib2.ProxyHandler({})
    opener = urllib2.build_opener(proxy_handler)
    try:
        req = opener.open(url)
        # fix: close the response instead of leaking the connection.
        req.close()
    except Exception as e:
        logging.exception("web_control http_request:%s fail:%s", url, e)
        return
class RemoteContralServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """Localhost web-control endpoint for the php_proxy module.

    GET serves /log, /config, /is_ready and /quit; POST accepts /config
    updates.  Requests whose Referer points somewhere other than localhost
    are refused.

    NOTE(review): the class name keeps the original "Contral" spelling, and
    send_response() deliberately shadows the base-class method with a
    different signature -- both are part of the existing interface.
    """
    deploy_proc = None

    def address_string(self):
        """Return 'host:port' of the peer without a reverse-DNS lookup."""
        return '%s:%s' % self.client_address[:2]

    def do_CONNECT(self):
        # This is a control port, not a tunnelling proxy: refuse CONNECT.
        self.wfile.write(b'HTTP/1.1 403\r\nConnection: close\r\n\r\n')

    def do_GET(self):
        # Refuse requests referred from non-local pages; a missing or
        # unparsable Referer falls through the bare except and is allowed.
        try:
            refer = self.headers.getheader('Referer')
            netloc = urlparse.urlparse(refer).netloc
            # fix: 'startswitch' was a typo whose AttributeError the bare
            # except swallowed, disabling the localhost check entirely.
            if not netloc.startswith("127.0.0.1") and not netloc.startswith("localhost"):
                logging.warn("web control ref:%s refuse", netloc)
                return
        except:
            pass

        path = urlparse.urlparse(self.path).path
        if path == "/log":
            return self.req_log_handler()
        elif path == "/config":
            return self.req_config_handler()
        elif path == "/is_ready":
            return self.req_is_ready_handler()
        elif path == "/quit":
            # NOTE(review): `common` is not imported in this file -- confirm
            # where common.keep_run is expected to come from.
            common.keep_run = False
            data = "Quit"
            self.wfile.write(('HTTP/1.1 200\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % ('text/plain', len(data))).encode())
            self.wfile.write(data)
            sys.exit()
            return
        else:
            logging.debug('PHP Web_control %s %s %s ', self.address_string(), self.command, self.path)
            self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
            logging.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)

    def do_POST(self):
        # Same localhost-Referer guard as do_GET.
        try:
            refer = self.headers.getheader('Referer')
            netloc = urlparse.urlparse(refer).netloc
            # fix: 'startswitch' typo, see do_GET.
            if not netloc.startswith("127.0.0.1") and not netloc.startswith("localhost"):
                logging.warn("web control ref:%s refuse", netloc)
                return
        except:
            pass

        logging.debug ('PHP web_control %s %s %s ', self.address_string(), self.command, self.path)
        # Parse the request body into self.postvars (multipart or form data).
        try:
            ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))
            if ctype == 'multipart/form-data':
                self.postvars = cgi.parse_multipart(self.rfile, pdict)
            elif ctype == 'application/x-www-form-urlencoded':
                length = int(self.headers.getheader('content-length'))
                self.postvars = urlparse.parse_qs(self.rfile.read(length), keep_blank_values=1)
            else:
                self.postvars = {}
        except:
            self.postvars = {}

        path = urlparse.urlparse(self.path).path
        if path == "/config":
            return self.req_config_handler()
        else:
            self.wfile.write(b'HTTP/1.1 404\r\nContent-Type: text/plain\r\nConnection: close\r\n\r\n404 Not Found')
            logging.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)

    def send_response(self, mimetype, data):
        """Write a complete 200 reply with the given body and content type."""
        self.wfile.write(('HTTP/1.1 200\r\nAccess-Control-Allow-Origin: *\r\nContent-Type: %s\r\nContent-Length: %s\r\n\r\n' % (mimetype, len(data))).encode())
        self.wfile.write(data)

    def send_file(self, filename, mimetype):
        """Send a file from disk as a 200 reply (empty files are skipped)."""
        data = ''
        with open(filename, 'rb') as fp:
            data = fp.read()
        if data:
            self.send_response(mimetype, data)

    def req_log_handler(self):
        """Serve /log: buffer-size control plus get_last / get_new tailing."""
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ''

        cmd = "get_last"
        if reqs["cmd"]:
            cmd = reqs["cmd"][0]
        if cmd == "set_buffer_size" :
            if not reqs["buffer_size"]:
                data = '{"res":"fail", "reason":"size not set"}'
                mimetype = 'text/plain'
                self.send_response(mimetype, data)
                return

            buffer_size = reqs["buffer_size"][0]
            logging.set_buffer_size(buffer_size)
        elif cmd == "get_last":
            max_line = int(reqs["max_line"][0])
            data = logging.get_last_lines(max_line)
        elif cmd == "get_new":
            last_no = int(reqs["last_no"][0])
            data = logging.get_new_lines(last_no)
        else:
            logging.error('PAC %s %s %s ', self.address_string(), self.command, self.path)

        mimetype = 'text/plain'
        self.send_response(mimetype, data)

    def req_config_handler(self):
        """Serve /config: report the current settings or apply posted ones."""
        req = urlparse.urlparse(self.path).query
        reqs = urlparse.parse_qs(req, keep_blank_values=True)
        data = ''

        try:
            if reqs['cmd'] == ['get_config']:
                data = json.dumps(user_config, default=lambda o: o.__dict__)
            elif reqs['cmd'] == ['set_config']:
                user_config.php_password = self.postvars['php_password'][0]
                user_config.php_server = self.postvars['php_server'][0]
                user_config.proxy_enable = self.postvars['proxy_enable'][0]
                user_config.proxy_host = self.postvars['proxy_host'][0]
                user_config.proxy_port = self.postvars['proxy_port'][0]
                # fix: assign the attributes User_config actually persists
                # (proxy_user / proxy_passwd).  Writing proxy_username /
                # proxy_password created new attributes that save() ignored,
                # so posted proxy credentials were never stored.
                user_config.proxy_user = self.postvars['proxy_username'][0]
                user_config.proxy_passwd = self.postvars['proxy_password'][0]
                user_config.save()

                data = '{"res":"success"}'
                self.send_response('text/html', data)
                # Ask the launcher to restart this module so changes apply.
                http_request("http://127.0.0.1:8085/init_module?module=php_proxy&cmd=restart")
                return
        except Exception as e:
            logging.exception("req_config_handler except:%s", e)
            data = '{"res":"fail", "except":"%s"}' % e
        self.send_response('text/html', data)

    def req_is_ready_handler(self):
        """Serve /is_ready: reports True once this control server is running."""
        data = "True"
        mimetype = 'text/plain'
        self.send_response(mimetype, data)
if __name__ == "__main__":
pass | [
"sudohuoty@163.com"
] | sudohuoty@163.com |
aa57d2850c1fe3a915d65f8283fcf7f4d983723a | a0fab2d78c23fc21f2fc4b4c75dc7ec4f2509bce | /8_CART/4_treesCompare.py | 5944d676e927e9f70bc568ffa5576501424ad42e | [] | no_license | weiywang/machineLearning | 4361a0c2711ec7232dcccb480de63de572231a75 | bc1ca07b341a7eda557659ac3a34edbd53eea155 | refs/heads/master | 2020-05-23T22:33:20.098939 | 2019-06-03T12:56:37 | 2019-06-03T12:56:37 | 186,976,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,453 | py | '''
Created on Feb 4, 2011
Tree-Based Regression Methods
@author: Peter Harrington
'''
from numpy import *
def loadDataSet(fileName): # general function to parse tab -delimited floats
    """Parse a tab-delimited file of floats into a list of rows.

    NOTE(review): written for Python 2 -- under Python 3, map() returns a
    lazy iterator, so each appended row would need wrapping in list().
    """
    dataMat = [] # assume last column is target value
    fr = open(fileName)
    for line in fr.readlines():
        curLine = line.strip().split('\t')
        fltLine = map(float, curLine) # map all elements to float()
        dataMat.append(fltLine)
    return dataMat
def binSplitDataSet(dataSet, feature, value):
    """Split *dataSet* rows on column *feature*.

    Returns (rows where feature > value, rows where feature <= value).
    """
    above = nonzero(dataSet[:, feature] > value)[0]
    below = nonzero(dataSet[:, feature] <= value)[0]
    return dataSet[above, :], dataSet[below, :]
def regLeaf(dataSet):
    """Leaf generator for regression trees: mean of the target (last) column."""
    return dataSet[:, -1].mean()
def regErr(dataSet):
    """Total squared error of the target column (population variance * rows)."""
    return dataSet[:, -1].var() * shape(dataSet)[0]
def linearSolve(dataSet): # helper function used in two places
    """Fit ordinary least squares on *dataSet* (last column is the target).

    Returns (ws, X, Y) where X carries a leading bias column of ones and
    ws solves X * ws ~= Y.  Raises when X.T * X is singular.
    """
    rows, cols = shape(dataSet)
    design = mat(ones((rows, cols)))
    design[:, 1:cols] = dataSet[:, 0:cols - 1]
    target = dataSet[:, -1]
    gram = design.T * design
    if linalg.det(gram) == 0.0:
        raise NameError('This matrix is singular, cannot do inverse,\n\
        try increasing the second value of ops')
    coeffs = gram.I * (design.T * target)
    return coeffs, design, target
def modelLeaf(dataSet): # create linear model and return coeficients
    """Leaf generator for model trees: the OLS coefficients for this subset."""
    coeffs, _, _ = linearSolve(dataSet)
    return coeffs
def modelErr(dataSet):
    """Squared-error of the OLS fit on *dataSet* (model-tree impurity)."""
    coeffs, design, target = linearSolve(dataSet)
    residual = target - design * coeffs
    return sum(power(residual, 2))
def chooseBestSplit(dataSet, leafType=regLeaf, errType=regErr, ops=(1, 4)):
    """Find the best (feature index, split value) for *dataSet*, or a leaf.

    ops = (tolS, tolN): minimum error reduction to accept a split and the
    minimum sample count per branch.  Returns (None, leaf value) when no
    acceptable split exists.
    """
    tolS = ops[0];
    tolN = ops[1]
    # if all the target variables are the same value: quit and return value
    if len(set(dataSet[:, -1].T.tolist()[0])) == 1: # exit cond 1
        return None, leafType(dataSet)
    m, n = shape(dataSet)
    # the choice of the best feature is driven by Reduction in RSS error from mean
    S = errType(dataSet)
    bestS = inf;
    bestIndex = 0;
    bestValue = 0
    for featIndex in range(n - 1):
        # for splitVal in set(dataSet[:,featIndex]):
        # Candidate thresholds: every distinct value observed in this column.
        for splitVal in set((dataSet[:, featIndex].T.A.tolist())[0]):
            mat0, mat1 = binSplitDataSet(dataSet, featIndex, splitVal)
            # Skip splits that leave either branch with fewer than tolN rows.
            if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): continue
            newS = errType(mat0) + errType(mat1)
            if newS < bestS:
                bestIndex = featIndex
                bestValue = splitVal
                bestS = newS
    # if the decrease (S-bestS) is less than a threshold don't do the split
    if (S - bestS) < tolS:
        return None, leafType(dataSet) # exit cond 2
    mat0, mat1 = binSplitDataSet(dataSet, bestIndex, bestValue)
    if (shape(mat0)[0] < tolN) or (shape(mat1)[0] < tolN): # exit cond 3
        return None, leafType(dataSet)
    return bestIndex, bestValue
def createTree(dataSet, leafType=regLeaf, errType=regErr,
               ops=(1, 4)): # assume dataSet is NumPy Mat so we can array filtering
    """Recursively build a CART tree over *dataSet* (last column = target).

    leafType generates leaf values, errType measures impurity, and
    ops = (tolS, tolN) are the pre-pruning tolerances.  Internal nodes are
    dicts with keys spInd, spVal, left, right; leaves are leafType values.
    """
    feat, val = chooseBestSplit(dataSet, leafType, errType, ops) # choose the best split
    if feat == None: return val # if the splitting hit a stop condition return val
    retTree = {}
    retTree['spInd'] = feat
    retTree['spVal'] = val
    lSet, rSet = binSplitDataSet(dataSet, feat, val)
    retTree['left'] = createTree(lSet, leafType, errType, ops)
    retTree['right'] = createTree(rSet, leafType, errType, ops)
    return retTree
def isTree(obj):
    """Return True when *obj* is an internal tree node (stored as a dict)."""
    # isinstance is the idiomatic check and also accepts dict subclasses,
    # whereas comparing type(obj).__name__ against the string 'dict' did not.
    return isinstance(obj, dict)
def getMean(tree):
    """Collapse *tree* bottom-up into a single value.

    Each internal node becomes the average of its two (already collapsed)
    children; used by prune() when merging a subtree.
    """
    if isTree(tree['right']): tree['right'] = getMean(tree['right'])
    if isTree(tree['left']): tree['left'] = getMean(tree['left'])
    return (tree['left'] + tree['right']) / 2.0
def prune(tree, testData):
    """Post-prune *tree* against held-out *testData*.

    Leaf pairs are merged whenever merging lowers squared error on the test
    rows routed to that subtree.

    NOTE(review): Python 2 source -- the bare `print "merging"` below is a
    syntax error under Python 3.
    """
    if shape(testData)[0] == 0: return getMean(tree) # if we have no test data collapse the tree
    if (isTree(tree['right']) or isTree(tree['left'])): # if the branches are not trees try to prune them
        lSet, rSet = binSplitDataSet(testData, tree['spInd'], tree['spVal'])
    if isTree(tree['left']): tree['left'] = prune(tree['left'], lSet)
    if isTree(tree['right']): tree['right'] = prune(tree['right'], rSet)
    # if they are now both leafs, see if we can merge them
    if not isTree(tree['left']) and not isTree(tree['right']):
        lSet, rSet = binSplitDataSet(testData, tree['spInd'], tree['spVal'])
        errorNoMerge = sum(power(lSet[:, -1] - tree['left'], 2)) + \
                       sum(power(rSet[:, -1] - tree['right'], 2))
        treeMean = (tree['left'] + tree['right']) / 2.0
        errorMerge = sum(power(testData[:, -1] - treeMean, 2))
        if errorMerge < errorNoMerge:
            print "merging"
            return treeMean
        else:
            return tree
    else:
        return tree
def regTreeEval(model, inDat):
    """Evaluate a regression-tree leaf: the stored constant, as a float.

    *inDat* is unused; it is accepted so leaf evaluators share one signature.
    """
    return float(model)
def modelTreeEval(model, inDat):
    """Evaluate a model-tree leaf: apply the stored linear model to *inDat*.

    A bias column of ones is prepended before the dot product.
    """
    width = shape(inDat)[1]
    row = mat(ones((1, width + 1)))
    row[:, 1:] = inDat
    return float(row * model)
def treeForeCast(tree, inData, modelEval=regTreeEval):
    """Forecast one observation *inData* by walking *tree* to a leaf.

    *modelEval* interprets the leaf value (a constant for regression trees,
    a linear model for model trees).
    """
    if not isTree(tree): return modelEval(tree, inData)
    # Values above the split threshold go left, the rest right.
    if inData[tree['spInd']] > tree['spVal']:
        if isTree(tree['left']):
            return treeForeCast(tree['left'], inData, modelEval)
        else:
            return modelEval(tree['left'], inData)
    else:
        if isTree(tree['right']):
            return treeForeCast(tree['right'], inData, modelEval)
        else:
            return modelEval(tree['right'], inData)
def createForeCast(tree, testData, modelEval=regTreeEval):
    """Forecast every row of *testData*; returns an m x 1 matrix of predictions."""
    m = len(testData)
    yHat = mat(zeros((m, 1)))
    for i in range(m):
        yHat[i, 0] = treeForeCast(tree, mat(testData[i]), modelEval)
    return yHat
# if __name__ == '__main__':
# trainMat = mat(loadDataSet('bikeSpeedVsIq_train.txt'))
# testMat = mat(loadDataSet('bikeSpeedVsIq_test.txt'))
# myTree = createTree(trainMat, ops = (1, 20))
# yHat = createForeCast(myTree, testMat[:, 0])
# print corrcoef(yHat, testMat[:, 1], rowvar = 0)[0, 1]
# myModelTree = createTree(trainMat,modelLeaf, modelErr, ops=(1, 20))
# yModelHat = createForeCast(myModelTree, testMat[:, 0], modelTreeEval)
# print corrcoef(yModelHat, testMat[:, 1], rowvar=0)[0, 1]
# ws, X, Y = linearSolve(trainMat)
# for i in range(shape(testMat)[0]):
# yHat[i] = testMat[i, 0] * ws[1, 0] + ws[0, 0]
# print corrcoef(yHat, testMat[:, 1], rowvar = 0)[0, 1] | [
"sa517370@mail.ustc.edu.cn"
] | sa517370@mail.ustc.edu.cn |
be1d104b2f9883aeb4d68360c1c230337ff776cd | 3740de0d6e43ea140fc09ab314e4c492603ba185 | /scripts/sources/S_EstimateQuantileMixtureStressTest.py | fb9d5a602ba8a987d718463a4796f474687762ad | [
"MIT"
] | permissive | s0ap/arpmRes | 29c60c65fd3e11be1cc31d46494e5b3ebf6e05ab | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | refs/heads/master | 2022-02-16T05:01:22.118959 | 2019-08-20T16:45:02 | 2019-08-20T16:45:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,523 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # S_EstimateQuantileMixtureStressTest [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_EstimateQuantileMixtureStressTest&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=e-sta-ssessq-uant-copy-1).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
import numpy as np
from numpy import arange, zeros, var, \
mean
from numpy.random import rand
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, bar, legend, subplots, title
plt.style.use('seaborn')
from ARPM_utils import save_plot
from QuantileMixture import QuantileMixture
# -
# ## Compute error, bias and inefficiency for every estimator and for every DGP within the stress-test set
# +
# define estimators
g_b = lambda X: mean(X, 1, keepdims=True)  # sample mean of each simulated series
g_e = lambda X: np.median(X, 1, keepdims=True)  # sample median of each simulated series

# generate the scenarios for the time series
t_ = 50
j_ = 10 ** 4
alpha = 0.5
sigma_Y = 0.2
mu_Z = 0
sigma_Z = 0.15

S = arange(0,0.22,0.02)  # stress-test set for parameter mu_Y
k_ = len(S)

I = zeros((j_, t_))
er_b = zeros(k_)
er_e = zeros(k_)
bias2_b = zeros(k_)
bias2_e = zeros(k_)
inef2_b = zeros(k_)
inef2_e = zeros(k_)
for k in range(k_):
    # compute the true value of the property (median of the mixture)
    mu_Y = S[k]
    g_f = QuantileMixture(0.5, alpha, mu_Y, sigma_Y, mu_Z, sigma_Z)
    # generate j_ simulations of the time series via inverse-transform sampling
    P = rand(j_, t_)
    for j in range(j_):
        I[j,:] = QuantileMixture(P[j, :], alpha, mu_Y, sigma_Y, mu_Z, sigma_Z)
    # compute simulations of the estimators
    G_b = g_b(I)
    G_e = g_e(I)
    # compute the losses of the estimators
    L_b = (G_b - g_f) ** 2
    L_e = (G_e - g_f) ** 2
    # compute errors (mean loss over simulations)
    er_b[k] = mean(L_b)
    er_e[k] = mean(L_e)
    # compute square bias
    bias2_b[k] = (mean((G_b) - g_f)) ** 2
    bias2_e[k] = (mean((G_e) - g_f)) ** 2
    # compute square inefficiency (sample variance of the estimator)
    inef2_b[k] = var(G_b, ddof=1)
    inef2_e[k] = var(G_e, ddof=1)
# -
# ## Compute robust and ensemble errors

# +
er_rob_b = max(er_b)
er_rob_e = max(er_e)

er_ens_b = mean(er_b)
er_ens_e = mean(er_e)
# -

# ## Determine the optimal estimator

# best robust estimator
# fix: removed a stray trailing comma that silently made er_rob a one-element
# tuple instead of a scalar
er_rob = min([er_rob_b, er_rob_e])
# best ensemble estimator
er_ens = min([er_ens_b, er_ens_e])
# ## plot error, bias and inefficiency for each DGP within the stress-test set
# +
red = [.9, .4, 0]  # color of the bias^2 bars
blue = [0, .45, .7]  # color of the inefficiency^2 bars

f, ax = subplots(2,1)
plt.sca(ax[0])
# Stacked bars: total height = bias^2 + ineff^2, overlaid with the error curve
# (error = bias^2 + ineff^2, so the line should trace the bar tops).
b = bar(range(1,k_+1),bias2_b.T+inef2_b.T, facecolor= red, label='bias$^2$')
b = bar(range(1,k_+1),inef2_b.T,facecolor= blue,label='ineff$^2$')
plot(range(1,k_+1), er_b, 'k',lw=1.5, label='error')
plt.xticks(range(0,k_+2,2))
legend()
title('stress-test of estimator b')

plt.sca(ax[1])
b = bar(range(1,k_+1),bias2_e.T+inef2_e.T,facecolor= red)
b = bar(range(1,k_+1),inef2_e.T,facecolor= blue)
plot(range(1,k_+1), er_e, 'k',lw= 1.5)
plt.xticks(range(0,k_+2,2))
title('stress-test of estimator e')
plt.tight_layout();
plt.show()
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1])
| [
"dario.popadic@yahoo.com"
] | dario.popadic@yahoo.com |
a5cc2d718c689f5c8e0a542115d38f94e4047061 | 4d42dc68dea307339af72a57a535a945b7c2b226 | /source/accounts/migrations/0005_auto_20191104_1030.py | af20585508202c46786cc07a1cd94eb392e8c625 | [] | no_license | EvgeniiLipatov/tracker | 2773f41c92dffaabac5d0acd127c62c0a0952771 | 74c12590ea62cb149b8e8a0452e89ea1641c3359 | refs/heads/master | 2022-12-08T06:36:25.865479 | 2019-12-11T01:05:27 | 2019-12-11T01:05:27 | 211,312,117 | 0 | 0 | null | 2022-11-22T04:47:36 | 2019-09-27T12:16:15 | Python | UTF-8 | Python | false | false | 482 | py | # Generated by Django 2.2 on 2019-11-04 10:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0004_auto_20191104_0119'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='avatar',
field=models.ImageField(blank=True, default='user_pics/angry_cat.jpg', null=True, upload_to='user_pics', verbose_name='Аватар'),
),
]
| [
"lipatowqevgeniy@gmail.com"
] | lipatowqevgeniy@gmail.com |
78143c4e6942051b155a1e0dc181ef0d38715934 | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/test/test_genexps.py | cc75ac26ee667116ef05274e3e3a41516ae62aeb | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 7,149 | py | doctests = """
Test simple loop with conditional
>>> sum(i*i for i in range(100) if i&1 == 1)
166650
Test simple nesting
>>> list((i,j) for i in range(3) for j in range(4) )
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Test nesting with the inner expression dependent on the outer
>>> list((i,j) for i in range(4) for j in range(i) )
[(1, 0), (2, 0), (2, 1), (3, 0), (3, 1), (3, 2)]
Make sure the induction variable is not exposed
>>> i = 20
>>> sum(i*i for i in range(100))
328350
>>> i
20
Test first class
>>> g = (i*i for i in range(4))
>>> type(g)
<class 'generator'>
>>> list(g)
[0, 1, 4, 9]
Test direct calls to next()
>>> g = (i*i for i in range(3))
>>> next(g)
0
>>> next(g)
1
>>> next(g)
4
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
Does it stay stopped?
>>> next(g)
Traceback (most recent call last):
File "<pyshell#21>", line 1, in -toplevel-
next(g)
StopIteration
>>> list(g)
[]
Test running gen when defining function is out of scope
>>> def f(n):
... return (i*i for i in range(n))
>>> list(f(10))
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> def f(n):
... return ((i,j) for i in range(3) for j in range(4) if j in range(n))
>>> list(f(4))
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
>>> list(f(2))
[(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
Verify that parenthesis are required in a statement
>>> def f(n):
... return i*i for i in range(n)
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parenthesis are required when used as a keyword argument value
>>> dict(a = i for i in range(10))
Traceback (most recent call last):
...
SyntaxError: invalid syntax
Verify that parenthesis are required when used as a keyword argument value
>>> dict(a = (i for i in range(10))) #doctest: +ELLIPSIS
{'a': <generator object <genexpr> at ...>}
Verify early binding for the outermost for-expression
>>> x=10
>>> g = (i*i for i in range(x))
>>> x = 5
>>> list(g)
[0, 1, 4, 9, 16, 25, 36, 49, 64, 81]
Verify that the outermost for-expression makes an immediate check
for iterability
>>> (i for i in 6)
Traceback (most recent call last):
File "<pyshell#4>", line 1, in -toplevel-
(i for i in 6)
TypeError: 'int' object is not iterable
Verify late binding for the outermost if-expression
>>> include = (2,4,6,8)
>>> g = (i*i for i in range(10) if i in include)
>>> include = (1,3,5,7,9)
>>> list(g)
[1, 9, 25, 49, 81]
Verify late binding for the innermost for-expression
>>> g = ((i,j) for i in range(3) for j in range(x))
>>> x = 4
>>> list(g)
[(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3)]
Verify re-use of tuples (a side benefit of using genexps over listcomps)
>>> from test.support import check_impl_detail
>>> tupleids = list(map(id, ((i,i) for i in range(10))))
>>> int(max(tupleids) - min(tupleids)) if check_impl_detail() else 0
0
Verify that syntax error's are raised for genexps used as lvalues
>>> (y for y in (1,2)) = 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
>>> (y for y in (1,2)) += 10
Traceback (most recent call last):
...
SyntaxError: can't assign to generator expression
########### Tests borrowed from or inspired by test_generators.py ############
Make a generator that acts like range()
>>> yrange = lambda n: (i for i in range(n))
>>> list(yrange(10))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
Generators always return to the most recent caller:
>>> def creator():
... r = yrange(5)
... print("creator", next(r))
... return r
>>> def caller():
... r = creator()
... for i in r:
... print("caller", i)
>>> caller()
creator 0
caller 1
caller 2
caller 3
caller 4
Generators can call other generators:
>>> def zrange(n):
... for i in yrange(n):
... yield i
>>> list(zrange(5))
[0, 1, 2, 3, 4]
Verify that a gen exp cannot be resumed while it is actively running:
>>> g = (next(me) for i in range(10))
>>> me = g
>>> next(me)
Traceback (most recent call last):
File "<pyshell#30>", line 1, in -toplevel-
next(me)
File "<pyshell#28>", line 1, in <generator expression>
g = (next(me) for i in range(10))
ValueError: generator already executing
Verify exception propagation
>>> g = (10 // i for i in (5, 0, 2))
>>> next(g)
2
>>> next(g)
Traceback (most recent call last):
File "<pyshell#37>", line 1, in -toplevel-
next(g)
File "<pyshell#35>", line 1, in <generator expression>
g = (10 // i for i in (5, 0, 2))
ZeroDivisionError: integer division or modulo by zero
>>> next(g)
Traceback (most recent call last):
File "<pyshell#38>", line 1, in -toplevel-
next(g)
StopIteration
Make sure that None is a valid return value
>>> list(None for i in range(10))
[None, None, None, None, None, None, None, None, None, None]
Check that generator attributes are present
>>> g = (i*i for i in range(3))
>>> expected = set(['gi_frame', 'gi_running'])
>>> set(attr for attr in dir(g) if not attr.startswith('__')) >= expected
True
>>> print(g.__next__.__doc__)
x.__next__() <==> next(x)
>>> import types
>>> isinstance(g, types.GeneratorType)
True
Check the __iter__ slot is defined to return self
>>> iter(g) is g
True
Verify that the running flag is set properly
>>> g = (me.gi_running for i in (0,1))
>>> me = g
>>> me.gi_running
0
>>> next(me)
1
>>> me.gi_running
0
Verify that genexps are weakly referencable
>>> import weakref
>>> g = (i*i for i in range(4))
>>> wr = weakref.ref(g)
>>> wr() is g
True
>>> p = weakref.proxy(g)
>>> list(p)
[0, 1, 4, 9]
"""
__test__ = {'doctests' : doctests}
def test_main(verbose=None):
import sys
from test import support
from test import test_genexps
support.run_doctest(test_genexps, verbose)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_doctest(test_genexps, verbose)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == "__main__":
test_main(verbose=True)
| [
"thezhangwei@gmail.com"
] | thezhangwei@gmail.com |
44acbae65b649a3f770891b1339416a2a6df77d6 | 93d175f845172504e245a72ea74352d37fb12ce4 | /chapter4/c4allMyCats2.py | 4b64839c42ab82b449633bad19b2b031f563f74a | [] | no_license | tatsuei/atbswp-code | de4be39b3043d6be2e54bbe36e5fa4a14a0d72f5 | b17254205f7e58765607a64f34c1da7d1accefd2 | refs/heads/main | 2023-02-28T11:02:17.849839 | 2021-01-26T13:47:51 | 2021-01-26T13:47:51 | 332,991,524 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import re # Regular expression library
import sys
# Checking user input to see if they wanna restart the program
def checkRes():
noNames = input('Aww. They don\'t have names yet. Wanna name them?'
' (Y/N)\n')
return noNames
def floofNaming():
floofNames = []
while True:
print('Hello! Give this smol floof a name?', 'Floof #' +
str(len(floofNames) + 1), '(Or press enter to skip.):')
name = input()
if name == '':
break
elif not re.match("^[a-z]*$", name, flags=re.I):
print('Hey! Names begin with letters! L E T T E R S!')
continue
floofNames = floofNames + [name]
# Checks for empty array using Boolean logic, ie fN != fN == null/None
if not floofNames:
while True:
userRes = checkRes()
if userRes.lower() == 'y':
floofNaming()
elif userRes.lower() == 'n':
print('Okay :( You should give them names next time! :D')
sys.exit()
else:
continue
else:
print('So, our floofs are called:')
for name in floofNames:
print(' ' + name)
floofNaming()
# elif not re.match("^[a-z]*$", name, flags=re.I):
# Regex checking for letters, 're.I' where I = ignorecase.
# expression meaning: 'if any (*) character at the start (^)
# or end ($) doesn't match letters between a-z (represented
# by [a-z] with the square brackets meaning the set of
# characters to match
| [
"eiji@tutanota.com"
] | eiji@tutanota.com |
5eccf13eebc94af807b3d12ca1a02e2097936d43 | 080529b6dcc745c0dbbb95080a47575e15de5ee5 | /mysite/projectManager/migrations/0005_auto_20160723_1442.py | 85607bb23b004c90c382c9d8a5c699c4602e7149 | [] | no_license | paryoja/expManager | 2a5a33e2bed710069e08c81d27715f91bee0ebd1 | 162205ba0e2474871cdc92118122a0678bce0f5c | refs/heads/master | 2021-01-11T18:31:04.508688 | 2017-09-01T07:12:36 | 2017-09-01T07:12:36 | 79,559,486 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-23 05:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projectManager', '0004_auto_20160723_1432'),
]
operations = [
migrations.AlterField(
model_name='expitem',
name='dataset',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='projectManager.Dataset'),
),
]
| [
"yjpark@kdd.snu.ac.kr"
] | yjpark@kdd.snu.ac.kr |
d74cb975867f89c208482325ea88b95bfab1eb3b | 1d1a0b45f91c68ae26d6df65726b0b4134810ef8 | /main/decorators.py | b6b8db24c552d4a9378259b7eb06d2b6be891207 | [] | no_license | SergioAnanias/amigos | b5230ca2d69fcaaca0592fc1eb7ef438511ba550 | b11b6fddeb5d7e9715bee81f16c11d4872eda5ec | refs/heads/main | 2023-06-26T03:10:41.660508 | 2021-07-24T21:00:34 | 2021-07-24T21:00:34 | 389,376,404 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | py | from django.shortcuts import render, HttpResponse, redirect
def loginauth(func):
def wrapper(request):
if not 'user' in request.session or not request.session['user']:
return redirect("/login")
return func(request)
return wrapper | [
"sergio.ananias.o@gmail.com"
] | sergio.ananias.o@gmail.com |
43d2678fe00adbaa6aeb89d3ac85cee449782bf5 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/tankerkoenig/binary_sensor.py | 5f10b54f7042763cd7b371c8f9cef7f5b76c43ec | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 2,257 | py | """Tankerkoenig binary sensor integration."""
from __future__ import annotations
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LATITUDE, ATTR_LONGITUDE
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import TankerkoenigCoordinatorEntity, TankerkoenigDataUpdateCoordinator
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the tankerkoenig binary sensors."""
coordinator: TankerkoenigDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
stations = coordinator.stations.values()
entities = []
for station in stations:
sensor = StationOpenBinarySensorEntity(
station,
coordinator,
coordinator.show_on_map,
)
entities.append(sensor)
_LOGGER.debug("Added sensors %s", entities)
async_add_entities(entities)
class StationOpenBinarySensorEntity(TankerkoenigCoordinatorEntity, BinarySensorEntity):
"""Shows if a station is open or closed."""
_attr_device_class = BinarySensorDeviceClass.DOOR
def __init__(
self,
station: dict,
coordinator: TankerkoenigDataUpdateCoordinator,
show_on_map: bool,
) -> None:
"""Initialize the sensor."""
super().__init__(coordinator, station)
self._station_id = station["id"]
self._attr_name = (
f"{station['brand']} {station['street']} {station['houseNumber']} status"
)
self._attr_unique_id = f"{station['id']}_status"
if show_on_map:
self._attr_extra_state_attributes = {
ATTR_LATITUDE: station["lat"],
ATTR_LONGITUDE: station["lng"],
}
@property
def is_on(self) -> bool | None:
"""Return true if the station is open."""
data: dict = self.coordinator.data[self._station_id]
return data is not None and data.get("status") == "open"
| [
"noreply@github.com"
] | konnected-io.noreply@github.com |
4eea37a29cef71bf3177edc33f57ff5d01aedb49 | 995f574f9ce7e35c18b24fa83cef023cb6cb6880 | /0072.py | 6071b70755a51696c8e74491f1b5f48b4e437fa4 | [] | no_license | albertzang/leetcode | 99b1ca4f5067565e97257f2b9bc4209e3e067a5f | 672d42f4fc8c400d32d7fa9c8ff16df35ec98618 | refs/heads/master | 2021-09-12T18:18:29.734624 | 2018-04-19T21:07:34 | 2018-04-19T21:07:34 | 120,690,065 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
m = len(word1)
n = len(word2)
table = [[0 for _ in xrange(n + 1)] for _ in xrange(m + 1)]
for i in xrange(1, m + 1):
table[i][0] = i
for j in xrange(1, n + 1):
table[0][j] = j
for i in xrange(1, m + 1):
for j in xrange(1, n + 1):
diff = int(word1[i - 1] != word2[j - 1])
table[i][j] = min(table[i - 1][j - 1] + diff, table[i - 1][j] + 1, table[i][j - 1] + 1)
return table[m][n]
| [
"albertzang.ys@gmail.com"
] | albertzang.ys@gmail.com |
affb952b2d0ed78a461de81d919ccab61cc0b2f2 | 3dffcaf0a99c08b3bc6dc273c32e3510497473ba | /Exercice 17.py | b1ca74caff61b2502288c0d211a24a9ceabcb265 | [] | no_license | gnorgol/Python_Exercice | 400674f2947467f2edbb86794e50c653bdb8c69d | 73494395dd5110d9f8c1cfdc59df45ab0fb9e0fb | refs/heads/main | 2023-01-14T09:31:31.411617 | 2020-11-16T15:17:07 | 2020-11-16T15:17:07 | 311,283,645 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | s = input("Saisir une chaine de caractères : ")
for each in s:
print("le caractére : "+ str(each) + " figure" +str(s.count(each))+" fois dans la chaine s") | [
"noreply@github.com"
] | gnorgol.noreply@github.com |
1973a00d90ade7fd622026b0953fc2e3b73b5b26 | 212c888339b9530e6abf26d6ae79f676335dc39f | /aceui/base/basepage.py | c340b9d69d79e8719dcf0081b650860fb0779b2a | [] | no_license | HttpTesting/aceui | c8434699b45d8bcfb32c9a8e8196648b146894f3 | 9a63c25da1fd7e2a10d873780d8c46a911bda6d6 | refs/heads/master | 2023-02-10T13:54:04.749830 | 2021-01-06T09:30:51 | 2021-01-06T09:30:51 | 323,271,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,107 | py | """
__doc__:
create:2018/10/27
by: yhleng
"""
import os
import time
from PIL import Image
from selenium.common.exceptions import (
NoSuchElementException,
ElementNotVisibleException,
UnexpectedAlertPresentException
)
from selenium.webdriver.support.select import Select
from aceui.lib import gl
from aceui.lib.core import (
replay,
hight_light_conf,
reply_case_fail,
genrandomstr,
rndint,
rnd_num,
get_data
)
from aceui.lib.config import CONF
from selenium.webdriver.common.action_chains import ActionChains
class BasePage:
"""PO公共方法类"""
def __init__(self, baseurl, driver, pagetitle):
"""
初始化,driver对象及数据
:param baseurl: 目标地址
:param driver: webdriver对象
:param pagetitle: 用来断言的目标页标题
"""
self.base_url = baseurl
self.driver = driver
self.pagetitle = pagetitle
#-----------------
# 功能描述:所有公共方法,都写在以下
# -----------------
#打开浏览器
def _open(self, url):
"""
打开浏览器,并断言标题
:param url: 目标地址
:return: 无
"""
self.driver.maximize_window()
self.driver.get(url)
self.driver.implicitly_wait(10)
self.get_image
self.driver.implicitly_wait(0)
def is_display_timeout(self, element, timeses, *loc):
"""
在指定时间内,轮询元素是否显示
:param element: 元素对象
:param timeSes: 轮询时间
:return: bool
"""
start_time = int(time.time()) #秒级时间戳
timeses = int(timeses)
while (int(time.time())-start_time) <= timeses:
if element.is_displayed() and element.is_enabled():
return True
self.wait(500)
element = self.driver.find_element(*loc)
self.get_image
return False
@reply_case_fail(num=3)
def find_element(self, *loc):
"""
在指定时间内,查找元素;否则抛出异常
:param loc: 定位器
:return: 元素 或 抛出异常
"""
timeout = 10
try:
self.driver.implicitly_wait(timeout) #智能等待;超时设置
element = self.driver.find_element(*loc) #如果element没有找到,到此处会开始等待
if self.is_display_timeout(element, timeout, *loc):
self.hightlight(element) #高亮显示
self.driver.implicitly_wait(0) # 恢复超时设置
else:
raise ElementNotVisibleException #抛出异常,给except捕获
return element
except (NoSuchElementException, ElementNotVisibleException) as ex:
self.get_image
raise ex
else:
self.get_image
@hight_light_conf('HightLight')
def hightlight(self, element):
"""
元素高亮显示
:param element: 元素对象
:return: 无
"""
# arguments[0] 为element
# arguments[1]为"border: 2px solid red;"
self.driver.execute_script(
"arguments[0].setAttribute('style', arguments[1]);",
element,
"border: 2px solid red;" #边框border:2px; red红色
)
def get_element_image(self, element):
"""
截图,指定元素图片
:param element: 元素对象
:return: 无
"""
timestrmap = time.strftime('%Y%m%d_%H.%M.%S')
imgpath = os.path.join(
gl.imgPath,
'%s.png' % str(timestrmap)
)
#截图,获取元素坐标
self.driver.save_screenshot(imgpath)
left = element.location['x']
top = element.location['y']
e_width = left + element.size['width']
e_height = top + element.size['height']
picture = Image.open(imgpath)
picture = picture.crop(
(
left,
top,
e_width,
e_height
)
)
timestrmap = time.strftime('%Y%m%d_%H.%M.%S')
imgpath = os.path.join(
gl.imgPath, '%s.png' % str(timestrmap)
)
picture.save(imgpath)
print('screenshot:', timestrmap, '.png')
@property
def get_image(self):
'''
截取图片,并保存在images文件夹
:return: 无
'''
timestrmap = time.strftime('%Y%m%d_%H.%M.%S')
imgpath = os.path.join(
gl.imgPath,
'%s.png' % str(timestrmap)
)
self.driver.save_screenshot(imgpath)
print('screenshot:', timestrmap, '.png')
@reply_case_fail(num=3)
def find_elements(self, *loc):
'''批量找元素'''
timeout = 10 #智能等待时间
try:
self.driver.implicitly_wait(timeout) #智能等待;此贯穿self.driver整个生命周期
elements = self.driver.find_elements(*loc)
self.driver.implicitly_wait(0) #恢复等待
return elements
except NoSuchElementException as ex:
self.get_image # 截取图片
raise ex
def iter_click(self, *loc):
'''批量点击某元素'''
element = self.find_elements(*loc)
for e in element:
e.click()
def iter_input(self, text=None, *loc):
"""
批量输入
:param text: 输入内容;为list
:param loc: 定位器(By.XPATH,'//*[@id='xxxx']/input')
:return: 无
"""
elements = self.find_elements(*loc)
for i, e in enumerate(elements):
self.wait(1000)
#e.clear()
e.send_keys(list(text)[i])
#文本框输入
def send_keys(self, content, *loc):
'''
:param content: 文本内容
:param itype: 如果等1,先清空文本框再输入。否则不清空直接输入
:param loc: 文本框location定位器
:return:
'''
inputelement = self.find_element(*loc)
#inputElement.clear()
inputelement.send_keys(content)
def clear_input_text(self, *loc):
'''清除文本框内容'''
self.find_element(*loc).clear()
def add_cookies(self, ck_dict):
'''
增加cookies到浏览器
:param ck_dict: cookies字典对象
:return: 无
'''
for key in ck_dict.keys():
self.driver.add_cookie(
{
"name":key,
"value":ck_dict[key]
}
)
def is_display(self, *loc):
#isDisable
'''
元素存在,判断是否显示
:param loc: 定位器
:return: 元素存在并显示返回True;否则返回False
'''
timeout = 20
try:
self.driver.implicitly_wait(timeout)
element = self.driver.find_element(*loc)
if self.is_display_timeout(element, timeout, *loc):
self.hightlight(element)
return True
return False
except (
NoSuchElementException,
ElementNotVisibleException,
UnexpectedAlertPresentException):
self.get_image #10秒还未找到显示的元素
return False
def is_exist(self, *loc):
"""
判断元素,是否存在
:param loc: 定位器(By.ID,'kw')
:return: True 或 False
"""
timeout = 60
try:
self.driver.implicitly_wait(timeout)
e = self.driver.find_element(*loc)
"""高亮显示,定位元素"""
self.hightlight(e)
self.driver.implicitly_wait(0)
except NoSuchElementException:
self.get_image #10秒还未找到元素,截图
return False
return True
def exist_and_click(self, *loc):
'''如果元素存在则单击,不存在则忽略'''
print('Click:{0}'.format(loc))
timeout = 3 #超时 时间
try:
self.driver.implicitly_wait(timeout)
element = self.driver.find_element(*loc)
self.hightlight(element)
element.click()
self.driver.implicitly_wait(0)
except NoSuchElementException:
pass
def exist_and_input(self, text, *loc):
'''如果元素存在则输入,不存在则忽略'''
print('Input:{0}'.format(text))
timeout = 3
try:
self.driver.implicitly_wait(timeout)
element = self.driver.find_element(*loc)
self.hightlight(element) #高亮显示
element.send_keys(str(text).strip())
self.driver.implicitly_wait(0)
except (NoSuchElementException, ElementNotVisibleException):
pass
def get_tag_text(self, txt_name, *loc):
"""
获取元素对象属性值
:param propertyName: Text属性名称
:param loc: #定位器
:return: 属性值 或 raise
"""
timeout = 20
try:
self.driver.implicitly_wait(timeout)
element = self.find_element(*loc)
self.hightlight(element) #高亮显示
#获取属性
pro_value = getattr(element, str(txt_name))
self.driver.implicitly_wait(0)
return pro_value
except (NoSuchElementException, NameError) as ex:
self.get_image #错误截图
raise ex
@property
def switch_window(self):
"""
切换window窗口,切换一次后退出
:return: 无
"""
cur_handle = self.driver.current_window_handle
all_handle = self.driver.window_handles
for h in all_handle:
if h != cur_handle:
self.driver.switch_to.window(h)
break
def wait(self, ms):
"""
线程休眼时间
:param ms: 毫秒
:return: 无
"""
ms = float(ms) / 1000
time.sleep(ms)
@replay
def js_click(self, desc, *loc):
"""通过js注入的方式去,单击元素"""
print('Click{}:{}'.format(desc, loc))
element = self.find_element(*loc)
self.driver.execute_script(
"arguments[0].click();",
element
)
@replay
def input_text(self, text, desc, *loc):
"""
输入文本操作
:param text:
输入文本内容;如果为%BLANK%,为清除输入框默认值;
输入文本内容;如果为%NONE%,为不做任何操作
:param desc:
:param loc:
:return:
"""
print('Input{}:{}'.format(desc, text))
var = get_data(gl.configFile,'CONFIG')
flag_conf = var['Custom_Var']
# 判断是自定义函数%%还是普通文本
if str(text).startswith('%') and str(text).endswith('%'):
flag = str(text)
#判断是自定义函数是否带参数
if ('(' in str(text)) and (')' in str(text)):
s = str(text).rsplit('(')
flag = '{}{}'.format(s[0],'%')
param = s[1].rsplit(')')[0]
#eval恢复函数原型并调用
eval(str(flag_conf[flag]).format(param))
else:
flag = str(text)
eval(flag_conf[flag])
else:
self.send_keys(text, *loc)
@replay
def input_text_index(self, desc, text ,index, *loc):
"""
按索引输入文本
:param desc: 输入框描述
:param text: 输入内容
:param index: 元素索引
:param loc: 定位器
:return: 无
"""
print('Input{}:{}'.format(desc, text))
index = int(index)
var = get_data(gl.configFile,'CONFIG')
flag_conf = var['Custom_Var']
# 判断是自定义函数%%还是普通文本
if str(text).startswith('%') and str(text).endswith('%'):
flag = str(text)
#判断是自定义函数是否带参数
if ('(' in str(text)) and (')' in str(text)):
s = str(text).rsplit('(')
flag = '{}{}'.format(s[0],'%')
param = s[1].rsplit(')')[0]
#eval恢复函数原型并调用
eval(str(flag_conf[flag]).format(param))
else:
flag = str(text)
eval(flag_conf[flag])
else:
ele = self.find_elements(*loc)[index]
ele.send_keys(text)
@replay
def click_button(self, desc, *loc):
"""点击操作"""
print('Click:{}{}'.format(desc, loc))
if str(desc).strip().upper() == '%NONE%':
pass
else:
ele = self.find_element(*loc)
self.action_chains.move_to_element(ele).move_by_offset(5,5).click().perform()
@replay
def click_btn_index(self, desc, index, *loc):
"""
点击操作,按索引,适用于findelemens方法
:param desc: 描述
:param index: 点击索引
:param op: 如果是True则,不执行点击操作,为False则点击
:param loc: 定位器
:return: 无
"""
print('Clicks:{}{}'.format(desc, loc))
if index == '%NONE%':
pass
else:
ele = self.find_elements(*loc)[int(index)]
# 元素高亮显示
self.hightlight(ele)
# 元素单击
self.action_chains.move_to_element(ele).move_by_offset(5,5).click().perform()
@replay
def select_tab(self, *loc):
'''选择tab操作'''
print('Select:{}'.format(loc))
self.find_element(*loc).click()
@property
def open(self):
"""打开浏览器,写入cookies登录信息"""
cook_conf = CONF.read(gl.configFile)['CONFIG']['Cookies']
self._open(self.base_url)
if cook_conf['USED']:
self.add_cookies(cook_conf['LoginCookies'])
self._open(self.base_url)
assert self.driver.title == self.pagetitle, "断言标题错误,请查检页面"
def execute_script(self, js):
"""执行js脚本"""
self.driver.execute_script(js)
def select_list(self, *loc):
"""
创建Select对象
:param loc: 定位
:return: Select对象
"""
return Select(self.find_element(*loc))
def get_element_attribute(self, attr, *loc):
"""
获取元素属性
:param attr: 属性
:param loc: 定位
:return: 无
"""
ele = self.find_element(*loc)
try:
att = ele.get_attribute(attr)
except Exception as ex:
print('属性错误:{}'.format(attr))
raise ex
return att
def set_element_attribute(self,attr, val ,*loc):
"""
设置元素属性值,适用于元素唯一
:param attr: 元素属性名称如:class
:param val: 元素属性值
:param loc: 定位
:return: 无
"""
element = self.find_element(*loc)
self.driver.execute_script(
"arguments[0].attr({},{});".format(attr, val),
element
)
def del_element_attribute(self, attr, *loc):
element = self.find_element(*loc)
self.driver.execute_script(
"arguments[0].removeAttribute({});".format(attr),
element
)
def set_elements_attribute(self,attr, val ,index ,*loc):
"""
设置元素属性
:param attr: 元素属性名如:class
:param val: 元素属性值
:param index: 元素索引,为1000时代表全部设置
:param loc: 元素定位
:return: 无
"""
index = int(index)
element = self.find_elements(*loc)
if index == 1000:
for ele in element:
self.driver.execute_script(
"arguments[0].attr({},{});".format(attr, val),
ele
)
else:
self.driver.execute_script(
"arguments[0].attr({},{});".format(attr, val),
element[index]
)
def get_index_text(self, txt_name, index, *loc):
"""
获取元素对象属性值
:param propertyName: Text属性名称
:param loc: #定位器
:return: 属性值 或 raise
"""
timeout = 10
try:
self.driver.implicitly_wait(timeout)
element = self.find_elements(*loc)[int(index)]
self.hightlight(element) #高亮显示
#获取属性
pro_value = getattr(element, str(txt_name))
self.driver.implicitly_wait(0)
return pro_value
except (NoSuchElementException, NameError) as ex:
self.get_image #错误截图
raise ex
def get_attribute_index(self, attr, index,*loc):
"""
获取元素属性
:param attr: 属性
:param loc: 定位
:return: 无
"""
ele = self.find_elements(*loc)[int(index)]
try:
att = ele.get_attribute(attr)
except Exception as ex:
print('属性错误:{}'.format(attr))
raise ex
return att
@property
def action_chains(self):
"""右键方法"""
action = ActionChains(self.driver)
return action
def scroll_top_by_element(self, *loc):
'''
将元素滚动到屏幕最上方
element 滚动条所在的元素div
'''
ele = self.find_element(*loc)
self.driver.execute_script("arguments[0].scrollTop=10000;", ele)
if __name__ == "__main__":
base = BasePage('', '', '')
base.input_text('%RND(5)%', '', '')
| [
"lengyaohui@163.com"
] | lengyaohui@163.com |
878ea199022c142618b146acd39e6a8e298d8e7d | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4456629.3.spec | 8a2485d2abe4621730ed59fee93f7b17ba12906e | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 14,301 | spec | {
"id": "mgm4456629.3",
"metadata": {
"mgm4456629.3.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 38759,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 491,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/100.preprocess.removed.fna.gz"
},
"100.preprocess.removed.fna.stats": {
"compression": null,
"description": null,
"size": 305,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/100.preprocess.removed.fna.stats"
},
"205.screen.h_sapiens_asm.info": {
"compression": null,
"description": null,
"size": 448,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/205.screen.h_sapiens_asm.info"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1065,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 38060,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 346,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 309,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/299.screen.passed.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 8236,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 307,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 40949,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/440.cluster.rna97.mapping"
},
"440.cluster.rna97.mapping.stats": {
"compression": null,
"description": null,
"size": 48,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/440.cluster.rna97.mapping.stats"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 56575,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 14500,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 10174,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 104770,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/450.rna.sims.gz"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 5859,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 5364,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 8554,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 43,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 12814,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 812824,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 94,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 1,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 355,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 35,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 1491,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 1790,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 633,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 206,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 36222,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 79,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 4498,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4456629.3/file/999.done.species.stats"
}
},
"id": "mgm4456629.3",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4456629.3"
}
},
"raw": {
"mgm4456629.3.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4456629.3"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
79160475b546c0e1a9d4475f04595beef7a90f93 | e930b11c3271bec1bc9e76dd0cca27d7f0be4378 | /Activity7_BasicManipulations/TransformingImages.py | 57380ae58a86ade0188a5aa751d4e50a8149b4ca | [] | no_license | tianyoul/AI-Robotics | c55b0382c850298af9bbc03a3cee93fbecafbe17 | 01732d90d5099a3ac3b723ff05376d27208d534a | refs/heads/master | 2021-04-29T18:03:18.549750 | 2018-04-30T21:53:15 | 2018-04-30T21:53:15 | 121,683,110 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | import cv2
import numpy
# Demonstrates cv2.resize on a sample image.  Each cv2.waitKey(0) call blocks
# until any key is pressed, so the windows advance one keypress at a time.
# NOTE(review): `numpy` looks unused in this snippet -- confirm before removing.
image = cv2.imread("TestImages/DollarCoin.jpg")
cv2.imshow("Original Image", image)
cv2.waitKey(0)
#Returns a new image that is 100 x 100 pixels, a stretched version of the original
# (the (100, 100) tuple is (width, height); the aspect ratio is not preserved)
resized = cv2.resize(image, (100, 100))
cv2.imshow("Resized Image", resized)
cv2.waitKey(0)
#Returns a new image that is twice the size of the original, same aspect ratio
# (dsize=(0, 0) tells cv2.resize to derive the output size from fx/fy factors)
resized = cv2.resize(image, (0, 0), fx = 2, fy = 2)
cv2.imshow("same ratio Image", resized)
cv2.waitKey(0)
#Returns a new image whose columns have been squashed to half the original
# (fx=0.5 halves the width while fy=1.0 keeps the height, narrowing the image)
resized = cv2.resize(image, (0, 0), fx = 0.5, fy = 1.0)
cv2.imshow("squashed to half Image", resized)
cv2.waitKey(0)
"wkim@macalester.edu"
] | wkim@macalester.edu |
c8983eaece84eaea3e11e2c48d8b55af4c40209d | 998e56e8be92a57024fbc2d4b2d098bd4aa316fb | /todoapp/main.py | 260a659339a633f28e848daac5dfcb197343f2f2 | [] | no_license | Tela96/cc_work | f51a045f102c521b692aaa75e6e15916765ff75e | 9b28c44e7ce302799bf766a13e6bc40f35f13ba5 | refs/heads/master | 2021-01-11T06:49:07.985726 | 2017-01-24T13:46:39 | 2017-01-24T13:46:39 | 72,417,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 127 | py | import pickle
import io
import pickle

# Round-trip a small list through pickle using an in-memory binary buffer.
# (The original code passed an undefined name `test` to pickle.dump/load;
# pickle needs a readable/writable binary file-like object such as io.BytesIO,
# which also explains the otherwise-unused `import io`.)
list_test = [1, 2, 3, 4, 5]
buffer = io.BytesIO()
pickle.dump(list_test, buffer)   # serialize the list into the buffer
buffer.seek(0)                   # rewind so load() reads from the start
list_test = pickle.load(buffer)  # deserialize it back
print(list_test[3])
"tel.akos.2@gmail.com"
] | tel.akos.2@gmail.com |
894e2f67bba5c83742d2ce4fd70856440443ba52 | b022261b8e6ff3dad97459946b72cb0fad0c468d | /longest_common_subsequence.py | 2c57c7c9adf8a809675c34dd810a5731f82f12c6 | [] | no_license | alleniac/coding | fb5c9960e4c83c6ace27ff1be4facbff95ef4952 | 9b9069189ba4b048adbdb085e5d3719b20bf5e68 | refs/heads/master | 2020-08-17T04:42:26.054964 | 2019-12-18T00:20:40 | 2019-12-18T00:20:40 | 215,609,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,439 | py | import time
def test(func, testCase):
test1 = ['abdca', 'cbda']
test2 = ['passport', 'psspt']
lcl = locals()
start = time.perf_counter()
result = func(lcl[f'test{testCase}'][0], lcl[f'test{testCase}'][1])
end = time.perf_counter()
print(f'function: {func.__name__}, testCase: {testCase}, result: {result}, time: {end - start}')
def func1(s1, s2):
    """Brute-force entry point: plain recursive search, no memoization."""
    return dfs(s1, s2, 0, 0, 0)
def func2(s1, s2):
    """Top-down entry point: recursion memoized by (i, j, current run length)."""
    n = len(s1)
    m = len(s2)
    # memo[i][j][length] caches dfs2 results; -1 marks "not computed yet".
    # The third dimension is max(n, m), which bounds any reachable run length.
    memo = [[[-1 for _ in range(max(n, m))] for _ in range(m)] for _ in range(n)]
    return dfs2(s1, s2, memo, 0, 0, 0)
def func3(s1, s2):
    """Bottom-up DP for the length of the longest common substring of s1 and s2.

    dp[i][j] is the length of the common suffix of s1[:i] and s2[:j]; the
    answer is the maximum cell value.  Returns 0 when either string is empty
    or the strings share no characters.
    """
    n = len(s1)
    m = len(s2)
    dp = [[0 for _ in range(m + 1)] for _ in range(n + 1)]
    # Start at 0, not -1: the original leaked -1 out of the function whenever
    # no character ever matched, which is inconsistent with func4's result.
    maxLength = 0
    # Ranging from 1 replaces the original "if i == 0 or j == 0: continue".
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            if s1[i - 1] == s2[j - 1]:
                dp[i][j] = dp[i - 1][j - 1] + 1
                maxLength = max(maxLength, dp[i][j])
    return maxLength
def func4(s1, s2):
    """Longest common substring length using two rolling DP rows (O(m) space)."""
    n, m = len(s1), len(s2)
    if n == 0 or m == 0:
        return 0
    # previous_row[j] holds the common-suffix length for the prior s1 prefix.
    previous_row = [0] * (m + 1)
    best = 0
    for i in range(1, n + 1):
        current_row = [0] * (m + 1)
        for j in range(1, m + 1):
            if s1[i - 1] == s2[j - 1]:
                current_row[j] = previous_row[j - 1] + 1
                if current_row[j] > best:
                    best = current_row[j]
        previous_row = current_row
    return best
def dfs(s1, s2, i, j, length):
    """Recursive longest-common-substring search from positions (i, j).

    `length` is the size of the run of equal characters that ends just
    before (i, j).  Exponential time; func2 adds memoization via dfs2.
    """
    if i >= len(s1) or j >= len(s2):
        return length
    # Extend the current run when the characters match...
    extended = length
    if s1[i] == s2[j]:
        extended = dfs(s1, s2, i + 1, j + 1, length + 1)
    # ...and always consider restarting a fresh run past either position.
    return max(extended, dfs(s1, s2, i, j + 1, 0), dfs(s1, s2, i + 1, j, 0))
# Returns the length of the longest common substring reachable from positions
# i (in s1) and j (in s2); `length` is the size of the run of equal characters
# ending just before (i, j).  Memoized by (i, j, length).
def dfs2(s1, s2, memo, i, j, length):
    """Memoized variant of dfs; results are cached in memo[i][j][length].

    The base case now triggers at len(s1)/len(s2) (one past the end), matching
    dfs.  The original stopped at len - 1 and returned without ever comparing
    the final characters, so e.g. dfs2('ab', 'ab', ...) reported 1 instead of 2.
    The memo is only indexed for i < len(s1) and j < len(s2), so func2's memo
    dimensions remain sufficient.
    """
    if i == len(s1) or j == len(s2):
        return length
    if memo[i][j][length] != -1:
        return memo[i][j][length]
    best = length
    if s1[i] == s2[j]:
        best = dfs2(s1, s2, memo, i + 1, j + 1, length + 1)
    memo[i][j][length] = max(best, dfs2(s1, s2, memo, i, j + 1, 0), dfs2(s1, s2, memo, i + 1, j, 0))
    return memo[i][j][length]
if __name__ == '__main__':
    # Exercise every implementation against both benchmark cases, keeping the
    # original order: all four functions on case 1, then all four on case 2.
    for case_number in (1, 2):
        for implementation in (func1, func2, func3, func4):
            test(implementation, case_number)
| [
"theallenma@gmail.com"
] | theallenma@gmail.com |
93bfb4e8a151b2e4adbc5d836ee68c0d723722e0 | b557810d1d4603db7518c7229b5b3466b77a136e | /live_chat/bin/wheel3 | e8b4584603a7cf61173da5244c3fc3a3b9cd1994 | [] | no_license | royinttime/python_live_chat | c809b3aca411553cda8bda82bda9d6609c5c394c | 715d0aad78378fe7144b34c1c32930465dc0df98 | refs/heads/master | 2021-05-22T01:03:58.335937 | 2020-04-04T03:24:06 | 2020-04-04T03:24:06 | 252,898,438 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/Users/roytseng/side_project/python_live_chat/live_chat/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"royinttime@gmail.com"
] | royinttime@gmail.com | |
a0ba0f8d5ac0bf71c18e90491831eea9e884eea3 | 2a91a64f5464c48fb56bd2f0e01668737d4eafa9 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/resourcesettings/v1alpha1/resourcesettings_v1alpha1_messages.py | b11a7c1f6f7f92eb4e127e2fa1b46f7f49f45edf | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | salewski/google-cloud-sdk | 8513faf8c2f5b9180361efb567c4cfb9986d1e21 | 060174026ac068b6442b6c58bedf5adc7bc549cb | refs/heads/master | 2023-08-16T09:44:57.948620 | 2021-10-05T00:00:00 | 2021-10-05T16:15:40 | 417,465,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,806 | py | """Generated message classes for resourcesettings version v1alpha1.
The Resource Settings API allows users to control and modify the behavior of
their GCP resources (e.g., VM, firewall, Project, etc.) across the Cloud
Resource Hierarchy.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'resourcesettings'
class GoogleCloudResourcesettingsV1alpha1ListSettingsResponse(_messages.Message):
r"""The response from ListSettings.
Fields:
nextPageToken: Unused. A page token used to retrieve the next page.
settings: A list of settings that are available at the specified Cloud
resource.
"""
nextPageToken = _messages.StringField(1)
settings = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1Setting', 2, repeated=True)
class GoogleCloudResourcesettingsV1alpha1SearchSettingValuesResponse(_messages.Message):
r"""The response from SearchSettingValues.
Fields:
nextPageToken: Unused. A page token used to retrieve the next page.
settingValues: All setting values that exist on the specified Cloud
resource.
"""
nextPageToken = _messages.StringField(1)
settingValues = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 2, repeated=True)
class GoogleCloudResourcesettingsV1alpha1Setting(_messages.Message):
r"""The schema for setting values. At a given Cloud resource, a setting can
parent at most one setting value.
Enums:
DataTypeValueValuesEnum: The data type for this setting.
Fields:
dataType: The data type for this setting.
defaultValue: The value received by LookupEffectiveSettingValue if no
setting value is explicitly set. Note: not all settings have a default
value.
description: A detailed description of what this setting does.
displayName: The human readable name for this setting.
name: The resource name of the setting. Must be in one of the following
forms: * `projects/{project_number}/settings/{setting_name}` *
`folders/{folder_id}/settings/{setting_name}` *
`organizations/{organization_id}/settings/{setting_name}` For example,
"/projects/123/settings/gcp-enableMyFeature"
readOnly: A flag indicating that values of this setting cannot be modified
(see documentation of the specific setting for updates and reasons);
however, it may be deleted using DeleteSettingValue if
DeleteSettingValueRequest.ignore_read_only is set to true. Using this
flag is considered an acknowledgement that the setting value cannot be
recreated. See DeleteSettingValueRequest.ignore_read_only for more
details.
"""
class DataTypeValueValuesEnum(_messages.Enum):
r"""The data type for this setting.
Values:
DATA_TYPE_UNSPECIFIED: Unspecified data type.
BOOLEAN: A boolean setting.
STRING: A string setting.
STRING_SET: A string set setting.
ENUM_VALUE: A Enum setting
DURATION_VALUE: A Duration setting
STRING_MAP: A string->string map setting
"""
DATA_TYPE_UNSPECIFIED = 0
BOOLEAN = 1
STRING = 2
STRING_SET = 3
ENUM_VALUE = 4
DURATION_VALUE = 5
STRING_MAP = 6
dataType = _messages.EnumField('DataTypeValueValuesEnum', 1)
defaultValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1Value', 2)
description = _messages.StringField(3)
displayName = _messages.StringField(4)
name = _messages.StringField(5)
readOnly = _messages.BooleanField(6)
class GoogleCloudResourcesettingsV1alpha1SettingValue(_messages.Message):
r"""The instantiation of a setting. Every setting value is parented by its
corresponding setting.
Fields:
etag: A fingerprint used for optimistic concurrency. See
UpdateSettingValue for more details.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
readOnly: Output only. A flag indicating that this setting value cannot be
modified; however, it may be deleted using DeleteSettingValue if
DeleteSettingValueRequest.ignore_read_only is set to true. Using this
flag is considered an acknowledgement that the setting value cannot be
recreated. This flag is inherited from its parent setting and is for
convenience purposes. See Setting.read_only for more details.
updateTime: Output only. The timestamp indicating when the setting value
was last updated.
value: The value of the setting. The data type of Value must always be
consistent with the data type defined by the parent setting.
"""
etag = _messages.StringField(1)
name = _messages.StringField(2)
readOnly = _messages.BooleanField(3)
updateTime = _messages.StringField(4)
value = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1Value', 5)
class GoogleCloudResourcesettingsV1alpha1Value(_messages.Message):
r"""The data in a setting value.
Fields:
booleanValue: Defines this value as being a boolean value.
durationValue: Defines this value as being a Duration.
enumValue: Defines this value as being a Enum.
stringMapValue: Defines this value as being a StringMap.
stringSetValue: Defines this value as being a StringSet.
stringValue: Defines this value as being a string value.
"""
booleanValue = _messages.BooleanField(1)
durationValue = _messages.StringField(2)
enumValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1ValueEnumValue', 3)
stringMapValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1ValueStringMap', 4)
stringSetValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1ValueStringSet', 5)
stringValue = _messages.StringField(6)
class GoogleCloudResourcesettingsV1alpha1ValueEnumValue(_messages.Message):
r"""A enum value that can hold any enum type setting values. Each enum type
is represented by a number, this representation is stored in the
definitions.
Fields:
value: The value of this enum
"""
value = _messages.StringField(1)
class GoogleCloudResourcesettingsV1alpha1ValueStringMap(_messages.Message):
r"""A string->string map value that can hold a map of string keys to string
values. The maximum length of each string is 200 characters and there can be
a maximum of 50 key-value pairs in the map.
Messages:
MappingsValue: The key-value pairs in the map
Fields:
mappings: The key-value pairs in the map
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class MappingsValue(_messages.Message):
r"""The key-value pairs in the map
Messages:
AdditionalProperty: An additional property for a MappingsValue object.
Fields:
additionalProperties: Additional properties of type MappingsValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MappingsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
mappings = _messages.MessageField('MappingsValue', 1)
class GoogleCloudResourcesettingsV1alpha1ValueStringSet(_messages.Message):
r"""A string set value that can hold a set of strings. The maximum length of
each string is 60 characters and there can be a maximum of 50 strings in the
string set.
Fields:
values: The strings in the set
"""
values = _messages.StringField(1, repeated=True)
class GoogleProtobufEmpty(_messages.Message):
r"""A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to use it as the request
or the response type of an API method. For instance: service Foo { rpc
Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON
representation for `Empty` is empty JSON object `{}`.
"""
class ResourcesettingsFoldersSettingsDeleteValueRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsDeleteValueRequest object.
Fields:
ignoreReadOnly: A flag that allows the deletion of the value of a
`read_only` setting. WARNING: use at your own risk. Deleting the value
of a read only setting is an irreversible action (i.e., it cannot be
created again).
name: The name of the setting value to delete. See SettingValue for naming
requirements.
"""
ignoreReadOnly = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class ResourcesettingsFoldersSettingsGetValueRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsGetValueRequest object.
Fields:
name: The name of the setting value to get. See SettingValue for naming
requirements.
"""
name = _messages.StringField(1, required=True)
class ResourcesettingsFoldersSettingsListRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsListRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsFoldersSettingsLookupEffectiveValueRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsLookupEffectiveValueRequest object.
Fields:
parent: The setting for which an effective value will be evaluated. See
Setting for naming requirements.
"""
parent = _messages.StringField(1, required=True)
class ResourcesettingsFoldersSettingsSearchRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsSearchRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsFoldersSettingsUpdateValueRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsUpdateValueRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class ResourcesettingsFoldersSettingsValueCreateRequest(_messages.Message):
r"""A ResourcesettingsFoldersSettingsValueCreateRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class ResourcesettingsOrganizationsSettingsDeleteValueRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsDeleteValueRequest object.
Fields:
ignoreReadOnly: A flag that allows the deletion of the value of a
`read_only` setting. WARNING: use at your own risk. Deleting the value
of a read only setting is an irreversible action (i.e., it cannot be
created again).
name: The name of the setting value to delete. See SettingValue for naming
requirements.
"""
ignoreReadOnly = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class ResourcesettingsOrganizationsSettingsGetValueRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsGetValueRequest object.
Fields:
name: The name of the setting value to get. See SettingValue for naming
requirements.
"""
name = _messages.StringField(1, required=True)
class ResourcesettingsOrganizationsSettingsListRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsListRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsOrganizationsSettingsLookupEffectiveValueRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsLookupEffectiveValueRequest
object.
Fields:
parent: The setting for which an effective value will be evaluated. See
Setting for naming requirements.
"""
parent = _messages.StringField(1, required=True)
class ResourcesettingsOrganizationsSettingsSearchRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsSearchRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsOrganizationsSettingsUpdateValueRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsUpdateValueRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class ResourcesettingsOrganizationsSettingsValueCreateRequest(_messages.Message):
r"""A ResourcesettingsOrganizationsSettingsValueCreateRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class ResourcesettingsProjectsSettingsDeleteValueRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsDeleteValueRequest object.
Fields:
ignoreReadOnly: A flag that allows the deletion of the value of a
`read_only` setting. WARNING: use at your own risk. Deleting the value
of a read only setting is an irreversible action (i.e., it cannot be
created again).
name: The name of the setting value to delete. See SettingValue for naming
requirements.
"""
ignoreReadOnly = _messages.BooleanField(1)
name = _messages.StringField(2, required=True)
class ResourcesettingsProjectsSettingsGetValueRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsGetValueRequest object.
Fields:
name: The name of the setting value to get. See SettingValue for naming
requirements.
"""
name = _messages.StringField(1, required=True)
class ResourcesettingsProjectsSettingsListRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsListRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsProjectsSettingsLookupEffectiveValueRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsLookupEffectiveValueRequest object.
Fields:
parent: The setting for which an effective value will be evaluated. See
Setting for naming requirements.
"""
parent = _messages.StringField(1, required=True)
class ResourcesettingsProjectsSettingsSearchRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsSearchRequest object.
Fields:
pageSize: Unused. The size of the page to be returned.
pageToken: Unused. A page token used to retrieve the next page.
parent: The Cloud resource that parents the setting. Must be in one of the
following forms: * `projects/{project_number}` * `projects/{project_id}`
* `folders/{folder_id}` * `organizations/{organization_id}`
"""
pageSize = _messages.IntegerField(1, variant=_messages.Variant.INT32)
pageToken = _messages.StringField(2)
parent = _messages.StringField(3, required=True)
class ResourcesettingsProjectsSettingsUpdateValueRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsUpdateValueRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class ResourcesettingsProjectsSettingsValueCreateRequest(_messages.Message):
r"""A ResourcesettingsProjectsSettingsValueCreateRequest object.
Fields:
googleCloudResourcesettingsV1alpha1SettingValue: A
GoogleCloudResourcesettingsV1alpha1SettingValue resource to be passed as
the request body.
name: The resource name of the setting value. Must be in one of the
following forms: *
`projects/{project_number}/settings/{setting_name}/value` *
`folders/{folder_id}/settings/{setting_name}/value` *
`organizations/{organization_id}/settings/{setting_name}/value` For
example, "/projects/123/settings/gcp-enableMyFeature/value"
"""
googleCloudResourcesettingsV1alpha1SettingValue = _messages.MessageField('GoogleCloudResourcesettingsV1alpha1SettingValue', 1)
name = _messages.StringField(2, required=True)
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
FXgafvValueValuesEnum: V1 error format.
AltValueValuesEnum: Data format for response.
Fields:
f__xgafv: V1 error format.
access_token: OAuth access token.
alt: Data format for response.
callback: JSONP
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: Available to use for quota purposes for server-side
applications. Can be any arbitrary string assigned to a user, but should
not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in api
requests.
uploadType: Legacy upload protocol for media (e.g. "media", "multipart").
upload_protocol: Upload protocol for media (e.g. "raw", "multipart").
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for response.
Values:
json: Responses with Content-Type of application/json
media: Media download with context-dependent Content-Type
proto: Responses with Content-Type of application/x-protobuf
"""
json = 0
media = 1
proto = 2
class FXgafvValueValuesEnum(_messages.Enum):
r"""V1 error format.
Values:
_1: v1 error format
_2: v2 error format
"""
_1 = 0
_2 = 1
f__xgafv = _messages.EnumField('FXgafvValueValuesEnum', 1)
access_token = _messages.StringField(2)
alt = _messages.EnumField('AltValueValuesEnum', 3, default='json')
callback = _messages.StringField(4)
fields = _messages.StringField(5)
key = _messages.StringField(6)
oauth_token = _messages.StringField(7)
prettyPrint = _messages.BooleanField(8, default=True)
quotaUser = _messages.StringField(9)
trace = _messages.StringField(10)
uploadType = _messages.StringField(11)
upload_protocol = _messages.StringField(12)
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| [
"gcloud@google.com"
] | gcloud@google.com |
94c88e893fab70eb22becd4d8470f07518bbf6a5 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/networkx/algorithms/centrality/tests/test_dispersion.py | ea1ba9e9b7e48392782524321a3dcf960ee5d629 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:831ddc7c07f1bc0699f90cfd23a91ebe38264e16cd0c35dcf82dab49654d5e00
size 1601
| [
"yamprakash130@gmail.com"
] | yamprakash130@gmail.com |
ff6d19853cc83ff9be398b8e53479f4932609be5 | 1686fed9d0d40441cc9c1d1274b0dd38368a0cb8 | /python/Django/polls/models.py | 0b2fef6547c6d8b02ed7e5abf9e6abd60e0b0afa | [] | no_license | yso1983/_docker | 256681c482fcd801cf03e2b18ad07a3300dc7ad0 | 6e33231bdb0dedb96e22a5da5fdd48c2b3bdb317 | refs/heads/main | 2023-07-09T09:37:56.825056 | 2021-08-09T05:34:47 | 2021-08-09T05:34:47 | 345,030,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 923 | py | import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question together with its publication date."""

    # Field order is kept unchanged so generated forms/admin stay identical.
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        """Human-readable representation: the question text itself."""
        return self.question_text

    def was_published_recently(self):
        """Return True when pub_date lies within the last day (and not in the future)."""
        now = timezone.now()
        one_day_ago = now - datetime.timedelta(days=1)
        return one_day_ago <= self.pub_date and self.pub_date <= now

    # Admin list-display metadata for the method above.
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer belonging to a Question, with its vote tally."""

    # Deleting a Question cascades and removes its choices as well.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        """Human-readable representation: the choice text itself."""
        return self.choice_text
| [
"yso1983@gmail.com"
] | yso1983@gmail.com |
def find_rate(filename, check_rate, acc):
    """Print every entry whose exchange rate matches ``check_rate`` when rounded.

    Args:
        filename: path to a whitespace-separated file of "<label> <rate>" lines.
        check_rate: the rate value to search for.
        acc: number of decimal places used for the comparison.
    """
    # Round the target once instead of on every line.
    target = round(check_rate, acc)
    # `with` guarantees the file handle is closed (the original leaked it).
    with open(filename, 'r') as source:
        for line in source:
            fields = line.split()
            if len(fields) < 2:
                continue  # skip blank/malformed lines instead of crashing
            rate = float(fields[1])
            if round(rate, acc) == target:
                print(fields[0] + ':', rate)
# Demo run against the exchange-rate data file (requires USD_NOK.txt on disk).
find_rate('USD_NOK.txt', 6.2, 2)
"lars.lien@ignite.no"
] | lars.lien@ignite.no |
def main(raw_input):
    """Return the highest seat ID found among the given boarding passes."""
    # Decode every pass into (row, col) and keep the largest seat ID.
    coords = parse_boarding_passes(raw_input)
    return max(get_seat_id(coord) for coord in coords)
def get_input(filename):
    """Return the entire contents of the given file as one string."""
    with open(filename) as source:
        return source.read()
def parse_boarding_passes(raw_input):
    """Decode each boarding-pass line into a (row, column) tuple.

    The first 7 characters (F/B) are a binary row number, the last 3
    characters (L/R) are a binary column number.
    """
    # F and L mean binary 0, B and R mean binary 1.
    to_bits = str.maketrans("FBLR", "0101")
    coords = []
    for line in raw_input.splitlines():
        bits = line.translate(to_bits)
        coords.append((int(bits[:7], 2), int(bits[-3:], 2)))
    return coords
def get_seat_id(seat_coordinate):
    """Combine a (row, column) pair into the unique seat ID: row * 8 + column."""
    row, column = seat_coordinate
    return row * 8 + column
if __name__ == '__main__':
    # Script entry point: solve part 1 against the real puzzle input file.
    puzzle_input = get_input('input.txt')
    print(main(puzzle_input))
| [
"scott.m.langridge@gmail.com"
] | scott.m.langridge@gmail.com |
9e357902fc5b561c2ef46702a81481dc9f93f830 | 493466f521ebf469434134d61a0c8cadd1f78850 | /BBT codes/eda1BBT.py | 0788b5b8d7b704778bcf431b8777c54d03424a3b | [] | no_license | chariot1498/Popularity-of-tv-shows-on-tumblr | b95294a51c0793ca91b0fdc8f2d470c4e2f7283f | d5343be19ff044ad7b9c07249eaadf490c5bb7bf | refs/heads/master | 2022-07-13T22:05:24.996079 | 2020-05-08T17:31:05 | 2020-05-08T17:31:05 | 115,494,542 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,967 | py | import mysql.connector
import numpy as np
import matplotlib.pyplot as plt
# --- Season 6 -------------------------------------------------------------
# Pull every post timestamp from before the split epoch 1411385659 out of
# the four post-type tables of the (confusingly named) "Sherlock" database.
cnx = mysql.connector.connect(user='root', password='sid',host='localhost', database='Sherlock')
cursor = cnx.cursor()
lu = []
l1 = []
l2 = []
l3 = []
l4 = []
cursor.execute("""SELECT timestamp FROM Text where timestamp<1411385659""")
l1 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Photo where timestamp<1411385659""")
l2 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Chat where timestamp<1411385659""")
l3 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Quote where timestamp<1411385659""")
l4 = cursor.fetchall()
# Merge the per-table result rows into one list of timestamp tuples.
for i in l1:
    lu.append(i)
for i in l2:
    lu.append(i)
for i in l3:
    lu.append(i)
for i in l4:
    lu.append(i)
loo_pos = []
'''
lis = []
lis = [1498015802,1500287402,1500892202,1501497002,1502101802,1502706602,1503311402,1503916202,1703916202]
for i in range(1,len(lis)):
    op = []
    for j in lu:
        if int(j[0])<lis[i] and int(j[0])>lis[i-1]:
            op.append('h')
    loo_pos.append(len(op))
print(loo_pos)
names = ['Trailer','Episode 1','Episode 2','Episode 3','Episode 4','Episode 5','Episode 6','Episode 7']
x = np.array([0,1,2,3,4,5,6,7])
plt.xticks(x, names)
plt.plot(x, loo_pos,'-o',color='g')
plt.show()
'''
# For each episode air time, count posts inside the following one-week
# (604800 s) window, then plot per-episode activity for season 6.
lis = []
lis = [1347017695, 1348832095, 1349436895, 1350041695, 1350646495, 1351251295, 1351856095, 1352460895, 1353065695, 1354275295, 1354880095, 1355484895, 1357299295, 1357904095, 1359718495, 1360323295, 1360928095, 1361532895, 1362742495, 1363347295, 1365161695, 1366976095, 1367580895, 1368185695, 1368790495]
for i in range(1,len(lis)+1):
    op = []
    for j in lu:
        if int(j[0])>=lis[i-1] and int(j[0])<lis[i-1]+604800:
            op.append('h')
    loo_pos.append(len(op))
print(loo_pos)
names = ['Trailer','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','E24']
x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24])
plt.title('SEASON 6')
plt.xticks(x, names)
plt.plot(x, loo_pos,'-o',color='g')
plt.savefig('1s6_BBT.png')
plt.show()
# --- Season 10 ------------------------------------------------------------
# Same procedure for posts after the split timestamp.
lu = []
l1 = []
l2 = []
l3 = []
l4 = []
cursor.execute("""SELECT timestamp FROM Text where timestamp>1411385659""")
l1 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Photo where timestamp>1411385659""")
l2 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Chat where timestamp>1411385659""")
l3 = cursor.fetchall()
cursor.execute("""SELECT timestamp FROM Quote where timestamp>1411385659""")
l4 = cursor.fetchall()
for i in l1:
    lu.append(i)
for i in l2:
    lu.append(i)
for i in l3:
    lu.append(i)
for i in l4:
    lu.append(i)
loo_pos = []
'''
lis = []
lis = [1498015802,1500287402,1500892202,1501497002,1502101802,1502706602,1503311402,1503916202,1703916202]
for i in range(1,len(lis)):
    op = []
    for j in lu:
        if int(j[0])<lis[i] and int(j[0])>lis[i-1]:
            op.append('h')
    loo_pos.append(len(op))
print(loo_pos)
names = ['Trailer','Episode 1','Episode 2','Episode 3','Episode 4','Episode 5','Episode 6','Episode 7']
x = np.array([0,1,2,3,4,5,6,7])
plt.xticks(x, names)
plt.plot(x, loo_pos,'-o',color='g')
plt.show()
'''
lis = []
lis = [1472556895, 1474371295, 1474976095, 1475580895, 1476185695, 1476790495, 1477654495, 1478259295, 1478864095, 1479468895, 1480678495, 1481888095, 1483702495, 1484912095, 1486121695, 1486726495, 1487331295, 1487936095, 1489145695, 1490960095, 1491564895, 1492169695, 1493379295, 1493984095, 1494588895]
for i in range(1,len(lis)+1):
    op = []
    for j in lu:
        if int(j[0])<lis[i-1]+604800 and int(j[0])>=lis[i-1]:
            op.append('h')
    loo_pos.append(len(op))
print(loo_pos)
names = ['Trailer','1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','E24']
x = np.array([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24])
plt.title('SEASON 10')
plt.xticks(x, names)
plt.plot(x, loo_pos,'-o',color='g')
plt.savefig('1s10_BBT.png')
plt.show() | [
"f20150023@goa.bits-pilani.ac.in"
] | f20150023@goa.bits-pilani.ac.in |
65445b5fb4e4a871a548483e374c5ba709fe7489 | 8aeac45b73139fa5a760eaeaf4a03892a664d3d0 | /meu_projeto/meu_projeto/urls.py | 367d0573682b89f959b39a2252a67c3c2e68a6cd | [] | no_license | mabner/fby-aula4 | 67772d5df7fbba662bbea1a276aee569da0e19cb | 14f8eea9928aa26d46c5de0448afe5a4ab8258d4 | refs/heads/master | 2023-06-09T22:16:47.226545 | 2021-07-04T00:34:37 | 2021-07-04T00:34:37 | 381,869,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 165 | py | from django.contrib import admin
from django.urls import include, path
# URL routes: delegate the site root to the "meuapp" app; expose the admin site.
urlpatterns = [
    path('', include('meuapp.urls')),
    path('admin/', admin.site.urls),
]
| [
"30819867+mabner@users.noreply.github.com"
] | 30819867+mabner@users.noreply.github.com |
8b18bbf9c096d8be86fcb9300c6d2ebda74a9b51 | 2b1ee4b583bef5ebdb0de83e5a0c8eea02ed8f74 | /cifar10_cnn_example.py | 257b834d9378fb846afcf943c58e7e8b128f4a1c | [] | no_license | MinJae1/MachineLearning | 1a6508e04b7365129b4e548bd0c59860bf4f2542 | b6ce1c678176480715afa3e082c4aefb78d51d19 | refs/heads/master | 2020-04-17T07:17:57.639374 | 2019-01-21T03:57:32 | 2019-01-21T03:57:32 | 166,363,199 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,507 | py |
#출처 http://solarisailab.com/archives/2325
# -*- coding: utf-8 -*-
"""
CIFAR-10 Convolutional Neural Networks(CNN) Example
next_batch function is copied from edo's answer
https://stackoverflow.com/questions/40994583/how-to-implement-tensorflows-next-batch-for-own-data
Author : solaris33
Project URL : http://solarisailab.com/archives/2325
"""
import tensorflow as tf
import numpy as np
# CIFAR-10 데이터를 다운로드 받기 위한 keras의 helper 함수인 load_data 함수를 임포트합니다.
from tensorflow.keras.datasets.cifar10 import load_data
# Utility that draws the next random training batch.
def next_batch(num, data, labels):
    """Return `num` randomly chosen samples together with their labels."""
    # Shuffle all positions, then take the first `num` so each sample keeps
    # its own label.
    order = np.arange(0, len(data))
    np.random.shuffle(order)
    chosen = order[:num]
    batch_data = np.asarray([data[k] for k in chosen])
    batch_labels = np.asarray([labels[k] for k in chosen])
    return batch_data, batch_labels
# Define the CNN model.
def build_CNN_classifier(x):
    """Build the CIFAR-10 CNN graph.

    Args:
        x: float32 input tensor of shape [batch, 32, 32, 3].

    Returns:
        (y_pred, logits): softmax class probabilities and raw logits,
        both of shape [batch, 10]. Dropout uses the module-level
        `keep_prob` placeholder.
    """
    # Input image.
    x_image = x
    # First convolutional layer - maps the 3-channel input image to 64 feature maps.
    W_conv1 = tf.Variable(tf.truncated_normal(shape=[5, 5, 3, 64], stddev=5e-2))
    b_conv1 = tf.Variable(tf.constant(0.1, shape=[64]))
    h_conv1 = tf.nn.relu(tf.nn.conv2d(x_image, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
    # First pooling layer (3x3 window, stride 2: 32x32 -> 16x16).
    h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Second convolutional layer - maps 64 feature maps to 64.
    W_conv2 = tf.Variable(tf.truncated_normal(shape=[5, 5, 64, 64], stddev=5e-2))
    b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
    h_conv2 = tf.nn.relu(tf.nn.conv2d(h_pool1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
    # Second pooling layer (stride 2: 16x16 -> 8x8).
    h_pool2 = tf.nn.max_pool(h_conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
    # Third convolutional layer.
    W_conv3 = tf.Variable(tf.truncated_normal(shape=[3, 3, 64, 128], stddev=5e-2))
    b_conv3 = tf.Variable(tf.constant(0.1, shape=[128]))
    h_conv3 = tf.nn.relu(tf.nn.conv2d(h_pool2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3)
    # Fourth convolutional layer.
    W_conv4 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
    b_conv4 = tf.Variable(tf.constant(0.1, shape=[128]))
    h_conv4 = tf.nn.relu(tf.nn.conv2d(h_conv3, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4)
    # Fifth convolutional layer.
    W_conv5 = tf.Variable(tf.truncated_normal(shape=[3, 3, 128, 128], stddev=5e-2))
    b_conv5 = tf.Variable(tf.constant(0.1, shape=[128]))
    h_conv5 = tf.nn.relu(tf.nn.conv2d(h_conv4, W_conv5, strides=[1, 1, 1, 1], padding='SAME') + b_conv5)
    # Fully connected layer 1 - after the two downsamplings the 32x32 image has
    # become an 8x8x128 feature map; map it to 384 features.
    W_fc1 = tf.Variable(tf.truncated_normal(shape=[8 * 8 * 128, 384], stddev=5e-2))
    b_fc1 = tf.Variable(tf.constant(0.1, shape=[384]))
    h_conv5_flat = tf.reshape(h_conv5, [-1, 8 * 8 * 128])
    h_fc1 = tf.nn.relu(tf.matmul(h_conv5_flat, W_fc1) + b_fc1)
    # Dropout - controls model complexity and prevents co-adaptation of features.
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
    # Fully connected layer 2 - maps the 384 features to the 10 CIFAR-10 classes
    # (airplane, automobile, bird, ...).
    W_fc2 = tf.Variable(tf.truncated_normal(shape=[384, 10], stddev=5e-2))
    b_fc2 = tf.Variable(tf.constant(0.1, shape=[10]))
    logits = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
    y_pred = tf.nn.softmax(logits)
    return y_pred, logits
# Placeholders for the input images, the one-hot labels, and the dropout keep probability.
x = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
y = tf.placeholder(tf.float32, shape=[None, 10])
keep_prob = tf.placeholder(tf.float32)
# Download and load the CIFAR-10 dataset.
(x_train, y_train), (x_test, y_test) = load_data()
# Convert the scalar labels (0-9) into one-hot encoding.
y_train_one_hot = tf.squeeze(tf.one_hot(y_train, 10), axis=1)
y_test_one_hot = tf.squeeze(tf.one_hot(y_test, 10), axis=1)
# Build the Convolutional Neural Network graph.
y_pred, logits = build_CNN_classifier(x)
# Use cross entropy as the loss function and minimize it with RMSProp.
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
train_step = tf.train.RMSPropOptimizer(1e-3).minimize(loss)
# Ops that compute classification accuracy.
correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Open a session and run the actual training.
with tf.Session() as sess:
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    # Run 10000 optimization steps.
    for i in range(10000):
        batch = next_batch(128, x_train, y_train_one_hot.eval())
        # Every 100 steps, report accuracy and loss on the current training batch.
        if i % 100 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
            loss_print = loss.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
            print("반복(Epoch): %d, 트레이닝 데이터 정확도: %f, 손실 함수(loss): %f" % (i, train_accuracy, loss_print))
        # Train with 20% dropout (keep probability 0.8).
        sess.run(train_step, feed_dict={x: batch[0], y: batch[1], keep_prob: 0.8})
    # After training, report accuracy on the 10000 test images (in 10 chunks).
    test_accuracy = 0.0
    for i in range(10):
        test_batch = next_batch(1000, x_test, y_test_one_hot.eval())
        test_accuracy = test_accuracy + accuracy.eval(feed_dict={x: test_batch[0], y: test_batch[1], keep_prob: 1.0})
    test_accuracy = test_accuracy / 10;
    print("테스트 데이터 정확도: %f" % test_accuracy)
"dywjdalswo@gmail.com"
] | dywjdalswo@gmail.com |
1fc5d80715c8372cd3f272aea6fed8b48b52f579 | 7dcb698aa42b9d76ccc04931d27b06f6cf16e72e | /Main.py | 420d61f60afbb45bdb96da721108a2c59c2bee0f | [] | no_license | VGAIN0/SQLite_b64_saver | 98def4856319c83cafc15c6a5e2f4830b8387991 | d26b2c10046fb63328a9025d5cc7b6a6b2eef6b5 | refs/heads/main | 2023-07-16T04:30:38.100763 | 2021-09-03T02:53:52 | 2021-09-03T02:53:52 | 402,624,138 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 780 | py | import os
def menu():
    """Show the action menu, read the user's choice, and return it as an int."""
    prompt = "\n".join([
        "Enter 1 for update",
        "Enter 2 for import",
        "Enter 3 for add ",
    ]) + "\nEnter 0 to exit : "
    return int(input(prompt))
# Interactive dispatcher: keeps offering the menu until the user enters 0.
print("/////")
x = menu()
# NOTE: the original loop condition was `x != ""`, which compares an int to a
# string and is therefore always true; the loop only ever ends via `break`.
while True:
    if x == 1:
        # Updating is destructive, so ask for confirmation first.
        if input("This operation will delete the old data\nare you sure? (y/n)") == "y":
            # `with` closes the script file handle (the original leaked it).
            with open('update.py') as script:
                exec(script.read())
        else:
            exit()
        x = menu()
    elif x == 2:
        with open('import.py') as script:
            exec(script.read())
        x = menu()
    elif x == 3:
        with open('add_to.py') as script:
            exec(script.read())
        x = menu()
    elif x == 0:
        break
    else:
        print("put a valid number")
        # BUG FIX: re-prompt here; the original never updated x after an
        # invalid choice and spun forever printing the message above.
        x = menu()
| [
"noreply@github.com"
] | VGAIN0.noreply@github.com |
7d4018d80c1c700ffbcc1aca2277bd38960a2240 | 203282a1919871b7a39882d349205033c16c19e0 | /gp_before_submodule_backup/acados/quadrotor_optimizer_q_mpc_q330_set_p.py | 83b5e398c057c3871920254ff9c782eabbba9948 | [] | no_license | FanJing1112/MA_Experiment | da5a40e584038009310cff548d4ef6b95050a1ae | 2b1e6c6caf61776eb354b92cd6fc6621f9d93752 | refs/heads/main | 2023-08-15T01:16:02.513069 | 2021-10-05T14:47:54 | 2021-10-05T14:47:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,662 | py | #!/usr/bin/env python3
# coding=UTF-8
'''
Author: Wei Luo
Date: 2021-03-21 22:27:50
LastEditors: Yinfeng Long
LastEditTime: 2021-08-30
'''
import os
import sys
from quadrotor_model_q_set_p import QuadRotorModel
from acados_template import AcadosOcp, AcadosOcpSolver, AcadosSimSolver
import casadi as ca
import scipy.linalg
import numpy as np
import time
import rospy
from geometry_msgs.msg import Twist, PoseStamped, AccelStamped
# from std_msgs.msg import Empty
from nav_msgs.msg import Odometry
from std_srvs.srv import SetBool, SetBoolResponse
from threading import Thread
from std_msgs.msg import Header
# from itm_nonlinear_mpc.msg import itm_trajectory_msg
from itm_mav_msgs.msg import itm_trajectory_msg
from mavros_msgs.msg import AttitudeTarget
class MPC_controller(object):
    def __init__(self, quad_model, quad_constraints, t_horizon, n_nodes, sim_required=False):
        """Set up state, ROS pub/sub/service plumbing, and the acados solver.

        Args:
            quad_model: acados/casadi model of the quadrotor dynamics.
            quad_constraints: object providing rate/thrust min/max attributes.
            t_horizon: prediction horizon length in seconds.
            n_nodes: number of shooting nodes over the horizon.
            sim_required: if True, also build an AcadosSimSolver integrator.
        """
        self.model = quad_model
        self.constraints = quad_constraints
        self.g_ = 9.8066  # gravitational acceleration [m/s^2]
        self.T = t_horizon
        self.N = n_nodes
        self.simulation_required = sim_required
        self.current_pose = None
        self.current_state = None
        self.dt = 0.05  # control period [s] -> 20 Hz loop rate below
        self.rate = rospy.Rate(1 / self.dt)
        self.time_stamp = None
        self.trajectory_path = None
        self.current_twist = np.zeros(3)
        self.att_command = AttitudeTarget()
        # type_mask 128: ignore the attitude field, command body rates + thrust.
        self.att_command.type_mask = 128
        self.mpc_x_next_state = Odometry()
        # subscribers
        # the robot state (full odometry)
        robot_state_sub_ = rospy.Subscriber(
            '/robot_pose', Odometry, self.robot_state_callback)
        robot_pose_sub = rospy.Subscriber(
            '/real_pose', PoseStamped, self.robot_pose_callback)
        # bookkeeping for the finite-difference velocity estimator
        self.is_velocity_init = False
        self.current_time = None
        self.current_position = None
        self.previous_time = None
        self.last_position = None
        self.last_velocity = None
        self.vel = None
        # subscriber for the GP-predicted acceleration (the OCP parameter p)
        gp_mean_sub = rospy.Subscriber(
            '/gp_acceleration_world', AccelStamped, self.gp_mpc_callback)
        self.gp_mean_accel_w = np.array([0, 0, 0])
        self.is_gp_init = False
        # trajectory subscriber
        robot_trajectory_sub_ = rospy.Subscriber('/robot_trajectory', itm_trajectory_msg, self.trajectory_command_callback)
        # publishers
        # predicted next state from the MPC
        self.mpc_x_next_pub = rospy.Publisher(
            '/mpc_x_next_state', Odometry, queue_size=10)
        # control command u (after setting parameter p)
        self.att_setpoint_pub = rospy.Publisher(
            '/set_p_control', AttitudeTarget, queue_size=1)
        self.control_offset = rospy.get_param('/offboard_mpc_controller/control_offset')
        # self.att_setpoint_pub = rospy.Publisher(
        #     '/mavros/setpoint_raw/attitude', AttitudeTarget, queue_size=1)
        # service reporting controller readiness
        server_ = rospy.Service('uav_mpc_server', SetBool, self.state_server)
        # build the acados OCP solver
        self.quadrotor_optimizer_setup()
        # Commands are published from a dedicated 100 Hz daemon thread.
        # (Original author note: a thread cannot guarantee exact timing.)
        self.att_thread = Thread(target=self.send_command, args=())
        self.att_thread.daemon = True
        self.att_thread.start()
        self.trajectory_init = False
    def gp_mpc_callback(self, msg):
        """Cache the latest GP-predicted acceleration (world frame) for the MPC."""
        # The very first message only flips the init flag; its payload is
        # discarded and the default zero acceleration is kept until the next one.
        if not self.is_gp_init:
            # self.gp_mean_accel_w = np.array([0, 0, 0])
            self.is_gp_init = True
        else:
            self.gp_mean_accel_w = np.array([msg.accel.linear.x, msg.accel.linear.y, msg.accel.linear.z])
    def robot_state_callback(self, data):
        """Store the full state from an Odometry message.

        Layout: [x, y, z, qw, qx, qy, qz, vx, vy, vz], shape (1, 10).
        """
        self.current_state = np.array([data.pose.pose.position.x, data.pose.pose.position.y, data.pose.pose.position.z, data.pose.pose.orientation.w, data.pose.pose.orientation.x,
                                       data.pose.pose.orientation.y, data.pose.pose.orientation.z, data.twist.twist.linear.x, data.twist.twist.linear.y, data.twist.twist.linear.z, ]).reshape(1, -1)
    def robot_pose_callback(self, msg):
        """Build the state vector from a PoseStamped plus the estimated velocity.

        The quaternion sign is normalized so that qw is always positive
        (q and -q represent the same rotation).
        """
        self.current_time = rospy.get_time()
        self.current_position = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z])
        self.velocity_estimation()
        # self.vel stays None until the velocity estimator has produced a value.
        if self.vel is not None:
            if msg.pose.orientation.w > 0:
                self.current_state = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,
                                               msg.pose.orientation.w, msg.pose.orientation.x, msg.pose.orientation.y,
                                               msg.pose.orientation.z, self.vel[0], self.vel[1], self.vel[2] ])
            elif msg.pose.orientation.w < 0:
                self.current_state = np.array([msg.pose.position.x, msg.pose.position.y, msg.pose.position.z,
                                               -msg.pose.orientation.w, -msg.pose.orientation.x, -msg.pose.orientation.y,
                                               -msg.pose.orientation.z, self.vel[0], self.vel[1], self.vel[2] ])
            else:
                # qw == 0: sign is ambiguous, keep the previous state.
                pass
    def velocity_estimation(self, ):
        """Finite-difference velocity estimate with low-pass filtering.

        Updates self.vel as 0.2 * raw + 0.8 * previous. Samples closer than
        10 ms apart are ignored to avoid amplifying measurement noise.
        """
        if not self.is_velocity_init:
            # First call: only initialize the bookkeeping; self.vel stays None.
            self.is_velocity_init = True
            self.last_position = self.current_position
            self.previous_time = self.current_time
            self.last_velocity = np.array([0., 0., 0.])
        else:
            dt = self.current_time - self.previous_time
            if dt>=0.01:
                # 1e-5 guards against division by a vanishing time step.
                self.vel = (self.current_position - self.last_position)/(1e-5 + dt)
                self.vel = 0.2 * self.vel + 0.8 * self.last_velocity
                self.last_velocity = self.vel
                self.previous_time = self.current_time
                self.last_position = self.current_position
    def trajectory_command_callback(self, data):
        """Store the commanded trajectory as an (N, 10) array of states."""
        temp_traj = data.traj
        if data.size != len(temp_traj):
            rospy.logerr('Some data are lost')
        else:
            # One row per trajectory point: [x, y, z, qw, qx, qy, qz, vx, vy, vz].
            self.trajectory_path = np.zeros((data.size, 10))
            # print("data.size = %d" % data.size) # data.size =1
            for i in range(data.size):
                # quaternion_ = self.rpy_to_quaternion(
                #     [temp_traj[i].roll, temp_traj[i].pitch, temp_traj[i].yaw])
                self.trajectory_path[i] = np.array([temp_traj[i].x,
                                                    temp_traj[i].y,
                                                    temp_traj[i].z,
                                                    temp_traj[i].q[0],
                                                    temp_traj[i].q[1],
                                                    temp_traj[i].q[2],
                                                    temp_traj[i].q[3],
                                                    # quaternion_[0],
                                                    # quaternion_[1],
                                                    # quaternion_[2],
                                                    # quaternion_[3],
                                                    temp_traj[i].vx,
                                                    temp_traj[i].vy,
                                                    temp_traj[i].vz
                                                    ])
    def quadrotor_optimizer_setup(self, ):
        """Create and compile the acados OCP solver.

        Linear least-squares cost over [position, quaternion, velocity] and
        inputs [body rates, thrust]; terminal cost on position and velocity
        only. Generated solver code is written next to the working directory.
        """
        # Q_m_ = np.diag([10, 10, 10,
        #                 0.3, 0.3, 0.3, 0.3,
        #                 0.05, 0.05, 0.05,
        #                 ])  # position, q, v
        # P_m_ = np.diag([10, 10, 10,
        #                 0.05, 0.05, 0.05])  # only p and v
        # R_m_ = np.diag([5.0, 5.0, 5.0, 0.6])
        # parameters of c_generated_code/uav_q (safe_takeoff)
        Q_m_ = np.diag([10, 10, 10,
                        0.8, 0.8, 0.8, 0.8,
                        0.05, 0.05, 0.05,
                        ])  # position, q, v
        P_m_ = np.diag([
            10,  # x
            10,  # y
            20,  # z
            0.05,  # vx
            0.05,  # vy
            0.05  # vz
        ])
        R_m_ = np.diag([1.0, 1.0, 2.0, 0.6])
        nx = self.model.x.size()[0]
        self.nx = nx
        nu = self.model.u.size()[0]
        self.nu = nu
        ny = nx + nu
        n_params = self.model.p.size()[0] if isinstance(
            self.model.p, ca.SX) else 0
        acados_source_path = os.environ['ACADOS_SOURCE_DIR']
        sys.path.insert(0, acados_source_path)
        # create OCP
        ocp = AcadosOcp()
        ocp.acados_include_path = acados_source_path + '/include'
        ocp.acados_lib_path = acados_source_path + '/lib'
        ocp.model = self.model
        ocp.dims.N = self.N
        ocp.solver_options.tf = self.T
        # initialize parameters (the GP acceleration, zero until received)
        ocp.dims.np = n_params
        ocp.parameter_values = np.zeros(n_params)
        # cost type: linear least squares in states and inputs
        ocp.cost.cost_type = 'LINEAR_LS'
        ocp.cost.cost_type_e = 'LINEAR_LS'
        ocp.cost.W = scipy.linalg.block_diag(Q_m_, R_m_)
        ocp.cost.W_e = P_m_
        ocp.cost.Vx = np.zeros((ny, nx))
        ocp.cost.Vx[:nx, :nx] = np.eye(nx)
        ocp.cost.Vu = np.zeros((ny, nu))
        ocp.cost.Vu[-nu:, -nu:] = np.eye(nu)
        # terminal cost selects only position (first 3) and velocity (last 3),
        # skipping the 4 quaternion states
        ocp.cost.Vx_e = np.zeros((nx - 4, nx))
        # ocp.cost.Vx_e[:6, :6] = np.eye(6)
        ocp.cost.Vx_e[:3, :3] = np.eye(3)
        ocp.cost.Vx_e[-3:, -3:] = np.eye(3)
        # initial reference: hover at the origin with identity quaternion
        x_ref = np.zeros(nx)
        x_ref[3] = 1.0
        x_ref_e = np.zeros(nx - 4)
        u_ref = np.zeros(nu)
        u_ref[-1] = self.g_
        ocp.cost.yref = np.concatenate((x_ref, u_ref))
        ocp.cost.yref_e = x_ref_e
        # input box constraints on body rates and thrust
        ocp.constraints.lbu = np.array([self.constraints.roll_rate_min, self.constraints.pitch_rate_min,
                                        self.constraints.yaw_rate_min, self.constraints.thrust_min])
        ocp.constraints.ubu = np.array([self.constraints.roll_rate_max, self.constraints.pitch_rate_max,
                                        self.constraints.yaw_rate_max, self.constraints.thrust_max])
        ocp.constraints.idxbu = np.array([0, 1, 2, 3])
        # initial state
        ocp.constraints.x0 = x_ref
        # solver options
        ocp.solver_options.qp_solver = 'FULL_CONDENSING_HPIPM'
        ocp.solver_options.hessian_approx = 'GAUSS_NEWTON'
        # explicit Runge-Kutta integrator
        ocp.solver_options.integrator_type = 'ERK'
        ocp.solver_options.print_level = 0
        ocp.solver_options.nlp_solver_type = 'SQP' # 'SQP_RTI'
        # ocp.solver_options.nlp_solver_max_iter = 400
        # parameters of c_generated_code/uav_q
        ocp.solver_options.nlp_solver_max_iter = 200
        # compile acados ocp
        # files are stored in .ros/
        json_file = os.path.join('./' + self.model.name + '_acados_ocp.json')
        self.solver = AcadosOcpSolver(ocp, json_file=json_file)
        if self.simulation_required:
            self.integrator = AcadosSimSolver(ocp, json_file=json_file)
    def mpc_estimation_loop(self,):
        """Run one MPC cycle: set references and parameters, solve, publish.

        Always returns True; the caller loops on this until node shutdown.
        On solver failure, a hover-ish fallback command (zero rates, offset
        thrust) is kept in self.att_command for the publisher thread.
        """
        t1 = time.time()
        if not self.trajectory_init:
            # No trajectory received yet: hover reference at z = 0.4 m.
            self.trajectory_path = np.zeros((1, 10))
            self.trajectory_path[0] = np.array([0, 0, 0.4, 1, 0, 0, 0, 0, 0, 0])
            self.trajectory_init = True
        else:
            pass
        # if self.trajectory_path is not None and self.current_state is not None:
        if self.current_state is not None:
            current_trajectory = self.trajectory_path
            u_des = np.array([0.0, 0.0, 0.0, self.g_])  # hover input reference
            # terminal reference: position + velocity of the last trajectory row
            self.solver.set(self.N, 'yref', np.concatenate(
                (current_trajectory[-1, :3], current_trajectory[-1, -3:])))
            # stage references: first trajectory row plus the hover input
            for i in range(self.N):
                self.solver.set(i, 'yref', np.concatenate(
                    (current_trajectory[0], u_des)))
            # pin the initial state by setting equal lower/upper bounds
            self.solver.set(0, 'lbx', self.current_state.flatten())
            self.solver.set(0, 'ubx', self.current_state.flatten())
            # set the GP acceleration parameter on every shooting node
            for i in range(self.N):
                self.solver.set(i, 'p', self.gp_mean_accel_w)
            status = self.solver.solve()
            if status != 0:
                rospy.logerr("MPC cannot find a proper solution.")
                # self.att_command.thrust = 0.5
                # self.att_command.thrust = 0.59
                self.att_command.thrust = self.control_offset
                self.att_command.body_rate.z = 0.0
                self.att_command.body_rate.x = 0.0
                self.att_command.body_rate.y = 0.0
                # only for debug
                # print(self.trajectory_path)
                # print("----")
                # print(self.current_state)
            else:
                mpc_x_ = self.solver.get(1, 'x')  # predicted next state
                mpc_u_ = self.solver.get(0, 'u')  # first optimal input
                self.att_command.body_rate.x = mpc_u_[0]
                self.att_command.body_rate.y = mpc_u_[1]
                self.att_command.body_rate.z = mpc_u_[2]
                # normalize thrust by g and scale by the tuned hover offset
                # (q330, circle_40s, control_offset=0.46)
                self.att_command.thrust = mpc_u_[3] / self.g_ * self.control_offset
                self.mpc_x_next_state.pose.pose.position.x = mpc_x_[0]
                self.mpc_x_next_state.pose.pose.position.y = mpc_x_[1]
                self.mpc_x_next_state.pose.pose.position.z = mpc_x_[2]
                self.mpc_x_next_state.pose.pose.orientation.w = mpc_x_[3]
                self.mpc_x_next_state.pose.pose.orientation.x = mpc_x_[4]
                self.mpc_x_next_state.pose.pose.orientation.y = mpc_x_[5]
                self.mpc_x_next_state.pose.pose.orientation.z = mpc_x_[6]
                self.mpc_x_next_state.twist.twist.linear.x = mpc_x_[7]
                self.mpc_x_next_state.twist.twist.linear.y = mpc_x_[8]
                self.mpc_x_next_state.twist.twist.linear.z = mpc_x_[9]
                # self.att_setpoint_pub.publish(self.att_command)
                self.mpc_x_next_pub.publish(self.mpc_x_next_state)
        else:
            if self.trajectory_path is None:
                rospy.loginfo("waiting trajectory")
            elif self.current_state is None:
                rospy.loginfo("waiting current state")
            else:
                rospy.loginfo("Unknown error")
        self.rate.sleep()
        # print(time.time()-t1)
        return True
@staticmethod
def quaternion_to_rpy(quaternion):
q0, q1, q2, q3 = quaternion.w, quaternion.x, quaternion.y, quaternion.z
roll_ = np.arctan2(2 * (q0 * q1 + q2 * q3), 1 - 2 * (q1**2 + q2**2))
pitch_ = np.arcsin(2 * (q0 * q2 - q3 * q1))
yaw_ = np.arctan2(2 * (q0 * q3 + q1 * q2), 1 - 2 * (q2**2 + q3**2))
return roll_, pitch_, yaw_
@staticmethod
def rpy_to_quaternion(rqy):
roll_, pitch_, yaw_ = rqy
cy = np.cos(yaw_ * 0.5)
sy = np.sin(yaw_ * 0.5)
cp = np.cos(pitch_ * 0.5)
sp = np.sin(pitch_ * 0.5)
cr = np.cos(roll_ * 0.5)
sr = np.sin(roll_ * 0.5)
w_ = cr * cp * cy + sr * sp * sy
x_ = sr * cp * cy - cr * sp * sy
y_ = cr * sp * cy + sr * cp * sy
z_ = cr * cp * sy - sr * sp * cy
return np.array([w_, x_, y_, z_])
    def state_server(self, req):
        # Handler for the 'uav_mpc_server' service: always reports readiness.
        return SetBoolResponse(True, 'MPC is ready')
    def send_command(self,):
        """Daemon thread: publish the latest attitude/thrust command at 100 Hz."""
        rate = rospy.Rate(100) # Hz
        self.att_command.header = Header()
        while not rospy.is_shutdown():
            # t2 = time.time()
            command_ = self.att_command
            self.att_command.header.stamp = rospy.Time.now()
            self.att_setpoint_pub.publish(command_)
            try: # prevent garbage in console output when thread is killed
                rate.sleep()
            except rospy.ROSInterruptException:
                pass
            # print("publsich loop takes {} seconds".format(time.time() - t2))
if __name__ == '__main__':
    # Node entry point: build the quadrotor model, construct the controller
    # (which compiles the acados solver), then spin the MPC loop.
    rospy.init_node('offboard_mpc_controller')
    quad_rotor_model = QuadRotorModel()
    try:
        mpc_obj = MPC_controller(quad_model=quad_rotor_model.model,
                                 quad_constraints=quad_rotor_model.constraints,
                                 t_horizon=2.,
                                 n_nodes=20
                                 )
        mpc_model_is_ready = True
    except ImportError:
        # acados/casadi not importable: disable the control loop entirely.
        rospy.logerr('Cannot find any MPC library, Stop the node')
        mpc_model_is_ready = False
        mpc_obj = None
    while not rospy.is_shutdown() and mpc_model_is_ready:
        if not mpc_obj.mpc_estimation_loop():
            rospy.logerr("MPC estimation failed")
    print('MPC controller is shutdown')
| [
"yinfenglong@hotmail.com"
] | yinfenglong@hotmail.com |
ee50c345a81f3fc46700bd9e87309e0b279cdf01 | 4074a22464d2d832ad70e7b389740410f4917ca1 | /run-benchmark | 5186a63d3a9448284bdee0df118cd772596a7fba | [
"BSD-3-Clause"
] | permissive | iiPing/lighty-template | 72ae37fbe70c091345ec31f01b7a5ba87bd8238a | 63834fbb2421506205745bb596ff8ac726361f2a | refs/heads/master | 2020-12-11T07:53:43.449757 | 2012-06-15T06:42:55 | 2012-06-15T06:42:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | #!/usr/bin/python
from benchmark import *
| [
"undeadgrandse@gmail.com"
] | undeadgrandse@gmail.com | |
class BaseModel(object):
    """Common interface for CNN models.

    Concrete subclasses are expected to populate ``self.outputs`` and
    ``self.features_layer`` in their own ``__init__`` and to implement every
    method below that raises ``NotImplementedError``.
    """

    def __init__(self, inputs, num_outputs, is_training, fc_reg, conv_reg):
        """Store the shared model configuration.

        Args
        - inputs: tf.Tensor, shape [batch_size, H, W, C], type float32
        - num_outputs: int, number of output classes,
            or None when the model is only used to extract features
        - is_training: bool, or tf.placeholder of type tf.bool
        - fc_reg: float, regularization for weights in the fully-connected layer
        - conv_reg: float, regularization for weights in the conv layers
        """
        self.inputs = inputs
        self.num_outputs = num_outputs
        self.is_training = is_training
        self.fc_reg = fc_reg
        self.conv_reg = conv_reg

        # Filled in by subclasses during their own __init__():
        self.outputs = None          # tf.Tensor, shape [batch_size, num_outputs]
        self.features_layer = None   # tf.Tensor, shape [batch_size, num_features]

    def init_from_numpy(self, path, sess, *args, **kwargs):
        """Initialize model weights from a numpy file.

        Args
        - path: str, path to saved weights
        - sess: tf.Session
        """
        raise NotImplementedError

    def get_first_layer_weights(self):
        """Return the weights of the first CNN layer as a tf.Tensor."""
        raise NotImplementedError

    def get_final_layer_weights(self):
        """Return the weights of the final fully-connected layer (list of tf.Tensor)."""
        raise NotImplementedError

    def get_first_layer_summaries(self, ls_bands=None, nl_band=None):
        """Create merged summaries for the first layer.

        Included summaries:
        - histogram of weights in the 1st conv layer
        - (if the model has a batch-norm layer) histogram of the 1st
          batch-norm layer's moving mean

        Args
        - ls_bands: one of [None, 'rgb', 'ms']; with 'ms', separate histograms
          are added for RGB vs. other channel weights of the first conv layer
        - nl_band: one of [None, 'split', 'merge']

        Returns
        - summaries: tf.summary, merged summaries
        """
        raise NotImplementedError
| [
"chrisyeh96@gmail.com"
] | chrisyeh96@gmail.com |
66694eb7078793966c8a6e9c670afb2ec01a2013 | 444cde813fd09111953c424200575d0a4efd99c2 | /sbhelpkit/utils/extra_typings.py | c8402a4239d08c8138bd419b68fba78d5a463c81 | [
"WTFPL"
] | permissive | MapleCCC/Session-Buddy-Helpkit | 9763651ec2ee793330ef2fd64d9bc6913ed22d44 | c9555834bf7e8ca41a59155fdf2723e4e4869381 | refs/heads/master | 2020-09-06T17:45:30.602746 | 2020-01-13T17:15:32 | 2020-01-13T17:17:55 | 220,499,154 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | from typing import *
# Type aliases for values produced/consumed by the stdlib ``json`` module.
# Reference:
# https://docs.python.org/3/library/json.html#encoders-and-decoders
# https://github.com/python/typing/issues/182#issuecomment-186684288
# NOTE: typing cannot express the recursive JSON type, so the container
# element types fall back to Any.
JSONType = Union[
    Dict[str, Any], List[Any], str, int, float, bool, None,
]
# A decoded JSON object (mapping) and JSON array (list), respectively.
JSONObject = Dict[str, Any]
JSONArray = List[Any]
| [
"littlelittlemaple@gmail.com"
] | littlelittlemaple@gmail.com |
e231ce0ec8866c4ae5188bd2779dee6f35a69e9e | cd9050426b9404d24aa2c49fff4ce0db0727471e | /0x03-python-data_structures/6-print_matrix_integer.py | 55a54fe3389c036f8d2e645c572371a144e977de | [] | no_license | engemp/holbertonschool-higher_level_programming | cef61a7f3057a8a055ea3cccce24f7a5817ce4fa | b7d720d093a77252d19e227d1ffef33dbca0af90 | refs/heads/master | 2022-07-20T01:18:03.457407 | 2020-05-15T22:37:00 | 2020-05-15T22:37:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | #!/usr/bin/python3
def print_matrix_integer(matrix=[[]]):
print('\n'.join([' '.join(['{:d}'.format(i) for i in row])
for row in matrix]))
| [
"aikomi1726@gmail.com"
] | aikomi1726@gmail.com |
d00f8a5113df64077c306d43ae28a8fd05eda42a | ae4e517aebe74a851df977af1a11d2a67120050c | /h2o-py/tests/testdir_munging/unop/pyunit_expr_math_ops.py | 970952c2b4e4d21a1f40dda8da7beca2cf42bea5 | [
"Apache-2.0"
] | permissive | StephaneFeniar/h2o-dev | 8dd06549ddee490d6db5b7dd41f043e061cee121 | 2c0c69aeda69d08be5edce330bf34898e9b2ab2b | refs/heads/master | 2021-01-14T08:51:40.694426 | 2015-04-18T21:01:23 | 2015-04-18T21:01:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,544 | py | import sys
sys.path.insert(1, "../../../")
import h2o
import numpy as np
import random
import math
import scipy.special
def expr_math_ops(ip,port):
    """Pyunit test (Python 2): compare H2O's elementwise math ops to numpy/scipy.

    Builds four random 10x10 frames whose ranges match each function's
    domain (e.g. [-1, 1] for asin/acos/atanh, [1, 10] for acosh), applies
    the same arithmetic shift to the H2O frame and its numpy copy, then
    spot-checks random cells for agreement within 1e-06.
    """
    # Connect to h2o
    h2o.init(ip,port)
    # Random input data, sized to each function family's valid domain.
    sin_cos_tan_atan_sinh_cosh_tanh_asinh_data = [[random.uniform(-10,10) for r in range(10)] for c in range(10)]
    asin_acos_atanh_data = [[random.uniform(-1,1) for r in range(10)] for c in range(10)]
    acosh_data = [[random.uniform(1,10) for r in range(10)] for c in range(10)]
    abs_data = [[random.uniform(-100000,0) for r in range(10)] for c in range(10)]
    h2o_data1 = h2o.H2OFrame(python_obj=sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
    h2o_data2 = h2o.H2OFrame(python_obj=asin_acos_atanh_data)
    h2o_data3 = h2o.H2OFrame(python_obj=acosh_data)
    h2o_data4 = h2o.H2OFrame(python_obj=abs_data)
    np_data1 = np.array(sin_cos_tan_atan_sinh_cosh_tanh_asinh_data)
    np_data2 = np.array(asin_acos_atanh_data)
    np_data3 = np.array(acosh_data)
    np_data4 = np.array(abs_data)
    row, col = h2o_data1.dim()
    def check_values(h2o_data, numpy_data):
        # Spot-check 10 random cells; True iff all agree within 1e-06.
        success = True
        for i in range(10):
            r = random.randint(0,row-1)
            c = random.randint(0,col-1)
            h2o_val = h2o.as_list(h2o_data[r,c])[0][0]
            num_val = numpy_data[r,c]
            if not abs(h2o_val - num_val) < 1e-06:
                success = False
                print "check unsuccessful! h2o computed {0} and numpy computed {1}".format(h2o_val,num_val)
        return success
    # Apply the same shift to both copies so values stay inside each domain.
    h2o_data1 = h2o_data1 + 2
    h2o_data2 = h2o_data2 / 1.01
    h2o_data3 = h2o_data3 * 1.5
    h2o_data4 = h2o_data4 - 1.5
    np_data1 = np_data1 + 2
    np_data2 = np_data2 / 1.01
    np_data3 = np_data3 * 1.5
    np_data4 = np_data4 - 1.5
    assert check_values(h2o.cos(h2o_data1), np.cos(np_data1)), "expected equal cos values between h2o and numpy"
    assert check_values(h2o.sin(h2o_data1), np.sin(np_data1)), "expected equal sin values between h2o and numpy"
    assert check_values(h2o.tan(h2o_data1), np.tan(np_data1)), "expected equal tan values between h2o and numpy"
    assert check_values(h2o.acos(h2o_data2), np.arccos(np_data2)), "expected equal acos values between h2o and numpy"
    assert check_values(h2o.asin(h2o_data2), np.arcsin(np_data2)), "expected equal asin values between h2o and numpy"
    assert check_values(h2o.atan(h2o_data1), np.arctan(np_data1)), "expected equal atan values between h2o and numpy"
    assert check_values(h2o.cosh(h2o_data1), np.cosh(np_data1)), "expected equal cosh values between h2o and numpy"
    assert check_values(h2o.sinh(h2o_data1), np.sinh(np_data1)), "expected equal sinh values between h2o and numpy"
    assert check_values(h2o.tanh(h2o_data1), np.tanh(np_data1)), "expected equal tanh values between h2o and numpy"
    assert check_values(h2o.acosh(h2o_data3), np.arccosh(np_data3)), "expected equal acosh values between h2o and numpy"
    assert check_values(h2o.asinh(h2o_data1), np.arcsinh(np_data1)), "expected equal asinh values between h2o and numpy"
    assert check_values(h2o.atanh(h2o_data2), np.arctanh(np_data2)), "expected equal atanh values between h2o and numpy"
    # The *pi variants take the argument in units of pi, hence the division.
    assert check_values(h2o.cospi(h2o_data2/math.pi), np.cos(np_data2)), "expected equal cospi values between h2o and numpy"
    assert check_values(h2o.sinpi(h2o_data2/math.pi), np.sin(np_data2)), "expected equal sinpi values between h2o and numpy"
    assert check_values(h2o.tanpi(h2o_data2/math.pi), np.tan(np_data2)), "expected equal tanpi values between h2o and numpy"
    assert check_values(h2o.abs(h2o_data4), np.fabs(np_data4)), "expected equal abs values between h2o and numpy"
    assert check_values(h2o.sign(h2o_data2), np.sign(np_data2)), "expected equal sign values between h2o and numpy"
    assert check_values(h2o.sqrt(h2o_data3), np.sqrt(np_data3)), "expected equal sqrt values between h2o and numpy"
    assert check_values(h2o.trunc(h2o_data3), np.trunc(np_data3)), "expected equal trunc values between h2o and numpy"
    assert check_values(h2o.ceil(h2o_data3), np.ceil(np_data3)), "expected equal ceil values between h2o and numpy"
    assert check_values(h2o.floor(h2o_data3), np.floor(np_data3)), "expected equal floor values between h2o and numpy"
    assert check_values(h2o.log(h2o_data3), np.log(np_data3)), "expected equal log values between h2o and numpy"
    assert check_values(h2o.log10(h2o_data3), np.log10(np_data3)), "expected equal log10 values between h2o and numpy"
    assert check_values(h2o.log1p(h2o_data3), np.log1p(np_data3)), "expected equal log1p values between h2o and numpy"
    assert check_values(h2o.log2(h2o_data3), np.log2(np_data3)), "expected equal log2 values between h2o and numpy"
    assert check_values(h2o.exp(h2o_data3), np.exp(np_data3)), "expected equal exp values between h2o and numpy"
    assert check_values(h2o.expm1(h2o_data3), np.expm1(np_data3)), "expected equal expm1 values between h2o and numpy"
    # Gamma-family functions are checked on a single cell against
    # math/scipy, with a relative (not absolute) tolerance.
    h2o_val = h2o.as_list(h2o.gamma(h2o_data3))[5][5]
    num_val = math.gamma(h2o.as_list(h2o_data3)[5][5])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal gamma values between h2o and math".format(h2o_val,num_val)
    h2o_val = h2o.as_list(h2o.lgamma(h2o_data3))[5][5]
    num_val = math.lgamma(h2o.as_list(h2o_data3)[5][5])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal lgamma values between h2o and math".format(h2o_val,num_val)
    h2o_val = h2o.as_list(h2o.digamma(h2o_data3))[5][5]
    num_val = scipy.special.polygamma(0,h2o.as_list(h2o_data3)[5][5])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal digamma values between h2o and math".format(h2o_val,num_val)
    h2o_val = h2o.as_list(h2o.trigamma(h2o_data3))[5][5]
    num_val = scipy.special.polygamma(1,h2o.as_list(h2o_data3)[5][5])
    assert abs(h2o_val - num_val) < max(abs(h2o_val), abs(num_val)) * 1e-6, \
        "check unsuccessful! h2o computed {0} and math computed {1}. expected equal trigamma values between h2o and math".format(h2o_val,num_val)
if __name__ == "__main__":
    # h2o's pyunit runner: parses ip/port from argv and invokes the test.
    h2o.run_test(sys.argv, expr_math_ops)
| [
"eric.eckstrand@gmail.com"
] | eric.eckstrand@gmail.com |
9e3db1a1ac5b4a95bb089cedabe3884181cb513c | 779a0dec61b3fd4e005f06fa38fef52315802362 | /siteVotes/wsgi.py | 36fbd9c14717f58e2563dc0bdb4abd2e63570d6c | [] | no_license | PavelVor94/siteOfVotes | 9f55beb4ff6b70a441d33c5f8225aac79ce5bc24 | bfbc8ef7c01370a316511516e99c0b455928efb1 | refs/heads/main | 2023-03-10T22:57:40.871358 | 2021-02-25T17:34:25 | 2021-02-25T17:34:25 | 342,326,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for siteVotes project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment overrides it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'siteVotes.settings')
# Module-level WSGI callable picked up by the application server.
application = get_wsgi_application()
| [
"f16574@Yandex.ru"
] | f16574@Yandex.ru |
38287c4dba1ada604dd4db2c416bb21d3a73d3bf | cd01419108a2c0994ab7692a87b8df1eca6ac81f | /loadingtest.py | 5d060b32ba405cbdcf744d14c54248837cdbc0a0 | [] | no_license | djbartos93/APC-PDU-Config | fbb8dc8ed7e1a6711855f68f1d0c7373e10af130 | 07af7bd866ed85578fdcba13c7c24b3b97107d4f | refs/heads/master | 2021-01-02T09:02:33.498276 | 2015-04-02T03:18:56 | 2015-04-02T03:18:56 | 33,286,954 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 488 | py |
import sys
import time
def progressbar(it, prefix = "", size = 60):
count = len(it)
def _show(_i):
x = int(size*_i/count)
sys.stdout.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), _i, count))
sys.stdout.flush()
_show(0)
for i, item in enumerate(it):
yield item
_show(i+1)
sys.stdout.write("\n")
sys.stdout.flush()
for i in progressbar(range(40), "Computing: ", 40):
time.sleep(1) # long computation
| [
"djbartos93@gmail.com"
] | djbartos93@gmail.com |
54784ae241ebb27af2105733d27895990c63c635 | d024ccbb4cc04af3866a4db1ac1d8c1d7395d909 | /boj/4673.py | 28d025abbfa54b5cb36be7af6190215810610b63 | [] | no_license | demetoir/ps-solved-code | ff0418dddd10f3b053c9b8d32af48027b10c8481 | f4d4fd2183176b083f2287c9d89c6d5a1e983cc5 | refs/heads/master | 2022-10-14T20:11:34.581439 | 2020-06-12T11:24:11 | 2020-06-12T11:24:11 | 68,782,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 135 | py | l=lambda n:n%10+l(n//10) if n>0 else 0
a=[1]*20002
for i in range(1,10000):a[l(i)+i]=0
for i in range(1,10000):
if a[i]==1:print(i) | [
"wnsqlehlswk@gmail.com"
] | wnsqlehlswk@gmail.com |
b1c4ea7298b6e81c0536a83c6bde99cc7fde0007 | 6d387d44bcbbae1db1c110554d2accd263b36946 | /friends with baylor the game.py | 765f68047e2a3b8cec2571d0ef4e4728060179a6 | [] | no_license | DaemeonH/Friends-with-Baylor | 8524b474e1631d8a1286129d4fc8a4aa85b3aa19 | 37df4552491ba936900a44c2dc47d8f8f865e3e4 | refs/heads/master | 2020-04-22T09:03:02.086047 | 2019-02-12T05:41:15 | 2019-02-12T05:41:15 | 170,256,805 | 0 | 0 | null | 2019-02-12T05:30:03 | 2019-02-12T05:18:56 | null | UTF-8 | Python | false | false | 10,573 | py | ##import sys
##import os
##
##def resource_path(relative_path):
## """ Get absolute path to resource, works for dev and for PyInstaller """
## base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
## return os.path.join(base_path, relative_path)
import time
import pygame
from pygame import*
from graphics import*
from intro import*
import intro
pygame.mixer.init()  # must run before any music is loaded in title_screen()
pauseTime = 2  # seconds each line of dialogue stays on screen
def title_screen():
    """Create the game window (module-level ``win``), draw the title
    screen, start looping background music, and wait for a click."""
    global win
    win = GraphWin("Friends with Baylor", 800, 550)
    win.setBackground("red4")
    baylor = Image(Point(400, 260), "baylor_red_bkgrnd.gif")
    baylor.draw(win)
    title = "Friends with Baylor"
    draw_title = Text(Point(400, 50), title)
    draw_title.setFace("courier")
    draw_title.setTextColor("black")
    draw_title.setSize(36)
    draw_title.setStyle("bold italic")
    draw_title.draw(win)
    # -1 loops the track forever, starting at offset 0.0 seconds.
    pygame.mixer.music.load("Edward_Shallow_-_01_-_The_Infinite_Railroad.mp3")
    pygame.mixer.music.play(-1,0.0)
    by = "DaemeoNation Games"
    draw_by = Text(Point(400, 440), by)
    draw_by.setFace("courier")
    draw_by.setTextColor("orange2")
    draw_by.setSize(18)
    draw_by.setStyle("bold italic")
    draw_by.draw(win)
    start = "Click To Start"
    draw_title = Text(Point(400, 500), start)
    draw_title.setFace("courier")
    draw_title.setTextColor("black")
    draw_title.setSize(12)
    draw_title.setStyle("bold italic")
    draw_title.draw(win)
    clickPoint = win.getMouse()  # block until the player clicks anywhere
    clear(win)
    #pygame.mixer.music.stop()
    #i know i can fade out the music instead of kill it
    #time.sleep(1)
    #win.close()
def intro_screen():
    """Dark-forest intro: show four narration lines in sequence, each for
    ``pauseTime`` seconds, then clear the window."""
    win.setBackground("gray4")
    baylor = Image(Point(400, 260), "the-dark-forest-ART2.gif")
    baylor.draw(win)
    str1 = "You travel alone through a dark forest."
    str2 = "On the path ahead,..."
    str3 = "you see a man in leather armor approach."
    str4 = "He carries a large axe!"
    intro_strings = [str1, str2, str3, str4]
    for idx, val in enumerate(intro_strings):
        intro = Text(Point(400, 475), intro_strings[idx])
        intro.setFace("courier")
        intro.setTextColor("red")
        intro.setSize(18)
        intro.setStyle("bold italic")
        intro.draw(win)
        time.sleep(pauseTime)
        intro.undraw()
    clear(win)
def intro_screen2():
    """The woodsman greets the traveller, then hands off to the
    yes/no/who prompt."""
    win.setBackground("gray3")
    woodsman = Image(Point(400, 260), "woodsman.gif")
    woodsman.draw(win)
    dialogue = (
        "Hail, fair traveller!",
        "I have but one simple question for you.",
        "Are you friends with Baylor?",
    )
    for line in dialogue:
        message = Text(Point(400, 475), line)
        message.setFace("courier")
        message.setTextColor("green4")
        message.setSize(18)
        message.setStyle("bold italic")
        message.draw(win)
        time.sleep(pauseTime)
        message.undraw()
    yes_or_no()
def hesitate():
    """Re-ask the question after an invalid or hesitant answer, then
    return to the yes/no/who prompt."""
    win.setBackground("gray3")
    woodsman = Image(Point(400, 260), "woodsman.gif")
    woodsman.draw(win)
    dialogue = (
        "You hesitate, so I ask again.",
        "Are...you...friends...with...Baylor?",
    )
    for line in dialogue:
        message = Text(Point(400, 475), line)
        message.setFace("courier")
        message.setTextColor("green4")
        message.setSize(18)
        message.setStyle("bold italic")
        message.draw(win)
        time.sleep(pauseTime)
        message.undraw()
    yes_or_no()
def yes_or_no():
    """Draw the three answer choices, read one keypress, and dispatch.

    Keys '1'/'2'/'3' route to friend()/no_friend()/who(); anything else
    (other digits or non-numeric keys) re-asks via hesitate().
    """
    prompt_lines = [
        "Answer- 1. 'Yes'",
        "        2. 'No'",
        "    or 3. 'Who in the HELL is Baylor?'",
        "    Please enter a 1, 2, or 3.",
    ]
    textpos_y = 400
    for line in prompt_lines:
        prompt = Text(Point(150, textpos_y), line)
        prompt.setFace("courier")
        prompt.setTextColor("red4")
        prompt.setSize(18)
        prompt.setStyle("bold italic")
        prompt.draw(win)
        textpos_y = textpos_y + 30
    answer = Entry(Point(450, 490), 4)
    answer.draw(win)
    inputStr = win.getKey()
    clear(win)
    try:
        int_answer = int(inputStr)
    except ValueError:
        # Non-numeric key pressed: ask again.
        hesitate()
        return
    # BUG FIX: the original also tested ``int_answer <= 4`` in a second,
    # redundant range check; a plain elif chain covers every case.
    if int_answer == 1:
        friend()
    elif int_answer == 2:
        no_friend()
    elif int_answer == 3:
        who()
    else:
        # Number outside 1-3: ask again.
        hesitate()
def no_friend():
    """Woodsman's reply when the player answers 'No'."""
    win.setBackground("gray3")
    woodsman = Image(Point(400, 260), "woodsman.gif")
    woodsman.draw(win)
    dialogue = (
        "Well... A bad situation for you, my friend.",
        "As my lord and master has ordered...",
        "All who do not serve Baylor must die!",
    )
    for line in dialogue:
        message = Text(Point(400, 475), line)
        message.setFace("courier")
        message.setTextColor("green4")
        message.setSize(18)
        message.setStyle("bold italic")
        message.draw(win)
        time.sleep(pauseTime)
        message.undraw()
def friend():
    """Woodsman's reply when the player answers 'Yes'."""
    win.setBackground("gray3")
    woodsman = Image(Point(400, 260), "woodsman.gif")
    woodsman.draw(win)
    dialogue = (
        "Followers of Baylor are a scourge upon this land.",
        "All who serve Baylor must DIE!",
    )
    for line in dialogue:
        message = Text(Point(400, 475), line)
        message.setFace("courier")
        message.setTextColor("green4")
        message.setSize(18)
        message.setStyle("bold italic")
        message.draw(win)
        time.sleep(pauseTime)
        message.undraw()
def who():
    """Woodsman's reply when the player has never heard of Baylor."""
    win.setBackground("gray3")
    woodsman = Image(Point(400, 260), "woodsman.gif")
    woodsman.draw(win)
    dialogue = (
        "You have not heard of the great and powerful Baylor??",
        "A pity for you.",
        "You will make an excellent sacrifice!",
        "Prepare to DIE!",
    )
    for line in dialogue:
        message = Text(Point(400, 475), line)
        message.setFace("courier")
        message.setTextColor("green4")
        message.setSize(18)
        message.setStyle("bold italic")
        message.draw(win)
        time.sleep(pauseTime)
        message.undraw()
def clear(win):
    """Undraw everything currently drawn in ``win``, then force a redraw.

    Iterates over a copy of ``win.items`` because undrawing mutates it.
    """
    for drawable in list(win.items):
        drawable.undraw()
    win.update()
def forest_battle():
    """Set up the forest battle scene: draw the map and the foe sprite
    (stored in module-level ``foe1`` for checkPos), then start the
    movement loop."""
    global foe1
    clear(win)
    win.setBackground("gray2")
    forest1 = Image(Point(400, 250), "forest-map1.gif")
    forest1.draw(win)
    foe1 = Image(Point(400, 280), "woodsmanfoe.png")
    foe1.draw(win)
    player_move(win)
def player_move(win):
    """Main movement loop: arrow keys move the knight 5 px per press;
    the '.' (period) key exits the loop and closes the window.

    checkPos() runs after every move to stop the player walking
    through the foe sprite.
    """
    global play_center
    global player
    global k
    player = Image(Point(200, 350), "knight.png")
    player.draw(win)
    dx, dy = 5, 5  # pixels moved per key press
    while True:
        k = win.checkKey()
        if k == 'Left':
            player.move(-dx, 0)
            checkPos()
        elif k == 'Right':
            player.move(dx, 0)
            checkPos()
        elif k == "Up":
            player.move(0, -dy)
            checkPos()
        elif k == "Down":
            player.move(0, dy)
            checkPos()
        elif k == 'period':
            break
    win.close()
def checkPos(): #checks the position of the player and prohibits movement through foes
    """Collision guard: if the player sprite is flush against one side of
    the foe sprite, block further movement into the foe (swallowing key
    presses toward it) until the player moves away in the opposite
    direction.

    NOTE(review): the edge tests use exact ``==`` comparisons against
    quarter/half sprite sizes, so they only trigger when the 5 px step
    lands exactly on the boundary -- confirm sprite dimensions make the
    offsets multiples of the step.
    """
    play_center = player.getAnchor()
    foeC = foe1.getAnchor()
    foeH = foe1.getHeight()
    foeW = foe1.getWidth()
    playH = player.getHeight()
    playW = player.getWidth()
    print(play_center)
    print(foeC)
    # Player just below the foe: swallow "Up" presses until "Down".
    if play_center.y == (foeC.y + foeH/2) and \
       play_center.x >= (foeC.x - foeW/2 - playW/4) and \
       play_center.x <= (foeC.x + foeW/2 + playW/4 ):
        while True:
            k = win.checkKey()
            if k == "Up":
                player.move(0, 0)
                print("don't")
                #player.getAnchor()
                win.checkKey()
            elif k == "Down":
                break
    # Player just above the foe: swallow "Down" presses until "Up".
    if play_center.y == (foeC.y - foeH/2 - playH/4) and \
       play_center.x >= (foeC.x - foeW/2 - playW/4) and \
       play_center.x <= (foeC.x + foeW/2 + playW/4):
        while True:
            k = win.checkKey()
            if k == "Down":
                player.move(0, 0)
                print("don't")
                #player.getAnchor()
                win.checkKey()
            elif k == "Up":
                break
    # Player just left of the foe: swallow "Right" presses until "Left".
    if play_center.x == (foeC.x - foeW/2 - playW/4) and \
       play_center.y >= (foeC.y - foeH/2 - playH/4) and \
       play_center.y <= (foeC.y + foeH/2 + playH/4):
        while True:
            k = win.checkKey()
            if k == "Right":
                player.move(0, 0)
                print("don't")
                #player.getAnchor()
                win.checkKey()
            elif k == "Left":
                break
    # Player just right of the foe: swallow "Left" presses until "Right".
    if play_center.x == (foeC.x + foeW/2 + playW/4) and \
       play_center.y >= (foeC.y - foeH/2 - playH/4) and \
       play_center.y <= (foeC.y + foeH/2 + playH/4):
        while True:
            k = win.checkKey()
            if k == "Left":
                player.move(0, 0)
                print("don't")
                #player.getAnchor()
                win.checkKey()
            elif k == "Right":
                break
##def movement(win):
## player = Image(Point(450, 500), "knight.png")
## player.draw(win)
## dx, dy = 10, 10
##
## while True:
## k = win.checkKey()
## if k == 'Left':
## player.move(-dx, 0)
## elif k == 'Right':
## player.move(dx, 0)
## elif k == "Up":
## player.move(0, -dy)
## elif k == "Down":
## player.move(0, dy)
## elif k == 'period':
## break
## win.close()
def main():
    """Run the game scenes in order: title, intro, dialogue, battle."""
    title_screen()
    intro_screen()
    intro_screen2()
    forest_battle()
# Launch the game as soon as the module is executed.
main()
| [
"noreply@github.com"
] | DaemeonH.noreply@github.com |
559c610e46d10f1c51de20f3558bfb1bbd7f5a3b | 8ffa792c46bb25a2c8d5fe80c03db67b62ece193 | /eval/b05902046/method.py | 8f637defbbc74992ad35440b4cd8b12ce36d40e9 | [] | no_license | b05902046/AI-Othello | b6836859cc14c69d1d544387a694b9017ef5aeca | c3574e05adee6ac34b85319d0d6ce34116be9fd7 | refs/heads/master | 2020-03-19T23:16:24.991047 | 2018-06-24T11:34:45 | 2018-06-24T11:34:45 | 137,000,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | import random
def naiive_get_best_move(currentState, myColor):
    """Greedy 1-ply move choice.

    Returns ``[i, j]`` of the legal move with the highest immediate score
    (first one wins on ties), or ``[-1, -1]`` when no move scores above -1.
    """
    # Renamed from ``max`` -- the original shadowed the builtin.
    best_score, best_i, best_j = -1, -1, -1
    for i, j, score in currentState.get_legal_moves(myColor, True):
        if score > best_score:
            best_score, best_i, best_j = score, i, j
    return [best_i, best_j]
def min_max_get_best_move(priceTable, currentState, myColor, depth, warn):
    """Plain minimax search for Othello; returns a ``(i, j, score)`` tuple.

    Black (myColor == 1) maximises the evaluation, white (2) minimises it.
    ``i``/``j`` are None at the depth limit or at a game-ending double pass.
    ``warn`` is True when the opponent just had to pass: a second
    consecutive pass ends the game, scored +/-inf by disc counts
    (count_wb()[1] vs [0]).

    NOTE(review): ``limit_depth`` is read from a global that is not
    defined in this module -- it must be set elsewhere before calling,
    otherwise this raises NameError.  ``alpha_beta`` below takes the
    limit as a parameter instead.
    """
    #print "depth", depth, "myColor", myColor
    #currentState.print_board()
    if depth == limit_depth:
        value = currentState.evaluate(priceTable)
        #print (depth, None, None, value)
        return (None, None, value)
    moves = currentState.get_legal_moves(myColor, False)
    if warn == True:
        if len(moves) == 0:
            # Two passes in a row: game over, score by final disc count.
            nums = currentState.count_wb()
            if nums[1] > nums[0]:
                value = float('inf')
            elif nums[1] < nums[0]:
                value = float('-inf')
            else:
                value = 0
            #print (depth, None, None, value)
            return (None, None, value)
    elif len(moves) == 0:
        # No legal move: pass -- same depth, other colour, warn set.
        return min_max_get_best_move(priceTable, currentState, (3-myColor), depth, True)
    if myColor == 1:
        #black
        MAX, maxi, maxj = float('-inf'), None, None
        for move in moves:
            newState = currentState.get_successor_state(myColor, move[0], move[1])
            new = min_max_get_best_move(priceTable, newState, 2, depth+1, False)
            if new[2] > MAX:
                MAX, maxi, maxj = new[2], move[0], move[1]
            #print "score", MAX
        #print "after"
        #currentState.print_board()
        return (maxi, maxj, MAX)
    elif myColor == 2:
        #white
        MIN, mini, minj = float('inf'), None, None
        for move in moves:
            newState = currentState.get_successor_state(myColor, move[0], move[1])
            #print "new"
            #currentState.print_board()
            new = min_max_get_best_move(priceTable, newState, 1, depth+1, False)
            if new[2] < MIN:
                MIN, mini, minj = new[2], move[0], move[1]
            #print "score", MIN
        #print "after 2"
        #currentState.print_board()
        return (mini, minj, MIN)
def alpha_beta(priceTable, currentState, myColor, depth, limit_depth, warn, alpha, beta):
    """Minimax with alpha-beta pruning; returns a ``(i, j, score)`` tuple.

    Same game logic as min_max_get_best_move, but the depth limit is a
    parameter and branches are cut once ``alpha > beta``.  Game-over
    scores use finite sentinels (+/-810000.0) instead of +/-inf so they
    stay strictly inside the initial (+/-inf) alpha/beta window.
    """
    if depth == limit_depth:
        value = currentState.evaluate(priceTable)
        return (None, None, value)
    moves = currentState.get_legal_moves(myColor, False)
    if warn == True:
        if len(moves) == 0:
            # Two passes in a row: game over, score by final disc count.
            nums = currentState.count_wb()
            if nums[1] > nums[0]:
                value = 810000.0 #something big, but smaller than float('inf')
            elif nums[1] < nums[0]:
                value = -810000.0 #something small, but bigger than float('-inf')
            else:
                value = 0
            #print (depth, None, None, value)
            return (None, None, value)
    elif len(moves) == 0:
        # No legal move: pass -- same depth, other colour, warn set.
        return alpha_beta(priceTable, currentState, (3-myColor), depth, limit_depth, True, alpha, beta)
    if myColor == 1:
        #black
        MAX, maxi, maxj = float('-inf'), None, None
        for move in moves:
            newState = currentState.get_successor_state(myColor, move[0], move[1])
            new = alpha_beta(priceTable, newState, 2, depth+1, limit_depth, False, alpha, beta)
            if new[2] > MAX:
                MAX, maxi, maxj = new[2], move[0], move[1]
            alpha = max(MAX, alpha)
            if alpha > beta:
                break
            #print "score", MAX
        #print "after"
        #currentState.print_board()
        return (maxi, maxj, MAX)
    elif myColor == 2:
        #white
        MIN, mini, minj = float('inf'), None, None
        for move in moves:
            newState = currentState.get_successor_state(myColor, move[0], move[1])
            #print "new"
            #currentState.print_board()
            new = alpha_beta(priceTable, newState, 1, depth+1, limit_depth, False, alpha, beta)
            if new[2] < MIN:
                MIN, mini, minj = new[2], move[0], move[1]
            beta = min(MIN, beta)
            if alpha > beta:
                break
            #print "score", MIN
        #print "after 2"
        #currentState.print_board()
        return (mini, minj, MIN)
def getAction(dice, price_table, gameState, myColor, depth_limit, method):
    """Choose a move for ``myColor`` using the strategy named by ``method``.

    - "alpha":      full alpha-beta search; returns the (i, j, score) triple.
    - "alpha_rand": with probability ``dice`` plays the alpha-beta move,
      otherwise a uniformly random legal move; returns an (i, j) pair.

    NOTE(review): the "naiive" and "minimax" branches are unimplemented
    (fall through and return None), and "alpha" returns a 3-tuple while
    "alpha_rand" returns a 2-tuple -- confirm which shape callers expect.
    """
    moves = gameState.get_legal_moves(myColor, False)
    if method == "naiive":
        pass
    elif method == "minimax":
        pass
    elif method == "alpha":
        return alpha_beta(price_table, gameState, myColor, 1, depth_limit, False, float('-inf'), float('inf'))
    elif method == "alpha_rand":
        bestMove = alpha_beta(price_table, gameState, myColor, 1, depth_limit, False, float('-inf'), float('inf'))
        if random.random() <= dice:
            return (bestMove[0], bestMove[1])
        else:
            who = random.randint(0, len(moves)-1)
            return (moves[who][0], moves[who][1])
| [
"b05902046@ntu.edu.tw"
] | b05902046@ntu.edu.tw |
5df196843c25b81138c44c75987e86e0af7debc1 | 3dfa65c42241d866dcf82d2f6faf603e5aec096c | /gladweb/views/index.py | 340e3f77f04331b417dcf26cd0303c55a194ac4b | [] | no_license | Dav1dde/glad-web | 0ad5f11f4ca0966ae29b4c1972a02295bdd6c47c | ff05bd08efca97c2f40fbf3e9f8fde265b7c8e7d | refs/heads/master | 2023-03-07T18:31:52.638325 | 2023-02-25T16:14:12 | 2023-02-25T16:14:12 | 35,337,528 | 396 | 74 | null | null | null | null | UTF-8 | Python | false | false | 4,874 | py | import json
import os
import sys
import tempfile
import zipfile
from collections import namedtuple
from flask import Blueprint, request, render_template, g, url_for, redirect, flash, current_app
import glad.lang.c.generator
from glad.spec import SPECS
from gladweb.views.exception import InvalidUserInput
if sys.version_info >= (3, 0):
from itertools import zip_longest, chain
from urllib.parse import urlencode
else:
from itertools import izip_longest as zip_longest, chain
from urllib import urlencode
# (major, minor) pair parsed from an "api=major.minor" form value.
Version = namedtuple('Version', ['major', 'minor'])
# Blueprint serving the landing page and the /generate endpoint.
index = Blueprint('index', __name__)
@index.route('/', methods=['GET'])
def landing():
    """Render the main generator form, populated from cached metadata."""
    return render_template(
        'index.html', **g.metadata.as_dict()
    )
def validate_form():
    """Validate the generator form found in ``flask.request``.

    Returns the tuple ``(messages, language, specification, profile,
    apis, extensions, loader, omitkhr, local_files)`` where ``apis`` is a
    dict mapping API name to a ``Version``.  Raises InvalidUserInput on
    any invalid selection or when no API was chosen.
    """
    language = request.form.get('language')
    specification = request.form.get('specification')
    profile = request.form.get('profile', 'compatibility')
    apis = request.form.getlist('api')
    extensions = request.form.getlist('extensions')
    # Checkboxes: present in the form data at all means "checked".
    loader = request.form.get('loader') is not None
    omitkhr = request.form.get('omitkhr') is not None
    local_files = request.form.get('localfiles') is not None

    messages = []  # collected warnings; currently always empty

    if language not in (l.id for l in g.metadata.languages):
        raise InvalidUserInput('Invalid language "{0}"'.format(language))

    if specification not in (s.id for s in g.metadata.specifications):
        raise InvalidUserInput('Invalid specification "{0}"'.format(specification))

    if profile not in (p.id for p in g.metadata.profiles):
        raise InvalidUserInput('Invalid profile "{0}"'.format(profile))

    # Each api value has the form "name=major.minor" or "name=none".
    apis_parsed = dict()
    for api in apis:
        name, version = api.split('=')
        if version == 'none':
            continue
        apis_parsed[name] = Version(*map(int, version.split('.')))

    if len(apis_parsed) == 0:
        # BUG FIX: the original message had no "{0}" placeholder, so the
        # .format(specification) call silently did nothing.
        raise InvalidUserInput(
            'No API for specification "{0}" selected'.format(specification)
        )

    return messages, language, specification, profile, apis_parsed, extensions, loader, omitkhr, local_files
def write_dir_to_zipfile(path, zipf, exclude=None):
    """Recursively add every file under ``path`` to the open ZipFile
    ``zipf``, storing paths relative to ``path``.

    ``exclude`` is an optional collection of bare file names to skip.
    """
    excluded = set(exclude) if exclude is not None else set()
    for root, _dirs, files in os.walk(path):
        for name in files:
            if name in excluded:
                continue
            absolute = os.path.join(root, name)
            zipf.write(absolute, os.path.relpath(absolute, path))
def glad_generate():
    """Run the glad generator for the submitted form and return the URL
    of the generated directory listing.

    Steps: validate the form, load the spec, generate the sources into a
    fresh temp directory, zip them, record the submitted form params in a
    ``.serialized`` file, and (optionally) freeze the result.
    """
    # this is really getting ugly, where did my code quality standards go?
    messages, language, specification, profile, apis, extensions, loader_enabled, omitkhr, local_files = validate_form()
    cls = SPECS[specification]
    spec = cls.fromstring(g.cache.open_specification(specification).read())
    if spec.NAME == 'gl':
        spec.profile = profile
    generator_cls, loader_cls = glad.lang.get_generator(
        language, spec.NAME.lower()
    )
    if loader_cls is None:
        raise InvalidUserInput('API/Spec not yet supported')
    loader = loader_cls(apis)
    loader.disabled = not loader_enabled
    loader.local_files = local_files
    # Point the C generator at the cached khrplatform.h on disk.
    glad.lang.c.generator.KHRPLATFORM = 'file:' + g.cache.get_khrplatform()
    # the suffix is required because mkdtemp sometimes creates directories with an
    # underscore at the end, we later use werkzeug.utils.secure_filename on that directory,
    # this function happens to strip underscores...
    directory = tempfile.mkdtemp(dir=current_app.config['TEMP'], suffix='glad')
    os.chmod(directory, 0o750)
    with generator_cls(directory, spec, apis, extensions, loader, local_files=local_files, omit_khrplatform=omitkhr) as generator:
        generator.generate()
    # Bundle the generated tree into glad.zip inside the same directory
    # (excluding the zip itself from its own contents).
    zip_path = os.path.join(directory, 'glad.zip')
    with open(zip_path, 'wb') as fobj:
        zipf = zipfile.ZipFile(fobj, mode='w')
        write_dir_to_zipfile(directory, zipf, exclude=['glad.zip'])
        zipf.close()
    # Re-encode the submitted multi-valued form as a query string so the
    # result page can link back to an identical configuration.
    serialized = urlencode(list(chain.from_iterable(
        zip_longest('', x[1], fillvalue=x[0]) for x in request.form.lists())
    ))
    serialized_path = os.path.join(directory, '.serialized')
    with open(serialized_path, 'w') as fobj:
        json.dump({'params': serialized, 'messages': messages}, fobj)
    name = os.path.split(directory)[1]
    if current_app.config['FREEZE']:
        current_app.freezer.freeze(name)
    return url_for('generated.autoindex', root=name)
@index.route('/generate', methods=['POST'])
def generate():
    """POST endpoint: run the generator, then redirect to the result.

    Any failure is reported to Sentry (when configured), logged together
    with the submitted form, and surfaced to the user as a flash message
    on the landing page.
    """
    try:
        url = glad_generate()
    except Exception as e:
        # Broad catch is deliberate: this is the top-level request boundary.
        import gladweb
        if gladweb.sentry is not None:
            gladweb.sentry.captureException()
        current_app.logger.exception(e)
        current_app.logger.error(request.form)
        flash(str(e), category='error')
        return redirect(url_for('index.landing'))
    return redirect(url)
| [
"admin@dav1d.de"
] | admin@dav1d.de |
02cfd708885095b2ba4e02b81745936c1ea47f59 | e224a8680496757bfaad411181062a81ca1824a2 | /BinarySearch.py | 78dace1202c4ccdc6601a640ff92542c4fb7834b | [] | no_license | Tech-Learner07/DS-Algo | ab9329cf2212363b83b4f086f56c7a65b33c9fe2 | d92694040dedfa903afc3b1637c7a1969decd57b | refs/heads/main | 2023-02-24T11:20:00.344056 | 2021-02-02T11:33:19 | 2021-02-02T11:33:19 | 334,334,675 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | '''
Check it out: https://en.wikipedia.org/wiki/Binary_search_algorithm
'''
def binarySearch(theList, target):
    """Return True if ``target`` occurs in the sorted list ``theList``.

    Classic iterative binary search: O(log n) comparisons.
    """
    low, high = 0, len(theList) - 1
    while low <= high:
        middle = (low + high) // 2
        candidate = theList[middle]
        if candidate == target:
            return True
        if target < candidate:
            high = middle - 1
        else:
            low = middle + 1
    return False
# Demo: search the sorted list for ``target`` and report the result.
myList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
          11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
target = 16
# BUG FIX: the original passed the literal 16 here, ignoring ``target``.
if binarySearch(myList, target):
    print('Item found.')
else:
    print('Item not found.')
| [
"72344536+MR-Hack-403@users.noreply.github.com"
] | 72344536+MR-Hack-403@users.noreply.github.com |
5ebc3eed3f50f40a3bef822f6948346cd9737775 | 2867eb59cb2f47e256b7594089ef22837805c577 | /models/UNet.py | d38a1974554d51c59b632e7ee41214adaa7d1171 | [] | no_license | chiukin/ESFNet-Pytorch | 931dd97ea90aa05ccae31c35d6b1b61fea464ce9 | 9c49e65cbf2929917a32caca8e9e35390c2141f1 | refs/heads/master | 2020-05-05T08:22:07.816532 | 2019-03-28T12:03:59 | 2019-03-28T12:03:59 | 179,861,160 | 2 | 0 | null | 2019-04-06T16:54:29 | 2019-04-06T16:54:28 | null | UTF-8 | Python | false | false | 3,459 | py | # sub-parts of the U-Net model
import torch
import torch.nn as nn
import torch.nn.functional as F
class double_conv(nn.Module):
    '''(conv => BN => ReLU) * 2'''

    def __init__(self, in_ch, out_ch):
        super(double_conv, self).__init__()
        # Two identical conv/BN/ReLU stages; only the first changes the
        # channel count.  Layers are constructed in the same order as the
        # literal Sequential form, so parameter initialisation matches.
        stages = []
        for ch_in, ch_out in ((in_ch, out_ch), (out_ch, out_ch)):
            stages.append(nn.Conv2d(ch_in, ch_out, 3, padding=1))
            stages.append(nn.BatchNorm2d(ch_out))
            stages.append(nn.ReLU(inplace=True))
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class inconv(nn.Module):
    """Input stage of the U-Net: a single double_conv on the raw image."""
    def __init__(self, in_ch, out_ch):
        super(inconv, self).__init__()
        self.conv = double_conv(in_ch, out_ch)
    def forward(self, x):
        x = self.conv(x)
        return x
class down(nn.Module):
    """Encoder stage: 2x max-pool (halves spatial size) then double_conv."""
    def __init__(self, in_ch, out_ch):
        super(down, self).__init__()
        self.mpconv = nn.Sequential(
            nn.MaxPool2d(2),
            double_conv(in_ch, out_ch)
        )
    def forward(self, x):
        x = self.mpconv(x)
        return x
class up(nn.Module):
    """Decoder stage: upsample, pad to match the skip tensor, concatenate
    along channels, then double_conv."""
    def __init__(self, in_ch, out_ch, bilinear=True):
        super(up, self).__init__()
        # would be a nice idea if the upsampling could be learned too,
        # but my machine do not have enough memory to handle all those weights
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
        self.conv = double_conv(in_ch, out_ch)
    def forward(self, x1, x2):
        # x1: decoder feature map (upsampled here); x2: encoder skip map.
        x1 = self.up(x1)
        # input is CHW
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]
        # Zero-pad x1 so its spatial size matches x2 before concatenation
        # (handles odd input sizes where pool/upsample sizes disagree).
        x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2))
        # for padding issues, see
        # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
        # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
class outconv(nn.Module):
    """Final 1x1 convolution mapping features to per-class logits."""
    def __init__(self, in_ch, out_ch):
        super(outconv, self).__init__()
        self.conv = nn.Conv2d(in_ch, out_ch, 1)
    def forward(self, x):
        x = self.conv(x)
        return x
class UNet(nn.Module):
    """Standard U-Net for 3-channel input with ``config.nb_classes``
    output channels.  Returns raw logits (no final activation)."""
    def __init__(self, config):
        super(UNet, self).__init__()
        self.name = 'Unet'
        self.config=config
        # Encoder: 3 -> 64 -> 128 -> 256 -> 512 -> 512 channels.
        self.inc = inconv(3, 64)
        self.down1 = down(64, 128)
        self.down2 = down(128, 256)
        self.down3 = down(256, 512)
        self.down4 = down(512, 512)
        # Decoder: each stage consumes the concatenated skip connection.
        self.up1 = up(1024, 256)
        self.up2 = up(512, 128)
        self.up3 = up(256, 64)
        self.up4 = up(128, 64)
        self.outc = outconv(64, self.config.nb_classes)
    def forward(self, x):
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)
        x = self.up1(x5, x4)
        x = self.up2(x, x3)
        x = self.up3(x, x2)
        x = self.up4(x, x1)
        x = self.outc(x)
        #return F.sigmoid(x)
        return x
"noreply@github.com"
] | chiukin.noreply@github.com |
70e8c66e667510aaf50021798b5fee88f47c4564 | aa8a204b727c613c4d635a8ece158490be8f367c | /functional_tests/server_tools.py | 5f47b04a1e05cb83771c07f533c0fb4d1b92af97 | [] | no_license | AntonioIsas/django-tdd | ed880e7c76bb54ae525d66be71084e1e0272e1f5 | 69a90f0de61aee4aa68b99b7c1a9fae9edfe359e | refs/heads/master | 2022-05-17T19:07:05.777201 | 2019-12-01T23:21:38 | 2019-12-01T23:21:38 | 169,775,323 | 0 | 0 | null | 2022-04-22T21:00:54 | 2019-02-08T17:48:29 | JavaScript | UTF-8 | Python | false | false | 871 | py | from fabric.api import run
from fabric.context_managers import settings, shell_env
def _get_manage_dot_py(host):
return f'~/sites/{host}/virtualenv/bin/python ~/sites/{host}/manage.py'
def reset_database(host):
    """Flush all data from the remote site's database (``manage.py flush``).

    Runs over SSH via fabric as the hard-coded deploy user below.
    """
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'antonioisas87_gmail_com@{host}'):
        run(f'{manage_dot_py} flush --noinput')
def _get_server_env_vars(host):
    """Read the remote site's .env file and return it as a dict.

    Splits each non-empty line on the *first* '=' only, so values that
    themselves contain '=' (base64 secrets, URLs) stay intact; the old
    unbounded split made dict() raise ValueError on such lines.
    """
    env_lines = run(f'cat ~/{host}/.env').splitlines()
    return dict(l.split('=', 1) for l in env_lines if l)
def create_session_on_server(host, email):
    """Create a pre-authenticated Django session for *email* on the server.

    Exports the server's .env variables so the remote management command sees
    the same settings as the running site; returns the session key string.
    """
    manage_dot_py = _get_manage_dot_py(host)
    with settings(host_string=f'antonioisas87_gmail_com@{host}'):
        env_vars = _get_server_env_vars(host)
        with shell_env(**env_vars):
            session_key = run(f'{manage_dot_py} create_session {email}')
            return session_key.strip()
| [
"antonioisas87@gmail.com"
] | antonioisas87@gmail.com |
e44e9989565a9d1ffcbc9142748500ff5a274785 | e0980f704a573894350e285f66f4cf390837238e | /.history/streams/blocks_20201022114431.py | 652f1b7cfbfba3d9c2325e1ea062cb799ef42b97 | [] | no_license | rucpata/WagtailWebsite | 28008474ec779d12ef43bceb61827168274a8b61 | 5aa44f51592f49c9a708fc5515ad877c6a29dfd9 | refs/heads/main | 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,583 | py | from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
    """Centered title text rendered via streams/title_block.html."""

    text = blocks.CharBlock(
        required=True,
        # Fixed typo: was `elp_text=`, so the editor help text was never
        # applied to this field.
        help_text='Tekst do wyświetlenia',
    )

    class Meta:
        template = 'streams/title_block.html'
        # NOTE(review): 'edycja' is not one of Wagtail's built-in (English)
        # icon names -- confirm a custom icon with this name is registered.
        icon = 'edycja'
        label = 'Tytuł'
        help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
    """Additional logic for links."""
    def url(self):
        # NOTE(review): this method is incomplete -- the file is an editor
        # history snapshot and the last statement is a bare truncated name
        # (`external_l`), which raises NameError if called. It appears meant
        # to fall back to the 'external_link' field when no internal page is
        # chosen; confirm against the finished revision of this file.
        interlan_page = self.get('internal_page')
        external_l
class Link(blocks.StructBlock):
    """A link with display text plus either an internal page or external URL."""
    # Text shown for the rendered link.
    link_text = blocks.CharBlock(
        max_length=50,
        default='Więcej szczegółów'
    )
    # NOTE(review): `interal_page` looks like a typo for `internal_page` --
    # LinkValue.url() reads self.get('internal_page'), which cannot match
    # this field name. Renaming would change stored stream data, so it is
    # only flagged here.
    interal_page = blocks.PageChooserBlock(
        required=False
    )
    external_link = blocks.URLBlock(
        required=False
    )

    class Meta:
        # Expose LinkValue helpers (e.g. .url) on this block's value.
        value_class = LinkValue
class Card(blocks.StructBlock):
    """One card: bold title, optional text, an image and a Link sub-block."""
    title = blocks.CharBlock(
        max_length=100,
        help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
    )
    text = blocks.TextBlock(
        max_length=255,
        help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
    )
    image = ImageChooserBlock(
        help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
    )
    # NOTE(review): 'Wwybierz' looks like a typo for 'Wybierz' in this
    # editor-facing help text (left unchanged here).
    link = Link(help_text = 'Wwybierz link')
class CardsBlock(blocks.StructBlock):
    """A repeatable list of Card blocks rendered via streams/card_block.html."""
    cards = blocks.ListBlock(
        Card()
    )

    class Meta:
        template = 'streams/card_block.html'
        icon = 'image'
        label = 'Karty standardowe'
| [
"rucinska.patrycja@gmail.com"
] | rucinska.patrycja@gmail.com |
566b949d5b6105ffa0ac3812e25ae751a59de219 | fdd67d3733d3db2fb381f25b0985952e3f7c9a4f | /epdAlarm.py | 9fc35afaa5fac12a5bf4482d115b6d71392bd049 | [] | no_license | star-controls/epdAlarm | 56c6ef50616ea4290217b41d0daf3c4ebf7ee952 | 4a966e38116344b9d209dd8efc9abfbbc0e4db5a | refs/heads/master | 2020-03-21T07:51:27.655488 | 2019-03-05T15:04:12 | 2019-03-05T15:04:12 | 138,303,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,072 | py |
from epdchan import epdchan
import paho.mqtt.client as mqtt
from watchdog import watchdog
from softioc import builder
import time
import pandas as pd
#EPD PVs
# All PVs created below are prefixed with "EPD:".
builder.SetDeviceName('EPD')
#list of all EPD channels as 3-index list: elist[east_west][pp][tile]
npp = 12
ntile = 31
elist = []
#east/west loop (0 = east, 1 = west -- TODO confirm the mapping)
for ew in range(0,2):
    elist.append([])
    #PP loop (index 0 is allocated but unused; PP numbering starts at 1)
    for ipp in range(0,npp+1):
        elist[ew].append([])
        #tile loop
        for itile in range(ntile+1):
            #PP starts at 1, handled in epdchan constructor
            elist[ew][ipp].append( epdchan(ew, ipp, itile) )
#watchdog timer for 60 seconds
# Watchdog fires if no MQTT payload arrives for 60 seconds (reset in process_msg).
wdt = watchdog(60, elist)
#file holding alarm limit values (persisted across restarts via put_limit below)
csvlim = "limits.csv"
lframe = pd.read_csv(csvlim)
#set initial alarm values
# NOTE(review): limits are written only on channel [0][0][0]; the getters
# below read the same instance -- presumably epdchan shares one limits
# object across all channels. Confirm in epdchan.
elist[0][0][0].limits.imon_max = lframe['imon_max'][0]
elist[0][0][0].limits.rmon_min = lframe['rmon_min'][0]
elist[0][0][0].limits.rmon_max = lframe['rmon_max'][0]
elist[0][0][0].limits.temp_max = lframe['temp_max'][0]
#functions to show alarm limits
#_____________________________________________________________________________
def get_imon_max():
    """Current high-current alarm limit (read from the shared limits object)."""
    return elist[0][0][0].limits.imon_max
#_____________________________________________________________________________
def get_rmon_min():
    """Current low-resistance alarm limit."""
    return elist[0][0][0].limits.rmon_min
#_____________________________________________________________________________
def get_rmon_max():
    """Current high-resistance alarm limit."""
    return elist[0][0][0].limits.rmon_max
#_____________________________________________________________________________
def get_temp_max():
    """Current high-temperature alarm limit."""
    return elist[0][0][0].limits.temp_max
#_____________________________________________________________________________
def put_limit(key, val):
    """Persist one alarm limit (column *key*, row 0) back to limits.csv."""
    #put limit value to file
    lframe[key][0] = val
    lframe.to_csv(csvlim, index=False)
#PVs to set alarm limits
#_____________________________________________________________________________
def set_imon_max(val):
    """PV write callback: update the in-memory limit and persist it."""
    elist[0][0][0].limits.imon_max = val
    put_limit('imon_max', val)
# Writable PV; initial_value comes from limits.csv loaded above.
imon_max_pv = builder.aOut("imon_max", on_update=set_imon_max, initial_value=get_imon_max(), PREC=2)
#_____________________________________________________________________________
def set_rmon_min(val):
    """PV write callback for the low-resistance limit."""
    elist[0][0][0].limits.rmon_min = val
    put_limit('rmon_min', val)
rmon_min_pv = builder.aOut("rmon_min", on_update=set_rmon_min, initial_value=get_rmon_min(), PREC=1)
#_____________________________________________________________________________
def set_rmon_max(val):
    """PV write callback for the high-resistance limit."""
    elist[0][0][0].limits.rmon_max = val
    put_limit('rmon_max', val)
rmon_max_pv = builder.aOut("rmon_max", on_update=set_rmon_max, initial_value=get_rmon_max(), PREC=1)
#_____________________________________________________________________________
def set_temp_max(val):
    """PV write callback for the high-temperature limit."""
    elist[0][0][0].limits.temp_max = val
    put_limit('temp_max', val)
temp_max_pv = builder.aOut("temp_max", on_update=set_temp_max, initial_value=get_temp_max(), PREC=1)
#_____________________________________________________________________________
def init_alarm_limits():
    """Intentionally a no-op: the aOut PVs above already get their start
    values via initial_value=..., so the old explicit .set() calls (kept
    below for reference) are unnecessary."""
    #put initial values to alarm limits PVs
    #imon_max_pv.set(get_imon_max())
    #rmon_min_pv.set(get_rmon_min())
    #rmon_max_pv.set(get_rmon_max())
    #temp_max_pv.set(get_temp_max())
    pass
#functions for mqtt message
#_____________________________________________________________________________
def get_msg_id(msg, idnam):
    """Return the quoted value that follows the key *idnam* in the raw payload."""
    tail = msg[msg.find(idnam):]
    # tail looks like: idnam" : "value"... -> the value is the third
    # quote-delimited field.
    return tail.split('"')[2]
#_____________________________________________________________________________
def process_msg(msg):
    """Parse one epd_control_fee payload and push values into the EPD channels.

    The payload is JSON-like text parsed positionally: the dcs_header array
    gives column names, the dcs_values table gives one row per channel.
    Silently ignores payloads whose dcs_id/dcs_uid do not match.
    """
    #parse the message, get the values, put them to EPD channel objects
    #check message validity
    if get_msg_id(msg, "dcs_id") != "epd_controller" or get_msg_id(msg, "dcs_uid") != "tonko":
        return
    # Any valid payload counts as a heartbeat.
    wdt.reset()
    #message header: column names between the first [ ] after "dcs_header"
    hstart = msg.find("[", msg.find("dcs_header")) + 1
    hend = msg.find("]")
    hlist = msg[hstart:hend].split(",")
    # Column positions looked up by name so header order can change.
    id_ew = hlist.index('"fps_quad"')
    id_pp = hlist.index('"fps_layer"')
    id_tile = hlist.index('"fps_channel"')
    id_vslope = hlist.index('"vslope"')
    id_vcomp = hlist.index('"temp"')
    id_imon = hlist.index('"imon0"')
    id_rmon = hlist.index('"rmon0"')
    id_state = hlist.index('"state"')
    #get values table: everything between { } after "dcs_values", rows end with ]
    vstart = msg.find("{", msg.find("dcs_values")) + 1
    vend = msg.find("}", vstart)
    vtab = msg[vstart:vend].split("]")
    #table lines loop
    for i in range(len(vtab)):
        if vtab[i] == "":
            continue
        #list of values
        vlist = vtab[i][vtab[i].find("[")+1:].split(",")
        #EPD indices
        ew = int(vlist[id_ew])
        pp = int(vlist[id_pp])
        tile = int(vlist[id_tile])
        #print repr(ew), repr(pp), repr(tile)
        #voltage and current values
        epd = elist[ew][pp][tile]
        epd.vslope = float(vlist[id_vslope])
        epd.vcomp = float(vlist[id_vcomp])
        epd.imon = float(vlist[id_imon])
        epd.rmon = float(vlist[id_rmon])
        epd.state = str(vlist[id_state]).lower().strip('"')
        #print repr(epd.ew), repr(epd.pp), repr(epd.tile), repr(epd.vslope), repr(epd.vcomp), repr(epd.imon), repr(epd.rmon)
        #put values to PVs in EPD object
        epd.pvput()
#mqtt client functions
#_____________________________________________________________________________
def on_connect(client, userdata, flags, rc):
    # The callback for when the client receives a CONNACK response from the server.
    # Subscribing here (rather than once at startup) re-subscribes after reconnects.
    print("MQTT connected with result code "+str(rc))
    client.subscribe("dcs/set/Control/epd/epd_control_fee")
#_____________________________________________________________________________
def on_message(client, userdata, msg):
    # The callback for when a PUBLISH message is received from the server.
    process_msg(msg.payload)
#_____________________________________________________________________________
def read_mqtt():
    """Connect to the MQTT broker, start the background receive loop and the watchdog.

    Returns immediately -- loop_start() runs the network loop in its own thread.
    """
    #initialize alarm limits PVs
    init_alarm_limits()
    #main mqtt loop
    client = mqtt.Client()
    client.on_connect = on_connect
    client.on_message = on_message
    client.connect("mq01.starp.bnl.gov")
    client.loop_start()
    wdt.start()
    # The block below is a manual watchdog test, kept for reference.
    #watchdog test, 10 sec timeout
    #time.sleep(10)
    #client.loop_stop()
    #print "alarm on 0, 1, 0"
    #elist[0][1][0].set_invalid()
    #time.sleep(20)
    #print "running again"
    #client.loop_start()
| [
"jaroslav.adam@cern.ch"
] | jaroslav.adam@cern.ch |
de650df458a9ff95cf300012180b8a01322efc8c | c928b3fad4e391a6ab50c692907794b79200876c | /forms/registration_form.py | da482a55059c9c6dce74e1cfa825244ada73610e | [] | no_license | AidasKitm/ZP202Auction | d65a5945a6dac698ae69db5a5bc80be5e959c84e | efb9ba39fc24bc877900e58c458d46f93aa605be | refs/heads/master | 2023-03-23T06:59:38.447641 | 2021-03-10T07:09:57 | 2021-03-10T07:09:57 | 337,635,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from wtforms import StringField, PasswordField
from flask_wtf import FlaskForm
from wtforms.validators import DataRequired, Email
from wtforms.fields.html5 import EmailField
class RegisterForm(FlaskForm):
    """Sign-up form: personal details plus account credentials (all required)."""
    first_name = StringField("Please input your first name", validators=[DataRequired()])
    last_name = StringField("Please input your last name", validators=[DataRequired()])
    # Email() adds format validation on top of the HTML5 email input.
    email = EmailField("Please input your email", validators=[DataRequired(), Email()])
    address = StringField("Please input your address", validators=[DataRequired()])
    username = StringField("Please input your username", validators=[DataRequired()])
    password = PasswordField("input password", validators=[DataRequired()])
"aidas.puskunigis@kitm.lt"
] | aidas.puskunigis@kitm.lt |
79f618f49ff37f1fcc46d4c8c90d6c24e380892e | ae75d226fe9c806baa0bc1960f19a57a0c8c6d00 | /strava_get_segment_details.py | 901e0182963f21250a8be6d4e97dcff56f6b6fc4 | [] | no_license | vigi4cure/vigi4cure.github.io | 5c06f1fe7bc15fad96c594104d0146abf0c271f7 | 895e58988731005b33f8a7ab9a2c23256e82a2b6 | refs/heads/master | 2022-12-10T09:07:22.697163 | 2019-06-08T16:40:38 | 2019-06-08T16:40:38 | 131,753,372 | 1 | 1 | null | 2022-12-08T02:04:15 | 2018-05-01T19:19:16 | HTML | UTF-8 | Python | false | false | 1,966 | py | #!/usr/bin/python3
import csv, time
from stravalib.client import Client
from retrying import retry
@retry(wait_exponential_multiplier=1000, wait_exponential_max=10000, stop_max_delay=30000)
def retry_get_segment(client,j):
    """Fetch Strava segment *j*, retrying with exponential backoff for up to ~30 s."""
    return client.get_segment(j)
def main():
    """Read segment ids from segments.csv and dump each segment's details.

    Successful lookups are appended to segment_details.csv; ids that fail
    are logged to bad_segments.csv. Sleeps between requests to stay under
    the Strava API rate limit.
    """
    # Collect the segment ids to fetch. `with` guarantees the input file is
    # closed (it previously was never closed at all).
    segmentlist = []
    with open('segments.csv') as infile:
        for line in csv.DictReader(infile):
            segmentlist.append(line["Segment Id"])
    # SECURITY(review): this access token is hard-coded in source control;
    # it should be revoked and loaded from the environment instead.
    client = Client(access_token='99c2994556a29905b96eb4197996854041ca47ca')
    # `with` also guarantees the output files are closed on any exception
    # (the old explicit close() calls were skipped on error).
    with open('segment_details.csv', 'w') as segoutfile, \
            open('bad_segments.csv', 'w') as segbad:
        segoutfile.write('id,segment_id,segment_name,resource_state,start_latitude,start_longitude,end_latitude,end_longitude'+'\n')
        segbad.write('Segment ID\n')
        for num, j in enumerate(segmentlist):
            try:
                seg = retry_get_segment(client, j)
                print(str(j))
                segoutfile.write('%s,%s,"%s",%s,%s,%s,%s,%s\n'%(num,
                    seg.id,
                    seg.name,
                    seg.resource_state,
                    seg.start_latitude,
                    seg.start_longitude,
                    seg.end_latitude,
                    seg.end_longitude))
            except Exception as e:
                # Best-effort: record the failing id and keep going.
                print(str(j), ':', str(e))
                segbad.write(str(j) + '\n')
            time.sleep(1.5)
| [
"ywen@vigilantglobal.com"
] | ywen@vigilantglobal.com |
fd4c9a5bb433757d1d9b8356a5dbb2cf4c8a67af | cc55ed0b6f822da0ef293efa0261a70506a7d346 | /05_Django/05_django_form/config/settings.py | 3e4b2f3941030e2dabc3616e0d47a3af75e61a5e | [] | no_license | Gunny-Lee/TIL | 4e04a3ce836a845a7726b1899cfccef920f3ce56 | 8f9847a34ad99b69d746c2237b6518cefacc5338 | refs/heads/master | 2022-12-27T10:40:30.959182 | 2019-11-18T08:36:16 | 2019-11-18T08:36:16 | 216,502,916 | 0 | 0 | null | 2022-12-08T06:48:21 | 2019-10-21T07:21:35 | Python | UTF-8 | Python | false | false | 3,361 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 2.2.7.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# SECURITY(review): this real-looking SECRET_KEY is committed to source
# control; rotate it and load it from an environment variable before deploying.
SECRET_KEY = 'q&%76z_icsygv*95mcvxp!ek4vj+8hw6_c&a-19lkozb-nlx(z'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'articles',
'accounts',
'bootstrap4',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'config', 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# login_required 요청 경로 커스터마이징
# - 기본값은 '/accounts/login/'
# LOGIN_URL = '/members/login/'
# 기본값: auth.User
AUTH_USER_MODEL = 'accounts.User' | [
"student@M150127"
] | student@M150127 |
7ee8daf4289f9f23986a34994da4d93f21674b33 | 190a73f2ecfa7d2e53a6879db9dcc645ea8684d2 | /main.py | ba00781ab646ef665c894aa1278011568e54acdb | [] | no_license | LucasBarrosDosSantos/algorithm_classification_k-nearest_neighbor | 1f9d9f71b20126db7fe8f602878a809a42ae3fed | f4e85b154c4d653f0653d1994e620888e0ee352e | refs/heads/master | 2022-12-11T20:28:49.525074 | 2020-09-16T23:59:51 | 2020-09-16T23:59:51 | 296,173,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,920 | py | # definindo dataframe
# Dataset rows: 7 numeric features plus the class label in the last column.
df = []
import math
# Read the CSV; the original class labels 'N'/'O' are mapped to 1/0.
# NOTE(review): the replace() runs on the whole line, so it assumes 'N' and
# 'O' never occur elsewhere in a row (true for purely numeric features).
with open("breast_cancer.csv", "r") as dataset:
    for instancia in dataset.readlines():
        x = instancia.replace('\n', '').replace('N', '1').replace('O', '0').split(',')
        df.append(
            [
                float(x[0]),
                float(x[1]),
                float(x[2]),
                float(x[3]),
                float(x[4]),
                float(x[5]),
                float(x[6]),
                float(x[7])
            ]
        )
# function que retorna informações condicionadas ao parametro info
def info_dataset(amostras, info=True):
    """Count the two classes in *amostras* (class label is the last column).

    Returns [total, n_class_1, n_class_0]; when *info* is truthy, also
    prints a summary (idiomatic truthiness test instead of `== True`).
    """
    outpu1, output2 = 0, 0
    for amostra in amostras:
        if amostra[-1] == 1:
            outpu1 += 1
        else:
            output2 += 1
    if info:
        print('Total de amostras :', len(amostras))
        print('Total normal : ', outpu1)
        print('Total Alterado: ', output2)
    return [len(amostras), outpu1, output2]
# qual percentual do dataset vai ser utilizado para treino ?
# Fraction of the dataset used for training (rest goes to the test set).
porcentagem = 0.5
# Per-class totals for the whole dataset (printing disabled).
_, output1, output2 = info_dataset(df, info=False)
# Training rows accumulate here...
treinamento = []
# ...and held-out rows here.
teste = []
# Per-class training quotas and running counters for the split loop below.
max_output1 = int(porcentagem * output1)
max_output2 = int(porcentagem * output2)
total_output1 = 0
total_output2 = 0
# calcular a distancia entre os pontos
def distancia_euclidiana(p1, p2):
    """Euclidean distance between p1 and p2 over the first len(p1) coordinates.

    NOTE(review): callers pass full dataset rows, so the class label in the
    last column is included in the distance -- confirm that is intended.
    """
    quadrados = ((valor - p2[i]) ** 2 for i, valor in enumerate(p1))
    return math.sqrt(sum(quadrados))
for amostra in df:
    if (total_output1 + total_output2) < (max_output1 + max_output2):
        # training quota not yet filled
        treinamento.append(amostra)
        # NOTE(review): when a class-1 sample arrives after its quota
        # (max_output1) is already full, it is still appended to the
        # training set but counted against total_output2, so the per-class
        # split is only approximate. Confirm whether a strict stratified
        # split was intended.
        if amostra[-1] == 1 and total_output1 < max_output1:
            total_output1 += 1
        else:
            total_output2 += 1
    else:
        # remaining samples form the test set
        teste.append(amostra)
# função que executa classificação utilizando a função euclidiana
def knn(treinamento, nova_amostra, k):
    """Classify *nova_amostra* by majority vote among its k nearest training rows.

    Returns 1 when class 1 holds a strict majority, otherwise 0 (so ties
    resolve to class 0).
    """
    distancias = {
        indice: distancia_euclidiana(amostra, nova_amostra)
        for indice, amostra in enumerate(treinamento)
    }
    # Indices of the k smallest distances (stable order on ties).
    k_vizinhos = sorted(distancias, key=distancias.get)[:k]
    votos_1 = sum(1 for indice in k_vizinhos if treinamento[indice][-1] == 1)
    votos_0 = len(k_vizinhos) - votos_1
    return 1 if votos_1 > votos_0 else 0
# Evaluate k-NN accuracy on the held-out test set (k chosen manually).
acertos = 0
k = 9
for amostra in teste:
    classe = knn(treinamento, amostra, k)
    if amostra[-1] == classe:
        acertos += 1
print("Total de treinamento ", len(treinamento))
print("Total de testes ", len(teste))
print("Total de acertos ", acertos)
# NOTE(review): divides by len(teste); raises ZeroDivisionError if the
# test split is empty (porcentagem == 1.0).
print("Porcentagem de acerto ", 100 * acertos / len(teste))
| [
"lucas2019.fullstack@gmail.com"
] | lucas2019.fullstack@gmail.com |
3ca238515e0251221aaef76561e40ed16ed03328 | 55f8345165016d4080a0d380dff7be8de3b37643 | /rm_background.py | fee59aca194b30594254e25de134b2a081548306 | [] | no_license | hamparmin/face_extract | d9c3d8a3c030787b91ade89bbd8d942de79632c5 | 99bdd00add30b428414d620836aa2d2f204590b2 | refs/heads/master | 2022-11-06T04:18:22.515467 | 2020-06-26T13:29:12 | 2020-06-26T13:29:12 | 275,145,295 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,406 | py |
from torchvision import models
from PIL import Image
import matplotlib.pyplot as plt
import torch
import numpy as np
import cv2
# Apply the transformations needed
import torchvision.transforms as T
# Define the helper function
def decode_segmap(image, source, nc=21):
    """Composite the source image over white using the segmentation map.

    *image* is a 2-D array of Pascal-VOC class indices (0..nc-1); any
    non-background pixel keeps the (resized) source image, background
    becomes white. Returns a float RGB image normalized to [0, 1].
    """
    # Pascal-VOC class palette; any non-zero color marks "foreground" below.
    label_colors = np.array([(0, 0, 0),  # 0=background
               # 1=aeroplane, 2=bicycle, 3=bird, 4=boat, 5=bottle
               (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128), (128, 0, 128),
               # 6=bus, 7=car, 8=cat, 9=chair, 10=cow
               (0, 128, 128), (128, 128, 128), (64, 0, 0), (192, 0, 0), (64, 128, 0),
               # 11=dining table, 12=dog, 13=horse, 14=motorbike, 15=person
               (192, 128, 0), (64, 0, 128), (192, 0, 128), (64, 128, 128), (192, 128, 128),
               # 16=potted plant, 17=sheep, 18=sofa, 19=train, 20=tv/monitor
               (0, 64, 0), (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128)])
    r = np.zeros_like(image).astype(np.uint8)
    g = np.zeros_like(image).astype(np.uint8)
    b = np.zeros_like(image).astype(np.uint8)
    # Paint each class's pixels with its palette color.
    for l in range(0, nc):
        idx = image == l
        r[idx] = label_colors[l, 0]
        g[idx] = label_colors[l, 1]
        b[idx] = label_colors[l, 2]
    rgb = np.stack([r, g, b], axis=2)
    # Load the foreground input image
    foreground = cv2.imread(source)
    # Change the color of foreground image to RGB
    # and resize image to match shape of R-band in RGB output map
    foreground = cv2.cvtColor(foreground, cv2.COLOR_BGR2RGB)
    foreground = cv2.resize(foreground,(r.shape[1],r.shape[0]))
    # Create a background array to hold white pixels
    # with the same size as RGB output map
    background = 255 * np.ones_like(rgb).astype(np.uint8)
    # Convert uint8 to float
    foreground = foreground.astype(float)
    background = background.astype(float)
    # Create a binary mask of the RGB output map using the threshold value 0
    th, alpha = cv2.threshold(np.array(rgb),0,255, cv2.THRESH_BINARY)
    # Apply a slight blur to the mask to soften edges
    alpha = cv2.GaussianBlur(alpha, (7,7),0)
    # Normalize the alpha mask to keep intensity between 0 and 1
    alpha = alpha.astype(float)/255
    # Multiply the foreground with the alpha matte
    foreground = cv2.multiply(alpha, foreground)
    # Multiply the background with ( 1 - alpha )
    background = cv2.multiply(1.0 - alpha, background)
    # Add the masked foreground and background
    outImage = cv2.add(foreground, background)
    # Return a normalized output image for display
    return outImage/255
def segment(net, path, output_path, show_orig=True, dev='cuda'):
    """Run semantic segmentation on the image at *path* and save the composite.

    Uses *net* (a torchvision segmentation model) to get per-pixel class
    indices, then composites via decode_segmap and saves the matplotlib
    figure to *output_path*. NOTE(review): dev defaults to 'cuda' -- this
    fails on CPU-only machines unless a different dev is passed.
    """
    img = Image.open(path)
    if show_orig: plt.imshow(img); plt.axis('off'); plt.show()
    # Comment the Resize and CenterCrop for better inference results
    # (normalization constants are the standard ImageNet mean/std).
    trf = T.Compose([T.Resize(450),
                     #T.CenterCrop(224),
                     T.ToTensor(),
                     T.Normalize(mean = [0.485, 0.456, 0.406],
                                 std = [0.229, 0.224, 0.225])])
    inp = trf(img).unsqueeze(0).to(dev)
    out = net.to(dev)(inp)['out']
    # argmax over the class dimension -> H x W map of class indices.
    om = torch.argmax(out.squeeze(), dim=0).detach().cpu().numpy()
    rgb = decode_segmap(om, path)
    plt.imshow(rgb)
    plt.axis('off')
    plt.savefig(output_path)
# Pre-trained DeepLabV3/ResNet-101 in eval mode (weights download on first use).
dlab = models.segmentation.deeplabv3_resnet101(pretrained=1).eval()
def replace_background(face_path, output_path):
    """Segment the subject in *face_path* and save it on a white background."""
    segment(dlab, face_path, output_path, show_orig=False)
"43430778+hamparmin@users.noreply.github.com"
] | 43430778+hamparmin@users.noreply.github.com |
e16438dc36975f45b9ebf78a6a45f4a95b859c51 | aef69557d8960205a780e61b7c2dfbb1d7733449 | /Code/AshtonSmith/django_labs/django_doc_turotial/mysite/polls/views.py | d9488434e4b59abcd6fe9b5deac77cb5b83dfeab | [] | no_license | sbtries/class_pandaaaa | 579d6be89a511bdc36b0ce8c95545b9b704a734a | bbf9c419a00879118a55c2c19e5b46b08af806bc | refs/heads/master | 2023-07-18T14:18:25.881333 | 2021-09-02T22:48:29 | 2021-09-02T22:48:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,379 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse, Http404, HttpResponseRedirect
from .models import Question, Choice
from django.template import loader
from django.urls import reverse
from django.views import generic
from django.utils import timezone
# Create your views here.
def index(request):
    """Render the five most recently published questions."""
    latest_question_list = Question.objects.order_by('-pub_date')[:5]
    # template = loader.get_template('polls/index.html')
    context = {
        'latest_question_list':latest_question_list
    }
    return render(request, 'polls/index.html' , context)
class IndexView(generic.ListView):
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions, excluding future ones.

        This class previously defined get_queryset twice; only the second
        (pub_date-filtered) definition ever took effect, so the shadowed
        duplicate has been removed with no behavior change.
        """
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
# def detail(request, question_id):
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404('qUesTiOn does not exist')
# return HttpResponse("you're looking at question %s" % question_id)
class DetailView(generic.DetailView):
    # Generic detail page for one Question (looked up by pk from the URL).
    model = Question
    template_name = 'polls/detail.html'
# def detail(request, question_id):
# try:
# question = Question.objects.get(pk=question_id)
# except Question.DoesNotExist:
# raise Http404("Question does not exist")
# return render(request, 'polls/detail.html', {'question': question})
class ResultsView(generic.DetailView):
    # Generic results page for one Question; shares the model with DetailView.
    model = Question
    template_name = 'polls/results.html'
def results(request, question_id):
    """Render the results page for one question (404 if it does not exist)."""
    question = get_object_or_404(Question, pk=question_id)
    return render(request, 'polls/results.html', {'question': question})
def vote(request, question_id):
    """Record a vote for one of the question's choices, then redirect to results.

    Re-renders the detail page with an error message when no choice was
    submitted (KeyError) or the submitted choice no longer exists.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # F() pushes the increment into the database, avoiding the
        # lost-update race when two requests vote concurrently.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
# class DetailView(generic.DetailView):
# ...
# def get_queryset(self):
# """
# Excludes any questions that aren't published yet.
# """
# return Question.objects.filter(pub_date__lte=timezone.now())
# return HttpResponse('your voting on question %s.' % question_id)
# output= ', '.join([q.question_text for q in latest_question_list])
# return HttpResponse(output)
# return HttpResponse(template.render(context,request))
# print('index')
# def results(request, question_id):
# response = "your looking at the results of question %s."
# return HttpResponse(response % question_id)
# return render(request, 'polls/detail.html', {'question':question}) | [
"ashton23@pdx.edu"
] | ashton23@pdx.edu |
8487a600e8906b93b9849ec38bb6cbc794d4f4dc | 1dbdca507976f65ec3434ffa7c6a37e9de646efe | /012_rgb_led.py | 5620c6be287dc15ca8159898577b1823cb6ea045 | [] | no_license | hoovejd/raspberry_pi_examples | b8e4861dfa3dd10f0c58347662b7ba877260c17d | 0a868b25e24c6eda06f27a1147960642f0a3b8a4 | refs/heads/master | 2020-03-26T15:58:54.075282 | 2018-11-09T19:34:51 | 2018-11-09T19:34:51 | 145,075,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 518 | py | from gpiozero import RGBLED
from time import sleep
led = RGBLED(red=9, green=10, blue=11)
led.red = 1 #full red
sleep(1)
led.red = 0.5 #half red
sleep(1)
led.color = (0,1,0) # full green
sleep(1)
led.color = (1,0,1) #magenta
sleep(1)
led.color = (1,1,0) #yellow
sleep(1)
led.color = (0,1,1) #cyan
sleep(1)
led.color = (1,1,1) #white
sleep(1)
led.color = (0, 0, 0) #off
sleep(1)
# slowly increase intensity of blue
for n in range(100):
led.blue = n/100
sleep(0.1)
| [
"hoov85@gmail.com"
] | hoov85@gmail.com |
3b34f0811582e8e171f74e10a8ac53405e6d76cb | f7b9c4519a5dcfb4b1cf736bfab339c1fe19d967 | /set2/c14_Byte_at_a_time_ECB_decryption_Harder.py | e35d036842e7a15c17d0f8933bf0986ef3109179 | [] | no_license | wotmd/cryptopals | e1b016bbc3ca91e9d0dfb50f4dbfc152f4e56d72 | f117c42662541c656f3e85af9ca3145ba4df5be9 | refs/heads/master | 2020-03-22T04:55:45.825375 | 2018-12-28T06:31:29 | 2018-12-28T06:31:29 | 139,530,347 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | from c12_Byte_at_a_time_ECB_decryption_Simple import *
from Crypto.Cipher import AES
from random import randint
import base64
import os
Prefix = os.urandom(randint(8, 48))
def prefix_encryption_oracle(plain):
    """ECB oracle that prepends the fixed random Prefix (8-48 bytes) to *plain*."""
    plain = Prefix+plain
    return encryption_oracle(plain)
def find_AAA(encryption_oracle, BLOCK_SIZE):
    """Locate the repeated ciphertext block produced by a long run of 'A's.

    Feeds 4 blocks of 'A' through the oracle; under ECB, identical plaintext
    blocks encrypt identically, so a block that appears more than once (with
    its last occurrence block-aligned) is the 'A'-block. Returns that block
    and the offset of its first occurrence, or ("", 0) if none is found.
    """
    ciphertext = encryption_oracle("A" * BLOCK_SIZE * 4)
    for offset in range(0, len(ciphertext) - BLOCK_SIZE, BLOCK_SIZE):
        block = ciphertext[offset:offset + BLOCK_SIZE]
        first = ciphertext.find(block)
        last = ciphertext.rfind(block)
        if first != last and last % BLOCK_SIZE == 0:
            return block, first
    return "", 0
def find_Prefix_size(encryption_oracle, BLOCK_SIZE):
    """Return the byte length of the oracle's secret prefix.

    Feeds growing runs of 'A' until the sentinel block found by find_AAA
    reappears; the number of 'A's needed to re-align pins down the prefix
    length within the block located at *index*.

    Raises:
        Exception: if the oracle does not appear to use ECB mode.
    """
    AAA, index = find_AAA(encryption_oracle, BLOCK_SIZE)
    if AAA == "":
        raise Exception('Not using ECB')
    # Removed: an unused encryption_oracle("") call and an unreachable
    # `break` after the return -- no behavior change.
    p = "A"
    while True:
        if AAA in encryption_oracle(p):
            return index + (BLOCK_SIZE - len(p))
        p += "A"
if __name__ == '__main__':
    # Recover the oracle's secret one byte at a time (byte-at-a-time ECB
    # decryption with the random prefix accounted for).
    BLOCK_SIZE = find_block_size(prefix_encryption_oracle)
    PREFIX_SIZE = find_Prefix_size(prefix_encryption_oracle, BLOCK_SIZE)
    print("BLOCK_SIZE : %d" % BLOCK_SIZE)
    print("PREFIX_SIZE : %d" % PREFIX_SIZE)
    secret = ""
    while(True):
        one_byte = get_next_byte(prefix_encryption_oracle, secret, BLOCK_SIZE, PREFIX_SIZE)
        # Empty string signals the whole secret has been recovered.
        if one_byte == "":
            break
        secret += one_byte
    print(secret)
| [
"wotmd8534@naver.com"
] | wotmd8534@naver.com |
851218c207bbbafac68c3e7b02fca713e1594cc8 | 182212d692ec227718721b6c55ae57dc3b2ae69b | /Test/FunctionalTests/CodeEditorTestScripts/CreateNewDocument.py | 11256547963a27a211f67d62af50145532f83c78 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | Optis-World/ATF | a4c0cb70ca24c6881e07d95d7daa424183b8479f | ef06985a75b704d712a0109911aa44609952e169 | refs/heads/master | 2021-12-23T04:07:23.494033 | 2019-08-22T17:59:53 | 2019-08-22T17:59:53 | 101,640,560 | 1 | 2 | Apache-2.0 | 2021-09-02T13:44:53 | 2017-08-28T12:23:53 | C# | UTF-8 | Python | false | false | 632 | py | #Copyright (c) 2014 Sony Computer Entertainment America LLC. See License.txt.
import sys
sys.path.append("./CommonTestScripts")
import Test
# NOTE(review): Python 2 script (see the `print` statement below). Relies on
# `atfDocReg`, `atfDocService` and `editor` being injected by the ATF test host.
# Baseline document count before creating a new document.
cntOg = Test.GetEnumerableCount(atfDocReg.Documents)
doc = atfDocService.OpenNewDocument(editor.CreateNewDocument(".txt"))
Test.Equal(cntOg + 1, Test.GetEnumerableCount(atfDocReg.Documents), "Verify document count increased")
Test.NotNull(doc, "Verify new document created")
Test.NotNull(atfDocReg.ActiveDocument, "Verify we have an active document")
Test.Equal(doc.Uri.LocalPath, atfDocReg.ActiveDocument.Uri.LocalPath, "Verify new document is the active document")
print Test.SUCCESS
| [
"ron_little@playstation.sony.com"
] | ron_little@playstation.sony.com |
020d81e63c587a5791e3740eb9728eb3782c80f2 | c770679730937a2dc3e9ec62d721c2782b44e6d0 | /ls8/kbtest.py | dcd988ae908c0a764e6b8ade67530a86859fc14a | [] | no_license | Lambda-CS/Computer-Architecture | 6156d9fef458183de5918c54f910e47e2b80d4e7 | 947b28fd319c074313b0b0a0c5df26bbb91c9f32 | refs/heads/master | 2022-11-20T17:35:21.651380 | 2020-07-26T02:10:38 | 2020-07-26T02:10:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | import curses
import msvcrt
# def main(stdscr):
# # do not wait for input when calling getch
# stdscr.nodelay(1)
# while True:
# # get keyboard input, returns -1 if none available
# c = stdscr.getch()
# if c != -1:
# # print numeric value
# stdscr.addstr(str(c) + ' ')
# stdscr.refresh()
# # return curser to start position
# stdscr.move(0, 0)
# if __name__ == '__main__':
# curses.wrapper(main)
print(curses.version)
| [
"antonyk@users.noreply.github.com"
] | antonyk@users.noreply.github.com |
a06b4cdb26e979978b7442a5953e6661148f9c4d | 90c6262664d013d47e9a3a9194aa7a366d1cabc4 | /scripts/make_contract_tests.py | c5a713b158970664e7323b7f9745d351a8a8b188 | [
"MIT"
] | permissive | tqtezos/pytezos | 3942fdab7aa7851e9ea81350fa360180229ec082 | a4ac0b022d35d4c9f3062609d8ce09d584b5faa8 | refs/heads/master | 2021-07-10T12:24:24.069256 | 2020-04-04T12:46:24 | 2020-04-04T12:46:24 | 227,664,211 | 1 | 0 | MIT | 2020-12-30T16:44:56 | 2019-12-12T17:47:53 | Python | UTF-8 | Python | false | false | 5,441 | py | from os.path import join, dirname, exists
from os import mkdir
import json
from conseil import conseil
from conseil.api import ConseilApi
from pytezos import pytezos
from tests import relpath
from tests.templates import michelson_coding_test_case, micheline_coding_test_case, \
test_michelson_parse,test_michelson_format, test_michelson_inverse, test_micheline_inverse
data_dir = join(dirname(dirname(__file__)), 'tests/contracts')
Account = conseil.tezos.babylonnet.accounts
Operation = conseil.tezos.babylonnet.operations
def get_accounts(limit=1):
    """Return up to *limit* KT1 contract accounts whose script and storage
    are parsable, ranked by the number of applied transactions they received.

    Uses the conseil query DSL; results are lists of dict-like rows.
    """
    # Rank contract addresses by count of applied, parameterized transactions.
    operations = Operation.query(Operation.destination,
                                 Operation.operation_group_hash.count()) \
        .filter(Operation.destination.startswith('KT1'),
                Operation.parameters.isnot(None),
                Operation.parameters.notlike('Unparsable'),
                Operation.kind == 'transaction',
                Operation.status == 'applied') \
        .order_by(Operation.operation_group_hash.count().desc()) \
        .limit(limit) \
        .all()
    addresses = list(map(lambda x: x['destination'], operations))
    # Fetch the matching accounts, excluding unparsable script/storage rows.
    accounts = Account.query(Account.account_id, Account.script, Account.storage) \
        .filter(Account.account_id.in_(*addresses),
                Account.storage.notlike('Unparsable'),
                Account.script.notlike('Unparsable')) \
        .all()
    return accounts
def get_operations(account_id, limit=1):
    """Return up to *limit* applied, non-internal transactions with
    parameters that target *account_id*.

    Each row carries the aggregated max block level (labelled ``level``)
    and the operation's Michelson ``parameters``.
    """
    operations = Operation.query(Operation.block_level.max().label('level'),
                                 Operation.parameters) \
        .filter(Operation.destination == account_id,
                Operation.parameters.isnot(None),
                Operation.parameters.notlike('Unparsable'),
                Operation.kind == 'transaction',
                Operation.status == 'applied',
                Operation.internal.is_(False)) \
        .limit(limit) \
        .all()
    return operations
def find_operation(block_level, destination):
    """Return ``(parameters, opg_hash)`` for the first manager transaction in
    block *block_level* that targets *destination* and carries parameters.

    :param block_level: block level to scan
    :param destination: contract address the transaction must target
    :raises LookupError: when no matching operation exists in the block
        (the original ended with a bare ``assert False``, which is silently
        stripped under ``python -O``).
    """
    opg_list = pytezos.shell.blocks[block_level].operations.managers()
    for opg in opg_list:
        for content in opg['contents']:
            if content.get('parameters') and content['destination'] == destination:
                return content['parameters'], opg['hash']
    raise LookupError(
        'no parameterized operation targeting %s found in block %s'
        % (destination, block_level))
def make_package(account, operations=1):
    """Create a fixture directory for one contract: its code, storage and up
    to *operations* recent call parameters, each saved as .tz + .json pairs.

    Returns a ``files`` dict describing what was written (used by the test
    generators), or None when the package directory already exists.
    """
    account_dir = join(data_dir, account["account_id"])
    # Idempotence guard: an existing directory means the package was built.
    if exists(account_dir):
        return
    else:
        mkdir(account_dir)
    files = {
        'dir': account_dir,
        'name': account['account_id'][:6],
        'code': [],
        'storage': [],
        'parameter': []
    }
    def write_files(michelson, micheline, section, name):
        # Persist both representations and record them under `section`.
        tz_path = join(account_dir, f'{section}_{name}.tz')
        json_path = join(account_dir, f'{section}_{name}.json')
        with open(tz_path, 'w+') as f:
            f.write(michelson)
        with open(json_path, 'w+') as f:
            f.write(json.dumps(micheline, indent=2))
        files[section].append((name, tz_path, json_path))
    # Micheline comes from the node RPC; Michelson from the conseil row.
    contract = pytezos.shell.contracts[account['account_id']]()
    write_files(
        michelson=account['script'],
        micheline=contract['script']['code'],
        section='code',
        name=account['account_id'][:6]
    )
    write_files(
        michelson=account['storage'],
        micheline=contract['script']['storage'],
        section='storage',
        name=account['account_id'][:6]
    )
    # One parameter fixture per recorded operation, named by group hash.
    operations = get_operations(account['account_id'], limit=operations)
    for operation in operations:
        parameters, opg_hash = find_operation(operation['level'], account['account_id'])
        write_files(
            michelson=operation['parameters'],
            micheline=parameters,
            section='parameter',
            name=opg_hash[:6]
        )
    return files
def make_michelson_tests(files: dict):
    """Write the generated Michelson coding test module for one package.

    Emits parse, format and inverse test cases for every code/storage/
    parameter fixture recorded in *files*.
    """
    parts = [michelson_coding_test_case.format(case=files['name'])]
    for section in ('code', 'storage', 'parameter'):
        for name, tz_path, json_path in files[section]:
            case = f'{section}_{name}'
            tz_rel = relpath(tz_path)
            json_rel = relpath(json_path)
            parts.append(test_michelson_parse.format(case=case, json_path=json_rel, tz_path=tz_rel))
            parts.append(test_michelson_format.format(case=case, json_path=json_rel, tz_path=tz_rel))
            parts.append(test_michelson_inverse.format(case=case, json_path=json_rel))
    target = join(files['dir'], f'test_michelson_coding_{files["name"]}.py')
    with open(target, 'w+') as f:
        f.write(''.join(parts))
def make_micheline_tests(files: dict):
    """Write the generated Micheline coding test module for one package.

    The module header embeds the contract code fixture; one inverse test is
    emitted per storage/parameter fixture recorded in *files*.
    """
    code_json_path = files['code'][0][2]
    parts = [
        micheline_coding_test_case.format(case=files['name'],
                                          json_path=relpath(code_json_path))
    ]
    for section in ('storage', 'parameter'):
        for name, _tz_path, json_path in files[section]:
            parts.append(test_micheline_inverse.format(case=f'{section}_{name}',
                                                       json_path=relpath(json_path),
                                                       section=section))
    target = join(files['dir'], f'test_micheline_coding_{files["name"]}.py')
    with open(target, 'w+') as f:
        f.write(''.join(parts))
if __name__ == '__main__':
    # Build fixture packages and generated test modules for the 100
    # most-used contracts, with up to 7 recent operations each.
    accounts = get_accounts(limit=100)
    for acc in accounts:
        package = make_package(acc, operations=7)
        # make_package returns None when the package directory already exists.
        if package:
            make_michelson_tests(package)
            make_micheline_tests(package)
| [
"mz@baking-bad.org"
] | mz@baking-bad.org |
a3b4529f2a8af100e1863c8d7f61d0522f76b1ce | a46646a707b9d747fcf29a86f67a4ccbcbd0ddb9 | /week10/book/76prayme.py | 9a15021619c3da6599d23328531d5d56030c674c | [] | no_license | DevooKim/algorithm-study | 5720642bb43ea364dae924ee038f97379f2ef85b | 830b148defc7f0097abe2f5d3f4e9d8f3333efb0 | refs/heads/main | 2023-02-23T18:40:28.978111 | 2021-01-28T12:09:06 | 2021-01-28T12:09:06 | 302,206,505 | 2 | 1 | null | 2021-01-28T12:09:07 | 2020-10-08T01:54:08 | Python | UTF-8 | Python | false | false | 2,155 | py | import collections
import heapq
import functools
import itertools
import re
import sys
import math
import bisect
from typing import List
class Solution:
    """LeetCode 76 "Minimum Window Substring": three alternative solutions.

    Each method returns the shortest substring of ``s`` that contains every
    character of ``t`` (with multiplicity), or '' when no such window exists.
    """
    def minWindow(self, s: str, t: str) -> str:
        # Brute force: grow the window size starting from len(t) and try
        # every placement; the first hit is necessarily a minimum window.
        # (Original comment, translated from Korean: "grow gradually,
        # starting from the size of T".)
        def contains(s_substr_lst: List, t_lst: List):
            # Destructively match each char of t against the candidate
            # window; list.remove honors multiplicity.
            for t_elem in t_lst:
                if t_elem in s_substr_lst:
                    s_substr_lst.remove(t_elem)
                else:
                    return False
            return True
        if not s or not t:
            return ''
        window_size = len(t)
        for size in range(window_size, len(s) + 1):
            for left in range(len(s) - size + 1):
                s_substr = s[left:left+size]
                if contains(list(s_substr), list(t)):
                    return s_substr
        return ''
    def two_pointer_with_window(self, s: str, t: str) -> str:
        # O(n) sliding window. `missing` counts chars of t still unmatched;
        # `need` goes negative for surplus characters inside the window.
        need = collections.Counter(t)
        missing = len(t)
        left = start = end = 0
        # right is 1-based so s[start:end] slices the window directly.
        for right, char in enumerate(s, 1):
            missing -= need[char] > 0
            need[char] -= 1
            if missing == 0:
                # Shrink from the left while the leftmost char is surplus.
                while left < right and need[s[left]] < 0:
                    need[s[left]] += 1
                    left += 1
                # Record the smallest window seen so far.
                if not end or right - left <= end - start:
                    start, end = left, right
                # Evict the left edge so the scan resumes for a new window.
                need[s[left]] += 1
                missing += 1
                left += 1
        return s[start:end]
    def boo_counter(self, s: str, t: str) -> str:
        # Counter-intersection variant: the window is valid exactly when
        # current_count & t_count == t_count (multiset containment).
        t_count = collections.Counter(t)
        current_count = collections.Counter()
        # Infinite sentinels: replaced by real int indices on first match.
        start = float('-inf')
        end = float('inf')
        left = 0
        for right, char in enumerate(s, 1):
            current_count[char] += 1
            while current_count & t_count == t_count:
                if right - left < end - start:
                    start,end = left, right
                current_count[s[left]] -= 1
                left += 1
        # start/end remain infinite when no window was ever found.
        return s[start:end] if end-start <= len(s) else ''
print(Solution().minWindow("ADOBECODEBANC", "ABC")) # "BANC
print(Solution().minWindow("a", "a")) | [
"dbfpzk142@gmail.com"
] | dbfpzk142@gmail.com |
25222e105efb2fc73e5d687d3d5a216295f15d07 | db64a89fa291fa3ddbb5a283412d66715acb6912 | /tbe/impl/leaky_relu_npu.py | c1e4dd9364c90a387e72eb39491e26845cc81d12 | [] | no_license | zyx1999/hw_operator | 7f310b6605d2eda5b153bb02087f7ce7c2432fa2 | cf5fda7c12b4d516ef51704996dfb7b42e989f41 | refs/heads/main | 2023-09-05T13:37:18.545373 | 2021-10-07T16:37:03 | 2021-10-07T16:37:03 | 378,068,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,517 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Copyright (C) 2018. Huawei Technologies Co., Ltd. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0.You may not use this
file except in compliance with the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Apache License for more details at
http://www.apache.org/licenses/LICENSE-2.0
cce extended operator builder wrapper
"""
from te import tvm
import te.lang.cce
from te.platform.fusion_manager import fusion_manager
from topi import generic
# pylint: disable=locally-disabled,unused-argument,invalid-name
@fusion_manager.register("leaky_relu_demo")
def leaky_relu_demo_compute(x, y, negative_slope=0, kernel_name="leaky_relu"):
    """Build the TBE compute graph for leaky relu:
    f(x) = x for x >= 0, negative_slope * x for x < 0 (elementwise).

    Parameters
    ----------
    x : tvm tensor
        Input tensor; dtype one of float16/float32/int32/int8.
    y : dict
        Output description; unused here, kept for the registered op
        signature.
    negative_slope : float or int
        Slope for negative inputs; 0 reduces to plain relu.
    kernel_name : str
        Kernel name; unused in the compute stage.

    Returns
    -------
    tvm tensor with the same dtype as ``x``.
    """
    inp_dtype = x.dtype.lower()
    shape = x.shape
    # The original relu logic remains unchanged.
    if negative_slope == 0:
        if inp_dtype in ("float32", "int32"):
            # relu emulated as max(x, 0) for these dtypes; vrelu otherwise.
            tensor_zero = te.lang.cce.broadcast(tvm.const(0, inp_dtype), shape)
            data_res = te.lang.cce.vmax(x, tensor_zero)
        else:
            data_res = te.lang.cce.vrelu(x)
        # Cast back so the result dtype matches the input dtype.
        data_res = te.lang.cce.cast_to(data_res, inp_dtype)
        return data_res
    # negative_slope != 0
    if inp_dtype in ("float16", "float32"):
        slope_tmp = tvm.const(negative_slope, dtype=inp_dtype)
        tmp = te.lang.cce.vmuls(x, slope_tmp)
        # For slope <= 1, slope*x <= x on positives and >= x on negatives,
        # so max() selects the correct branch; for slope > 1 it is min().
        if negative_slope <= 1:
            res = te.lang.cce.vmax(x, tmp)
        else:
            res = te.lang.cce.vmin(x, tmp)
    else:
        # inp_dtype in ("int32", "int8")
        slope_tmp = tvm.const(negative_slope, dtype=inp_dtype)
        tmp = te.lang.cce.vmuls(x, slope_tmp)
        # NOTE(review): the extra cast back to the integer dtype before the
        # comparison suggests vmuls promotes the dtype — confirm vs te docs.
        tmp_oritype = te.lang.cce.cast_to(tmp, inp_dtype)
        if negative_slope <= 1:
            res = te.lang.cce.vmax(x, tmp_oritype)
        else:
            res = te.lang.cce.vmin(x, tmp_oritype)
    res = te.lang.cce.cast_to(res, inp_dtype)
    return res
def leaky_relu_demo(x, y, negative_slope=0, kernel_name="leaky_relu"):
    """leaky_relu op entry point.

    f(x) = x (x >= 0) or negative_slope * x (x < 0), i.e. for negative
    inputs it is equal to f(x) = negative_slope * x.

    Parameters
    ----------
    x : dict
        input description with "shape" and "dtype" keys
    y : dict
        dict with keys(shape and dtype) of output
    negative_slope : float or int
        allow non-zero slope for negative inputs to speed up optimization
    kernel_name : str
        cce kernel name, default value is "leaky_relu"

    Returns
    -------
    None
    """
    shape = x.get("shape")
    dtype = x.get("dtype")
    inp_dtype = dtype.lower()
    # Validate dtype before touching the hardware build pipeline.
    supported_dtypes = ["float16", "float32", "int32", "int8"]
    if inp_dtype not in supported_dtypes:
        raise RuntimeError(
            "leaky relu only support %s while dtype is %s"
            % (",".join(supported_dtypes), dtype))
    placeholder_x = tvm.placeholder(shape, name="input_data_x", dtype=inp_dtype)
    # Build the compute graph and auto-schedule it for the cce target.
    with tvm.target.cce():
        res = leaky_relu_demo_compute(placeholder_x, y, negative_slope, kernel_name)
        sch = generic.auto_schedule(res)
    config = {"name": kernel_name,
              "tensor_list": [placeholder_x, res]}
    te.lang.cce.cce_build_code(sch, config)
| [
"18218490078@163.com"
] | 18218490078@163.com |
20baf6dabfef2f97561aa97f300756ece414e7a8 | 2975093d6ce2a66e0299510967116de899d8d393 | /pruebas/test_size.py | 7dd3686e051caa99c64fe34023d85e716540fc4e | [] | no_license | RamsesCamas/D-Pyvot | e4e008ff1c8734869c230a92f1312f211002aeb5 | 14f32f0e8b8a30f5bff7834ba2a987e62ef69bc2 | refs/heads/main | 2023-04-14T00:40:07.687132 | 2021-04-30T21:24:28 | 2021-04-30T21:24:28 | 360,339,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | import cv2
import numpy as np
# Load the test image as a BGR array; imread returns None on a bad path.
img = cv2.imread('imgs/susto.jpg')
print(img.shape) #382,480
# Resize to 640x480 — note cv2.resize takes (width, height).
img_rs = cv2.resize(img,(640,480))
# Crop the top-left 200x200 pixel block (rows first, then columns).
img_crop = img[0:200,0:200]
cv2.imshow('Imagen',img)
cv2.imshow('Imagen redimensionada',img_rs)
cv2.imshow('Imagen cortada',img_crop)
cv2.waitKey(0) | [
"machiniram@gmail.com"
] | machiniram@gmail.com |
67ddf0f228615775a73a1d0de0387e72ea091a88 | 1038db151d2eb24280272aecb9337053548acced | /Password generator Hard Way.py | 2e04239280da0d44c1e46d494e7d70813dd87df2 | [] | no_license | mygerges/100-Days-of-Code | 75ffeb91ea39c77d0a852c8d64037633574178f4 | 1e0a01c2853ce05204c3a36bccd8244739050e87 | refs/heads/main | 2023-02-28T00:48:30.599680 | 2021-02-06T13:52:19 | 2021-02-06T13:52:19 | 336,290,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | # Password generator Hard Way
import random
# Character pools for the generated password.
letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
symbols = ['!', '#', '$', '%', '&', '(', ')', '*', '+']

print("Welcome to the PyPassword Generator")
letter_selected = int(input("How many letters would you like in your password\n"))
symbols_selected = int(input("How many symbols would you like in your password\n"))
numbers_selected = int(input("How many numbers would you like in your password\n"))

# Draw the requested number of characters from each pool.
# BUG FIX: the original loops were swapped — the symbol count drew from
# `numbers` and the number count drew from `symbols`.
password_list = [random.choice(letters) for _ in range(letter_selected)]
password_list += [random.choice(symbols) for _ in range(symbols_selected)]
password_list += [random.choice(numbers) for _ in range(numbers_selected)]

# Shuffle so character classes are not grouped, then join in linear time.
random.shuffle(password_list)
password = "".join(password_list)
print(password)
print(password) | [
"mina.y.gerges@gmail.com"
] | mina.y.gerges@gmail.com |
ba739e1e9487460532edf7325747f1c35b66b048 | 1e9ad304868c2bda918c19eba3d7b122bac3923b | /kubernetes/client/models/v1beta1_http_ingress_rule_value.py | 168b201cfa6cc6450e6154e0ffdd4d11d9e0805c | [
"Apache-2.0"
] | permissive | pineking/client-python | c77e5bd3d476ac852e6dffa96056008baa0f597f | 74a64d7325518f4298600d4bb300f92843c29347 | refs/heads/master | 2021-01-22T22:16:27.368406 | 2017-03-15T08:21:21 | 2017-03-15T08:21:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,219 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.5.1-660c2a2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1HTTPIngressRuleValue(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, paths=None):
        """
        V1beta1HTTPIngressRuleValue - a model defined in Swagger

        :param paths: list of V1beta1HTTPIngressPath objects mapping request
            paths to backends. Stored as given; the required-field check
            happens only in the ``paths`` setter.
        """
        # Attribute name -> swagger type; drives generic serialization.
        self.swagger_types = {
            'paths': 'list[V1beta1HTTPIngressPath]'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'paths': 'paths'
        }

        self._paths = paths

    @property
    def paths(self):
        """
        Gets the paths of this V1beta1HTTPIngressRuleValue.
        A collection of paths that map requests to backends.

        :return: The paths of this V1beta1HTTPIngressRuleValue.
        :rtype: list[V1beta1HTTPIngressPath]
        """
        return self._paths

    @paths.setter
    def paths(self, paths):
        """
        Sets the paths of this V1beta1HTTPIngressRuleValue.
        A collection of paths that map requests to backends.

        :param paths: The paths of this V1beta1HTTPIngressRuleValue.
        :type: list[V1beta1HTTPIngressPath]
        :raises ValueError: if ``paths`` is None (the field is required).
        """
        if paths is None:
            raise ValueError("Invalid value for `paths`, must not be `None`")
        self._paths = paths

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() replaces six.iteritems — identical behavior on
        # Python 3 without the third-party dependency.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        BUG FIX: comparing against an arbitrary object used to raise
        AttributeError when it had no __dict__; now returns False instead.
        """
        if not isinstance(other, V1beta1HTTPIngressRuleValue):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"mehdy@google.com"
] | mehdy@google.com |
16b6d2a2bb371aec5835e7f3d24bccfd8b4ec178 | b005d794cfd8e3b063b08d6a266b1e07f0f0f5e9 | /src/webapp/geolist/forms.py | 37418890337878c9eab7f4a4c60577f54493ef96 | [] | no_license | GeoRemindMe/GeoRemindMe_Web | 593c957faa5babb3040da86d94a5d884ad4b2db3 | d441693eedb32c36fe853895110df808a9959941 | refs/heads/master | 2021-01-16T18:29:39.633445 | 2011-11-05T23:50:37 | 2011-11-05T23:50:37 | 1,841,418 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 928 | py | # coding=utf-8
from django import forms
from django.utils.translation import gettext_lazy as _
from georemindme.models_utils import VISIBILITY_CHOICES
class ListRequestedForm(forms.Form):
name = forms.CharField(required=True)
description = forms.CharField(required=False,widget=forms.Textarea())
visibility = forms.ChoiceField(required=True, choices=VISIBILITY_CHOICES)
# only save if it is valid
def save(self, **kwargs):
from geouser.models import User
if not isinstance(kwargs['user'], User):
raise TypeError
from models import ListRequested
if kwargs['id'] is None:
list = ListRequested.insert_list(user=kwargs['user'],
name=self.cleaned_data['name'],
description=self.cleaned_data['description']
) | [
"jneight@gmail.com"
] | jneight@gmail.com |
75afb06cb1144006f881e49d5a26f90b5f5c599b | 961782dd9414f9fc02643875bb174f2c1692e0f4 | /python_data/kFold.py | bd51067ddb93752bb708a0208a9a6dc6fbafa656 | [] | no_license | haoyu7/ECE-4424-Final-Project | 46da07502f4a06693737eca16cd72e4de4c9778b | 9de5e660c3d6ce3f9ea678e5acf8e8ed665ed730 | refs/heads/master | 2021-08-28T23:19:56.583969 | 2017-12-13T08:22:11 | 2017-12-13T08:22:11 | 114,090,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,137 | py | import pandas as pd
import numpy as np
from BayesNet import *
def kFold(data, k=10, structure=None, verbose=True):
    """Run k-fold cross-validation of a BayesNet classifier on *data*.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain a 'Class' column; a temporary 'fold' column is added.
    k : int
        Number of folds (default 10).
    structure : list, optional
        Network structure passed to BayesNet; defaults to [0, 0, 0, 0, 0, 0].
        (Was a mutable default argument — now a None sentinel.)
    verbose : bool
        When True, print per-fold and overall accuracy.

    Returns
    -------
    float
        Mean accuracy across the k folds.
    """
    if structure is None:
        structure = [0, 0, 0, 0, 0, 0]
    # BUG FIX: len(data)/k is a float on Python 3 and np.repeat requires an
    # integer repeat count — use floor division.
    fold_size = len(data) // k
    fold_ids = np.repeat(list(range(k)), repeats=fold_size)
    data['fold'] = pd.Series(fold_ids)
    accuracy_list = []
    for fold in range(k):
        # Non-inplace drop on the slice avoids pandas' chained-assignment
        # warning (the original called drop(..., inplace=True) on a view).
        train = data[data['fold'] != fold].drop('fold', axis=1)
        test = data[data['fold'] == fold].drop('fold', axis=1)
        net = BayesNet(4, structure)
        net.initGraph()
        net.compCPT(train)
        errors = 0
        # Testing: predict each held-out row and count mismatches.
        for i in range(len(test)):
            row = test.iloc[i:(i + 1)]
            if net.predict(row) != test.iloc[i]['Class']:
                errors += 1
        acc = float(fold_size - errors) / fold_size
        accuracy_list.append(acc)
        if verbose:
            print("Fold :%d Accuracy : %f" % (fold, acc))
    if verbose:
        print("Overall CV accuracy : %f" % (np.mean(accuracy_list)))
    return np.mean(accuracy_list)
"haoyu7@vt.edu"
] | haoyu7@vt.edu |
93f230054d400799104fcdb3ea09d4509a9a6aed | 3a00e5e2ee41d9de0f1e973e5727d5ba36c5858f | /4.py | e9b14b5a4eb4059805cbda94d3644ad1217ab6a4 | [] | no_license | Ashok0822/ashok | 53380dec5042bcbd7a99c78b9fcdb817abe790f3 | 9c5cf5806476aee242695d5d6fdbbb9c0578b6dd | refs/heads/master | 2020-06-21T02:47:15.166541 | 2019-07-17T06:44:22 | 2019-07-17T06:44:22 | 197,326,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | #a
# Read two whitespace-separated words and print the longer one; on a tie
# the second word wins (matching the original else-branch).
first, second = input().split()
print(max((second, first), key=len))
| [
"noreply@github.com"
] | Ashok0822.noreply@github.com |
738f04c894b1bf7886e3975064d4408d62f67ff1 | e3714ffec33610203e0c4d00bebcd6f9c97e2ddd | /cfgs/config.py | 7273e6824af83763171e0cd6839b4847a2ddeff7 | [] | no_license | Peratham/yolo2-pytorch | 21cd092efffd9f38a740ac14ead32833fa9d131a | 6a4c2156ef9d0ca8c1ea44c0bde3c296ad42a96a | refs/heads/master | 2021-01-21T10:42:10.049098 | 2017-02-28T16:09:36 | 2017-02-28T16:09:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,512 | py | import os
from config_voc import *
from exps.darknet19_exp1 import *
def mkdir(path, max_depth=3):
    """Create *path*, creating up to ``max_depth - 1`` missing ancestors.

    The recursion is depth-bounded: once ``max_depth`` reaches 1 no more
    parents are created, so a deeper chain of missing directories will make
    ``os.mkdir`` fail as usual. Existing directories are left untouched.
    """
    parent = os.path.split(path)[0]
    # Ensure the parent exists first (bounded recursion).
    if max_depth > 1 and not os.path.exists(parent):
        mkdir(parent, max_depth - 1)
    if not os.path.exists(path):
        os.mkdir(path)
# input and output size
############################
inp_size = np.array([416, 416], dtype=np.int)
out_size = inp_size / 32
# for display
############################
def _to_color(indx, base):
""" return (b, r, g) tuple"""
base2 = base * base
b = 2 - indx / base2
r = 2 - (indx % base2) / base
g = 2 - (indx % base2) % base
return b * 127, r * 127, g * 127
# Smallest base whose cube covers num_classes distinct colors.
base = int(np.ceil(pow(num_classes, 1. / 3)))
# One (b, r, g) display color per class.
colors = [_to_color(x, base) for x in range(num_classes)]
# detection config
############################
# Confidence threshold for keeping detections.
thresh = 0.3
# dir config
############################
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
TRAIN_DIR = os.path.join(MODEL_DIR, 'training')
TEST_DIR = os.path.join(MODEL_DIR, 'testing')
# h5_fname / pretrained_fname / exp_name / imdb_test come from the
# star-imported experiment config modules at the top of this file.
trained_model = os.path.join(MODEL_DIR, h5_fname)
pretrained_model = os.path.join(MODEL_DIR, pretrained_fname)
train_output_dir = os.path.join(TRAIN_DIR, exp_name)
test_output_dir = os.path.join(TEST_DIR, imdb_test, h5_fname)
# Create the output directories up front (depth-bounded recursive mkdir).
mkdir(train_output_dir, max_depth=3)
mkdir(test_output_dir, max_depth=4)
rand_seed = 1024
use_tensorboard = True
max_epoch = 10
max_step = 1000
| [
"longch1024@gmail.com"
] | longch1024@gmail.com |
01e08f54ced01fc596322abf5335aff753179702 | b7a31b27b2ae0386c9a4c96b668a34caa6ce93e6 | /models/networks/LeNet300.py | 91c652e5a6401db1dccd391f0974f5dd1572c1a8 | [
"MIT"
] | permissive | scott-mao/SNIP-it | 7663c58025c7639ffe5eb42d969c2555af1d9327 | fec11fcda3038d9ad7246ddb1ee9889049f03fc8 | refs/heads/master | 2023-01-07T10:04:54.215425 | 2020-06-12T15:18:06 | 2020-06-12T15:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | import torch
import torch.nn as nn
from models.Pruneable import Pruneable
import numpy as np
class LeNet300(Pruneable):
    """LeNet-300-100 style MLP (300 and 100 hidden units) assembled from the
    prunable Linear layers supplied by the Pruneable base class, with batch
    norm and LeakyReLU between the fully connected layers.
    """

    def __init__(self, device="cuda", output_dim=2, input_dim=(1,), **kwargs):
        super(LeNet300, self).__init__(device=device, output_dim=output_dim, input_dim=input_dim, **kwargs)

        # Flatten the (possibly multi-axis) input shape to one feature dim.
        flat_features = int(np.prod(input_dim))

        negative_slope = 0.05
        init_gain = nn.init.calculate_gain('leaky_relu', negative_slope)

        modules = [
            self.Linear(input_dim=flat_features, output_dim=300, bias=True, gain=init_gain),
            nn.BatchNorm1d(300, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.LeakyReLU(negative_slope),
            self.Linear(input_dim=300, output_dim=100, bias=True, gain=init_gain),
            nn.BatchNorm1d(100, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True),
            nn.LeakyReLU(negative_slope),
            self.Linear(input_dim=100, output_dim=output_dim, bias=True),
        ]
        self.layers = nn.Sequential(*modules).to(device)

    def forward(self, x: torch.Tensor):
        """Flatten each sample in the batch and run it through the MLP."""
        flattened = x.view(x.shape[0], -1)
        return self.layers.forward(flattened)
if __name__ == '__main__':
    # Smoke test: build the MLP for several input shapes and print the
    # forward-pass output shape (expected (batch, 10)). Requires a GPU.
    device = "cuda"
    mnist = torch.randn((21, 1, 28, 28)).to(device)
    cifar = torch.randn((21, 3, 32, 32)).to(device)
    imagenet = torch.randn((2, 4, 244, 244)).to(device)
    for test_batch in [mnist, cifar, imagenet]:
        conv = LeNet300(output_dim=10, input_dim=test_batch.shape[1:], device=device)
        print(conv.forward(test_batch).shape)
print(conv.forward(test_batch).shape) | [
"stijn@verdenius.com"
] | stijn@verdenius.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.