blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ca961b1e71f986614c0bf218071657be14d01724 | 7a9607d97bbf19eea7f539013ad96b21449111d0 | /dialogs/views.py | 468e291b442b2b11118e6ead4b81d2d5a0ed4b96 | [] | no_license | python-coach/microsocial2 | a9911a34f8a47d10f42c0d5ea5de934cf76bc984 | b3b64a231d90cee41f0a4b707cd11a124fbe088f | refs/heads/master | 2021-01-02T22:45:22.986911 | 2015-06-30T14:36:56 | 2015-06-30T14:36:56 | 35,723,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,789 | py | from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.utils.decorators import method_decorator
from django.views.generic.base import TemplateView
from dialogs.forms import MessageForm
from dialogs.models import Dialog
class DialogView(TemplateView):
    """Render the current user's dialog list and, optionally, one open dialog.

    When the URL carries a ``user_id``, the conversation with that user is
    resolved in ``dispatch`` and exposed to the template together with a
    message form.
    """

    template_name = 'dialogs/dialog.html'

    @method_decorator(login_required)
    def dispatch(self, request, *args, **kwargs):
        """Resolve opponent, dialog and message form before handling the request."""
        self.opponent = None
        self.dialog = None
        self.form = None
        if 'user_id' in kwargs:
            self.opponent = get_object_or_404(get_user_model(), pk=kwargs['user_id'])
            self.dialog = Dialog.objects.get_or_create(request.user, self.opponent)
            if not self.dialog:
                raise Http404
            self.form = MessageForm(request.POST or None)
        return super(DialogView, self).dispatch(request, *args, **kwargs)

    def _paginate(self, object_list, page):
        """Return one 20-item page, clamping non-numeric or out-of-range pages."""
        pager = Paginator(object_list, 20)
        try:
            return pager.page(page)
        except PageNotAnInteger:
            return pager.page(1)
        except EmptyPage:
            return pager.page(pager.num_pages)

    def get_dialogs(self):
        """Dialogs of the current user that already contain at least one message."""
        qs = Dialog.objects.for_user(self.request.user).select_related('user1', 'user2').filter(
            last_message__isnull=False
        ).order_by('-last_message__created')
        return self._paginate(qs, self.request.GET.get('dialogs-page'))

    def get_messages(self):
        """Messages of the open dialog, or None when no dialog is selected."""
        if not self.dialog:
            return
        return self._paginate(self.dialog.messages.all(), self.request.GET.get('messages-page'))

    def get_context_data(self, **kwargs):
        context = super(DialogView, self).get_context_data(**kwargs)
        context['dialogs'] = self.get_dialogs()
        context['opponent'] = self.opponent
        context['dialog_messages'] = self.get_messages()
        context['form'] = self.form
        return context

    def post(self, request, *args, **kwargs):
        """Save a new message into the open dialog, then redirect (PRG pattern)."""
        if self.form and self.form.is_valid():
            message = self.form.save(commit=False)
            message.dialog = self.dialog
            message.sender = request.user
            message.save()
            return redirect(request.get_full_path())
        return self.get(request, *args, **kwargs)
| [
"igormeln@gmail.com"
] | igormeln@gmail.com |
8a83bf8ef20f20ee95cecc0b2477a686e3b614b9 | d53c2df06539619d97d2de2c0150ec4821eef8a7 | /django_env/bin/pygmentize | 8e0319c1fe38fc496cd7ef8a21ddfc0d4dcde7d6 | [] | no_license | joelranjithjebanesan7/Python-Django | 4098cff3fcae6b5dbd33c977d2b00d8dce627a0e | 42ec7bdfa678d7efe10972fad359b866af3fdf58 | refs/heads/master | 2020-04-17T23:14:38.044382 | 2019-03-02T06:09:06 | 2019-03-02T06:09:06 | 167,027,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/joelrj/Python-Django/django_env/bin/python
# -*- coding: utf-8 -*-
import re
import sys

from pygments.cmdline import main

if __name__ == '__main__':
    # setuptools may install this script as "pygmentize-script.py(w)" or
    # "pygmentize.exe"; strip that suffix so usage/error output shows the
    # plain program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"joel@tivonaglobal.com"
] | joel@tivonaglobal.com | |
19bad92cb7eb649553003bebd4eaf1d46b583df9 | d44ce693005ad7526060275804899a8154031e57 | /wikipedia/wikipedia.py | d8da9334cbd4b556fb835ef7666a1a8f771667f2 | [
"MIT"
] | permissive | bobbybabra/Wikipedia | efffe6047ba5560d2f31c1ff99722c188860fd48 | bc92b6565a4f0e1db99b595de943efba06c7f1e5 | refs/heads/master | 2021-01-18T04:49:59.057298 | 2013-08-23T18:22:31 | 2013-08-23T18:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,643 | py | import requests
from bs4 import BeautifulSoup
from .exceptions import *
from .util import cache
@cache
def search(query, results=10, suggestion=False):
    """
    Do a Wikipedia search for `query`.

    Keyword arguments:

    * results - the maxmimum number of results returned
    * suggestion - if True, return results and suggestion (if any) in a tuple
    """
    params = {
        "list": "search",
        "srprop": "",
        "srlimit": results,
    }
    if suggestion:
        params["srinfo"] = "suggestion"
    params["srsearch"] = query
    params["limit"] = results

    response = _wiki_request(**params)
    titles = [entry['title'] for entry in response['query']['search']]

    if suggestion:
        info = response['query'].get('searchinfo')
        if info:
            return titles, info['suggestion']
        return titles, None
    return titles
@cache
def suggest(query):
    """
    Get a Wikipedia search suggestion for `query`.
    Returns a string or None if no suggestion was found.
    """
    params = {
        "list": "search",
        "srinfo": "suggestion",
        "srprop": "",
        "srsearch": query,
    }
    result = _wiki_request(**params)
    info = result['query'].get('searchinfo')
    if info:
        return info['suggestion']
    return None
def random(pages=1):
    """
    Get a list of random Wikipedia article titles.

    .. note:: Random only gets articles from namespace 0, meaning no
       Category, User talk, or other meta-Wikipedia pages.

    Keyword arguments:

    * pages - the number of random pages returned (max of 10)
    """
    params = {
        'list': "random",
        'rnnamespace': 0,
        'rnlimit': pages,
    }
    response = _wiki_request(**params)
    titles = [entry['title'] for entry in response['query']['random']]
    # A single requested page is returned as a bare title, not a list.
    if len(titles) == 1:
        return titles[0]
    return titles
@cache
def summary(title, sentences=0, chars=0, auto_suggest=True, redirect=True):
    """
    Plain text summary of the page.

    .. note:: This is a convenience wrapper - auto_suggest and redirect are enabled by default

    Keyword arguments:

    * sentences - if set, return the first `sentences` sentences
    * chars - if set, return only the first `chars` characters.
    * auto_suggest - let Wikipedia find a valid page title for the query
    * redirect - allow redirection without raising RedirectError
    """
    # BUG FIX: previously auto_suggest/redirect were hard-coded to True here,
    # silently ignoring the caller's arguments. Pass them through so page()
    # can raise DisambiguationError / RedirectError as the caller expects.
    page_info = page(title, auto_suggest=auto_suggest, redirect=redirect)
    title = page_info.title
    pageid = page_info.pageid

    query_params = {
        'prop': "extracts",
        'explaintext': "",
        'titles': title
    }
    # sentences takes precedence over chars; with neither set, return the intro.
    if sentences:
        query_params['exsentences'] = sentences
    elif chars:
        query_params['exchars'] = chars
    else:
        query_params['exintro'] = ""

    request = _wiki_request(**query_params)
    return request['query']['pages'][pageid]['extract']
def page(title, auto_suggest=True, redirect=True, preload=False):
    """
    Get a WikipediaPage object for the page with title `title`.

    Keyword arguments:

    * auto_suggest - let Wikipedia find a valid page title for the query
    * redirect - allow redirection without raising RedirectError
    * preload - load content, summary, images, references, and links during initialization
    """
    if auto_suggest:
        hits, proposed = search(title, results=1, suggestion=True)
        if proposed:
            title = proposed
        elif hits:
            title = hits[0]
        else:
            # Neither a suggestion nor a search hit: the page does not exist.
            raise PageError(title)
    return WikipediaPage(title, redirect=redirect, preload=preload)
class WikipediaPage(object):
    """
    Contains data from a Wikipedia page.
    Uses property methods to filter data from the raw HTML.

    Heavy data (content, summary, images, references, links) is fetched
    lazily on first access and cached on the instance.
    """

    def __init__(self, title, redirect=True, preload=False, original_title=""):
        self.title = title
        self.original_title = original_title or title

        self.load(redirect=redirect, preload=preload)

        if preload:
            # Touching each property fetches and caches its data up front.
            for prop in ["content", "summary", "images", "references", "links"]:
                getattr(self, prop)

    def __repr__(self):
        return u'<WikipediaPage \'%s\'>' % self.title

    def load(self, redirect=True, preload=False):
        """
        Load basic information from Wikipedia.
        Confirm that page exists and is not a disambiguation/redirect.

        Raises PageError for a missing page, RedirectError for a redirect
        when `redirect` is False, and DisambiguationError for a
        disambiguation page.
        """
        query_params = {
            'prop': "info|categories",
            'inprop': "url",
            'clcategories': "Category:All disambiguation pages",
            'titles': self.title
        }
        request = _wiki_request(**query_params)
        # BUG FIX: dict.keys() is a view and not indexable on Python 3;
        # materialize it before taking the first (only) page id.
        pageid = list(request['query']['pages'].keys())[0]
        data = request['query']['pages'][pageid]

        # The API marks a nonexistent page with an empty 'missing' value.
        if data.get('missing') == "":
            raise PageError(self.title)

        # Likewise, an empty 'redirect' value flags a redirect page.
        elif data.get('redirect') == "":
            if redirect:
                # Extract the target title and reload the whole object.
                query_params = {
                    'prop': "extracts",
                    'explaintext': "",
                    'titles': self.title
                }
                request = _wiki_request(**query_params)
                title = ' '.join(request['query']['pages'][pageid]['extract'].split()[1:])
                self.__init__(title, redirect=redirect, preload=preload)
            else:
                raise RedirectError(self.title)

        # Categories were limited to the disambiguation category, so any
        # category hit means this page is a disambiguation page.
        elif data.get('categories'):
            request = _wiki_request(titles=self.title, prop="revisions", rvprop="content", rvparse="", rvlimit=1)
            html = request['query']['pages'][pageid]['revisions'][0]['*']
            may_refer_to = [li.a.get_text() for li in BeautifulSoup(html).ul.find_all('li')]
            raise DisambiguationError(self.title, may_refer_to)

        else:
            self.pageid = pageid
            self.url = data['fullurl']

    def html(self):
        """
        Get full page HTML.

        .. warning:: this can get pretty slow on long pages.
        """
        if not getattr(self, "_html", False):
            query_params = {
                'prop': "revisions",
                'rvprop': "content",
                'rvlimit': 1,
                'rvparse': "",
                'titles': self.title
            }
            request = _wiki_request(**query_params)
            self._html = request['query']['pages'][self.pageid]['revisions'][0]['*']
        return self._html

    @property
    def content(self):
        """
        Plain text content of the page, excluding images, tables, and other data.
        """
        if not getattr(self, "_content", False):
            query_params = {
                'prop': "extracts",
                'explaintext': "",
                'titles': self.title
            }
            request = _wiki_request(**query_params)
            self._content = request['query']['pages'][self.pageid]['extract']
        return self._content

    @property
    def summary(self):
        """
        Plain text summary of the page (the intro extract).
        """
        if not getattr(self, "_summary", False):
            query_params = {
                'prop': "extracts",
                'explaintext': "",
                'exintro': "",
                'titles': self.title
            }
            request = _wiki_request(**query_params)
            self._summary = request['query']['pages'][self.pageid]['extract']
        return self._summary

    @property
    def images(self):
        """
        List of URLs of images on the page.
        """
        if not getattr(self, "_images", False):
            query_params = {
                'generator': "images",
                'gimlimit': "max",
                'prop': "imageinfo",
                'iiprop': "url",
                'titles': self.title,
            }
            request = _wiki_request(**query_params)
            pages = request['query']['pages']
            # Some generated entries carry no imageinfo; skip those.
            self._images = [
                image['imageinfo'][0]['url']
                for image in pages.values()
                if image.get('imageinfo')
            ]
        return self._images

    @property
    def references(self):
        """
        List of URLs of external links on a page.
        May include external links within page that aren't technically cited anywhere.
        """
        if not getattr(self, "_references", False):
            query_params = {
                'prop': "extlinks",
                'ellimit': "max",
                'titles': self.title,
            }
            request = _wiki_request(**query_params)
            links = request['query']['pages'][self.pageid]['extlinks']

            def add_protocol(url):
                # Protocol-relative links ('//example.com/...') get 'http:' prefixed.
                return url if url.startswith('http') else 'http:' + url

            self._references = [add_protocol(link['*']) for link in links]
        return self._references

    @property
    def links(self):
        """
        List of titles of Wikipedia page links on a page.

        .. note:: Only includes articles from namespace 0, meaning no Category, User talk, or other meta-Wikipedia pages.
        """
        if not getattr(self, "_links", False):
            links = []
            query_params = {
                'prop': "links",
                'plnamespace': 0,
                'pllimit': "max",
                'titles': self.title,
            }
            # Follow the API's continuation token until every link is fetched.
            while True:
                request = _wiki_request(**query_params)
                links.extend(link['title'] for link in request['query']['pages'][self.pageid]['links'])
                if not request.get('query-continue'):
                    break
                query_params['plcontinue'] = request['query-continue']['links']['plcontinue']
            self._links = links
        return self._links
def donate():
    """
    Open up the Wikimedia donate page in your favorite browser.
    """
    import webbrowser

    donate_url = "https://donate.wikimedia.org/w/index.php?title=Special:FundraiserLandingPage"
    # new=2 asks the browser for a new tab where possible.
    webbrowser.open(donate_url, new=2)
def _wiki_request(**params):
    """
    Make a request to the Wikipedia API using the given search parameters.
    Returns a parsed dict of the JSON response.
    """
    api_url = "http://en.wikipedia.org/w/api.php"
    # Every call goes through the 'query' action and asks for JSON back.
    params['format'] = "json"
    params['action'] = "query"
    response = requests.get(api_url, params=params)
    return response.json()
"jhghank@gmail.com"
] | jhghank@gmail.com |
9b64afa65c9d6ded04f35a8e66d55c8a70318c62 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/third_party/apis/serviceuser/v1/serviceuser_v1_client.py | 81860d607ce811e4c113893404142a4427ea51cd | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 7,177 | py | """Generated client library for serviceuser version v1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.serviceuser.v1 import serviceuser_v1_messages as messages
class ServiceuserV1(base_api.BaseApiClient):
"""Generated client library for service serviceuser version v1."""
# Autogenerated wrapper (see module note above): regenerate instead of
# editing by hand. Class-level constants identify this client to the API.
MESSAGES_MODULE = messages
BASE_URL = u'https://serviceuser.googleapis.com/'
_PACKAGE = u'serviceuser'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/service.management']
_VERSION = u'v1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'ServiceuserV1'
_URL_VERSION = u'v1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new serviceuser handle."""
url = url or self.BASE_URL
super(ServiceuserV1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
# One service wrapper per REST resource exposed by the API.
self.projects_services = self.ProjectsServicesService(self)
self.projects = self.ProjectsService(self)
self.services = self.ServicesService(self)
# Wraps the projects.services resource (enable/disable/list a service
# for a consumer project).
class ProjectsServicesService(base_api.BaseApiService):
"""Service class for the projects_services resource."""
_NAME = u'projects_services'
def __init__(self, client):
super(ServiceuserV1.ProjectsServicesService, self).__init__(client)
self._upload_configs = {
}
def Disable(self, request, global_params=None):
"""Disable a service so it can no longer be used with a.
project. This prevents unintended usage that may cause unexpected billing
charges or security leaks.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesDisableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Disable')
return self._RunMethod(
config, request, global_params=global_params)
Disable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.disable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:disable',
request_field=u'disableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesDisableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def Enable(self, request, global_params=None):
"""Enable a service so it can be used with a project.
See [Cloud Auth Guide](https://cloud.google.com/docs/authentication) for
more information.
Operation<response: google.protobuf.Empty>
Args:
request: (ServiceuserProjectsServicesEnableRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Enable')
return self._RunMethod(
config, request, global_params=global_params)
Enable.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'POST',
method_id=u'serviceuser.projects.services.enable',
ordered_params=[u'projectsId', u'servicesId'],
path_params=[u'projectsId', u'servicesId'],
query_params=[],
relative_path=u'v1/projects/{projectsId}/services/{servicesId}:enable',
request_field=u'enableServiceRequest',
request_type_name=u'ServiceuserProjectsServicesEnableRequest',
response_type_name=u'Operation',
supports_download=False,
)
def List(self, request, global_params=None):
"""List enabled services for the specified consumer.
Args:
request: (ServiceuserProjectsServicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListEnabledServicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.projects.services.list',
ordered_params=[u'projectsId'],
path_params=[u'projectsId'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/projects/{projectsId}/services',
request_field='',
request_type_name=u'ServiceuserProjectsServicesListRequest',
response_type_name=u'ListEnabledServicesResponse',
supports_download=False,
)
# Wraps the projects resource (no methods of its own in v1).
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(ServiceuserV1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
# Wraps the top-level services resource (search across all services).
class ServicesService(base_api.BaseApiService):
"""Service class for the services resource."""
_NAME = u'services'
def __init__(self, client):
super(ServiceuserV1.ServicesService, self).__init__(client)
self._upload_configs = {
}
def Search(self, request, global_params=None):
"""Search available services.
When no filter is specified, returns all accessible services. For
authenticated users, also returns all services the calling user has
"servicemanagement.services.bind" permission for.
Args:
request: (ServiceuserServicesSearchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SearchServicesResponse) The response message.
"""
config = self.GetMethodConfig('Search')
return self._RunMethod(
config, request, global_params=global_params)
Search.method_config = lambda: base_api.ApiMethodInfo(
http_method=u'GET',
method_id=u'serviceuser.services.search',
ordered_params=[],
path_params=[],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1/services:search',
request_field='',
request_type_name=u'ServiceuserServicesSearchRequest',
response_type_name=u'SearchServicesResponse',
supports_download=False,
)
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
e30f4d590e82a42c8f381fff4999181b8c0f708c | fc7d2c565105e31c0734006f895306dba3810721 | /Data/dataset.py | ed45c7d2debfe23c2d6d516f833c4a1d920c076b | [] | no_license | Marmelator/Hyperparameter-Anthillsearch | ad0801916ff0f6f808e6436961f164c098102661 | 5ca93a34bc4e2a275a56465882df4611920a7f4a | refs/heads/main | 2023-04-11T23:48:33.332621 | 2021-05-07T20:25:10 | 2021-05-07T20:25:10 | 365,344,731 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 651 | py | import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
class MushroomData():
    """Loads the mushroom CSV and one-hot encodes features and labels."""

    def __init__(self):
        # The CSV ships alongside this module.
        location = os.path.join(os.path.dirname(__file__), 'mushrooms.csv')
        self.x, self.y = self.assemble_matrix(location)

    def assemble_matrix(self, file_name):
        """Read *file_name*; return (transposed one-hot features, one-hot labels)."""
        frame = pd.read_csv(file_name)
        labels = pd.DataFrame(frame['class'])
        frame.drop('class', axis=1, inplace=True)
        # drop='if_binary' keeps a single column for two-valued categories.
        feature_encoder = OneHotEncoder(drop='if_binary')
        encoded_features = feature_encoder.fit_transform(frame).toarray()
        # NOTE(review): the same encoder object is re-fit on the labels here,
        # exactly as before -- presumably intentional since fit_transform
        # refits from scratch.
        encoded_labels = feature_encoder.fit_transform(labels).toarray()
        return np.transpose(encoded_features), encoded_labels
"kilian.schuettler@eckert-partner.it"
] | kilian.schuettler@eckert-partner.it |
2618795e21cd045120479b98251290d8eaa8f2c1 | 89d2587ff2bbefa763d0b471a1c2c2a295b87c8d | /workshop/build_cmos_gate_model.py | b48c4135bac6c861ac809db225151d8ba634b16b | [] | no_license | guyroyse/deep-learning-like-a-viking | 25fdd49505d2ab8ac464e568684bcc4c0ec0cf3d | 1df65fe7bb9a7e40f2d8e26796460de85f7a0ac9 | refs/heads/master | 2023-02-21T06:30:18.311773 | 2022-10-25T21:03:06 | 2022-10-25T21:03:06 | 142,622,777 | 31 | 12 | null | 2023-02-16T06:51:17 | 2018-07-27T20:54:43 | Python | UTF-8 | Python | false | false | 1,577 | py | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from keras.models import Sequential
from keras.layers import Dense
from keras.utils import np_utils
# x holds the voltages, y holds the labels
# NOTE(review): this lowercase x is never used again -- the feature matrix
# below is the capital X, and y is reassigned immediately; dead assignment.
x, y = [], []
# read the training samples from the CSV
data = pd.read_csv('data/cmos/cmos-gate.csv', encoding='utf-8')
# features: the two gate input voltages; target: the boolean output value
X = data.filter(items=['input_voltage_1','input_voltage_2'])
y = data.filter(items=['output_value'])
# convert X and y to numpy arrays (y flattened to a 1-D vector)
X, y = np.array(X), np.array(y).flatten()
# create an encoder for the boolean labels
encoder = LabelEncoder()
encoder.fit([True, False])
# convert the boolean labels to integer class ids
y = encoder.transform(y)
# convert the class ids to one-hot vectors (2 classes)
Y = np_utils.to_categorical(y, 2)
# hold out 20% of the samples for evaluation
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)
# configure the neural network: one hidden ReLU layer, softmax output
model = Sequential()
model.add(Dense(24, input_shape=(2, ), activation='relu'))
model.add(Dense(2, activation='softmax'))
# compile the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# train it
model.fit(X_train, Y_train, batch_size=32, epochs=5, verbose=1)
# evaluate on the held-out split: prints [loss, accuracy]
print()
print(model.evaluate(X_test, Y_test, verbose=0))
print()
# save weights (HDF5) and architecture (JSON) separately
model.save_weights('models/cmos_gate_model.h5')
with open('models/cmos_gate_model.json', 'w') as file:
file.write(model.to_json())
print("Saved model to 'models/cmos_gate_model.h5' and 'models/cmos_gate_model.json'")
print()
| [
"guy@guyroyse.com"
] | guy@guyroyse.com |
097bcb484e898145895118958d891df3c5377fe3 | 183e4126b2fdb9c4276a504ff3ace42f4fbcdb16 | /I семестр/Програмування (Python)/Лабораторні/Братун 6305/Приклади/34/Ex26.py | 4c5ba37cc50a32792e7f969423731ecf7a45162d | [] | no_license | Computer-engineering-FICT/Computer-engineering-FICT | ab625e2ca421af8bcaff74f0d37ac1f7d363f203 | 80b64b43d2254e15338060aa4a6d946e8bd43424 | refs/heads/master | 2023-08-10T08:02:34.873229 | 2019-06-22T22:06:19 | 2019-06-22T22:06:19 | 193,206,403 | 3 | 0 | null | 2023-07-22T09:01:05 | 2019-06-22T07:41:22 | HTML | UTF-8 | Python | false | false | 296 | py | import re
# Match every run of digits.
p1 = re.compile(r"[0-9]+")
print(p1.findall("2012, 2013, 2014, 2015, 2016"))

# Match runs of lowercase letters (none exist in the digits-only string).
p2 = re.compile(r"[a-z]+")
print(p2.findall("2012, 2013, 2014, 2015, 2016"))

# Phone-like pattern: three digits, dash, two digits, dash, two digits.
t = r"[0-9]{3}-[0-9]{2}-[0-9]{2}"
p = re.compile(t)
print(p.findall("322-55-98"))
print(p.findall("322-55-98, 678-56-12"))
| [
"mazanyan027@gmail.com"
] | mazanyan027@gmail.com |
bd548c6e28569374dce6cece185f426673c7f3d6 | 8d0eec5c051cf902df1ef004b537115b888fe5c6 | /async_dev/generators_two_way.py | 7483829ccf1ffe0d0ef3648065fd504c53c26ea0 | [] | no_license | MadhuV99/complete_py_course | 494300225eef49470a92290f908c1d6f1296cb4f | ade2ac8c5722c45196b700d3ad99f37c9deb76d8 | refs/heads/main | 2023-02-24T06:57:57.441762 | 2021-02-04T03:49:58 | 2021-02-04T03:49:58 | 329,334,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | from collections import deque
# friends = ['Rolf', 'Jose', 'Charlie', 'Jen', 'Anna']
friends = deque(('Rolf', 'Jose', 'Charlie', 'Jen', 'Anna'))


def get_friend():
    """Yield each friend name in turn."""
    yield from friends


def greet(g):
    """Yield a 'HELLO <name>' greeting for each name produced by generator *g*.

    BUG FIX: the original caught StopIteration and did `pass` inside the
    `while True` loop, so once *g* was exhausted, any further next() call
    spun forever in a busy loop. Returning ends this generator cleanly.
    """
    while True:
        try:
            friend = next(g)
        except StopIteration:
            return
        yield f'HELLO {friend}'


friends_generator = get_friend()
g = greet(friends_generator)

print(next(g))
print(next(g))
"madhuvasudevan@yahoo.com"
] | madhuvasudevan@yahoo.com |
51d7332654eab786b063de914b6c3a743ffaef5e | ae86ecac0a85317029097edef4726f63c1f7008a | /nblearn_part3.py | 96d796eecafa2f44f63cca14c9250f3d25232b7d | [] | no_license | vivektiwari7114/Text-Classification | 3a65b36cf2a4ae2d5820d4a7adca4aee326688b2 | ac94c9c37748f8a4d5f7c4ba8df7bc2a079ddc47 | refs/heads/master | 2021-01-18T20:18:01.355514 | 2016-09-27T21:34:35 | 2016-09-27T21:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,569 | py | #Basic Program Needs
import sys
import os
import math
from os.path import join, getsize
#Global Variables
spamDict = {}
hamDict = {}
#Function to recursively search the directory tree for training files
def getTheListOfFiles(folderName, rootDir):
    """Collect every .txt file that lives directly in a directory named *folderName*.

    folderName is expected lowercase; directory names are compared
    case-insensitively. Returns a list of full paths.
    """
    nameOfFiles = []
    for root, dirs, files in os.walk(rootDir):
        # BUG FIX: os.path.basename is portable -- the original split the
        # path on '/', which breaks on Windows separators.
        if os.path.basename(root).lower() == folderName:
            nameOfFiles.extend(os.path.join(root, name) for name in files if name.endswith(".txt"))
    return nameOfFiles
#Function to open a file and accumulate a word-frequency dictionary
def openIndividualFile(fileName, tempDict):
    """Add the lowercased word counts of one file into *tempDict* and return it.

    Counts accumulate across calls when the same dict is passed repeatedly.
    """
    # BUG FIX: 'with' closes the handle -- the original leaked it.
    with open(fileName, "r", encoding="latin1") as handle:
        fileContent = handle.read()
    for word in fileContent.split():
        sWord = word.lower()
        tempDict[sWord] = tempDict.get(sWord, 0) + 1
    return tempDict
def calculateSetDifference(source, destination):
    """Ensure every key of *source* exists in *destination* (missing keys get 0).

    Mutates and returns *destination*.
    """
    for word in source:
        destination.setdefault(word, 0)
    return destination
def countTheTotalWords(tempDict):
    """Return the total number of word occurrences (sum of all counts)."""
    # Idiomatic replacement for the manual accumulation loop.
    return sum(tempDict.values())
def calculateProbablity(givenClassDict, wordsInGivenClass, distinctCount, newProbDict):
    """Store the add-one-smoothed log P(word|class) of every word into *newProbDict*.

    Probability of a word = (count + 1) / (class total + vocabulary size),
    kept in log space. Mutates and returns *newProbDict*.
    """
    smoothed_total = wordsInGivenClass + distinctCount
    for word, count in givenClassDict.items():
        newProbDict[word] = math.log(count + 1) - math.log(smoothed_total)
    return newProbDict
def writeInFile(key, count, prob):
    """Append one "word count log-prob" line to the global model file."""
    # Relies on the module-level `outputfile` handle opened by the main script.
    outputfile.write(key + " " + str(count) + " " + str(prob) + " \n")
#Execution of the Main program: train a Naive Bayes spam model and write nbmodel.txt
sourceRootDirectory = sys.argv[1]
spamFiles = getTheListOfFiles("spam", sourceRootDirectory)
hamFiles = getTheListOfFiles("ham", sourceRootDirectory)
totalSpamcount = len(spamFiles)  # number of training mails labelled SPAM
totalHamcount = len(hamFiles)  # number of training mails labelled HAM
totalFileCount = totalSpamcount + totalHamcount  # total number of training mails

# Accumulate per-class word counts.
for sName in spamFiles:
    spamDict = openIndividualFile(sName, spamDict)
for hName in hamFiles:
    hamDict = openIndividualFile(hName, hamDict)

# Drop every word that appears in BOTH classes' top-100 most frequent lists:
# such words carry almost no discriminative information.
topHundredSpamWords = sorted(spamDict, key=spamDict.get, reverse=True)[:100]
topHundredHamWords = sorted(hamDict, key=hamDict.get, reverse=True)[:100]
commonDict = set(topHundredSpamWords).intersection(topHundredHamWords)
for wordFound in commonDict:
    spamDict.pop(wordFound, None)
    hamDict.pop(wordFound, None)

# Normalization: give both vocabularies the same key set (missing words -> 0).
# Order matters: the first call extends spamDict, the second then sees the union.
normalizeSpamDict = calculateSetDifference(hamDict, spamDict)
normalizeHamDict = calculateSetDifference(spamDict, hamDict)
distinctCount = len(normalizeSpamDict)  # distinct words across both classes
wordsInSpam = countTheTotalWords(normalizeSpamDict)  # total word tokens in SPAM
wordsInHam = countTheTotalWords(normalizeHamDict)  # total word tokens in HAM

# Laplace-smoothed log probabilities per class.
probablityForWordsInSpam = calculateProbablity(normalizeSpamDict, wordsInSpam, distinctCount, {})
probablityForWordsInHam = calculateProbablity(normalizeHamDict, wordsInHam, distinctCount, {})

# Write the model file (format consumed by the classifier stage).
outputfile = open("nbmodel.txt", 'w')
outputfile.write("SPAMOUTPUT\n")
for itemized in normalizeSpamDict:
    writeInFile(itemized, normalizeSpamDict.get(itemized), probablityForWordsInSpam.get(itemized))
outputfile.write("HAMOUTPUT\n")
for itemized in normalizeHamDict:
    writeInFile(itemized, normalizeHamDict.get(itemized), probablityForWordsInHam.get(itemized))
outputfile.write("endOfFile\n")
outputfile.write("totalSpamWords "+str(wordsInSpam) + " \n")
outputfile.write("totalHamWords "+str(wordsInHam) + " \n")
outputfile.write("distinctWords "+str(distinctCount) + " \n")
outputfile.write("totalSpamFIles "+str(totalSpamcount) + " \n")
outputfile.write("totalHamFIles "+str(totalHamcount) + " \n")
# BUG FIX: close (and flush) the model file -- the original leaked the handle.
outputfile.close()
| [
"vivektiwari7114@gmail.com"
] | vivektiwari7114@gmail.com |
f0ae052c5b0b8463da08c228210c0886e7c2f4a6 | 2fa12cde6a091a1559617e8f825b00f2a5c7f8ba | /src/007.py | 7441d7fc81f6d9ddf193b5423fc9674d9eb1bc6f | [] | no_license | yeasellllllllll/bioinfo-lecture-2021-07 | b9b333183047ddac4436180cd7c679e3cc0e399a | ce695c4535f9d83e5c9b4a1a8a3fb5857d2a984f | refs/heads/main | 2023-06-15T20:31:35.101747 | 2021-07-18T14:31:27 | 2021-07-18T14:31:27 | 382,995,460 | 0 | 0 | null | 2021-07-05T06:06:35 | 2021-07-05T02:45:29 | Python | UTF-8 | Python | false | false | 87 | py |
for i in range(2,9,2):
for j in range(1,10,1):
print(i, "*", j, '=', i*j)
| [
"yeasel6112@gmail.com"
] | yeasel6112@gmail.com |
a49e0a245eeb08aacd28e8d456ca89e6f3b6c12b | f577d89766b44b3fbe3d21ab288ccb819a65a097 | /solver/swa.py | b3bcb9e6907af7e3164fe76d73e4521c4f65339d | [
"Apache-2.0"
] | permissive | SmallMunich/centerX | 79fcb417b798c1cd671f54fb81d9103496a3ac33 | 1073753533f26483c3ab053a7d8753708fcacde7 | refs/heads/master | 2023-02-09T09:36:24.820201 | 2020-12-31T08:34:52 | 2020-12-31T08:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,911 | py | import warnings
from collections import defaultdict
import torch
from torch.optim.optimizer import Optimizer
class SWA(Optimizer):
def __init__(self, optimizer, swa_freq=None, swa_lr_start=None):
r"""Implements Stochastic Weight Averaging (SWA).
Stochastic Weight Averaging was proposed in `Averaging Weights Leads to
Wider Optima and Better Generalization`_ by Pavel Izmailov, Dmitrii
Podoprikhin, Timur Garipov, Dmitry Vetrov and Andrew Gordon Wilson
(UAI 2018).
SWA is implemented as a wrapper class taking optimizer instance as input
and applying SWA on top of that optimizer.
SWA can be used in two modes: automatic and manual. In the automatic
mode SWA running averages are automatically updated every
:attr:`swa_freq` steps after :attr:`swa_start` steps of optimization. If
:attr:`swa_lr` is provided, the learning rate of the optimizer is reset
to :attr:`swa_lr` at every step starting from :attr:`swa_start`. To use
SWA in automatic mode provide values for both :attr:`swa_start` and
:attr:`swa_freq` arguments.
Alternatively, in the manual mode, use :meth:`update_swa` or
:meth:`update_swa_group` methods to update the SWA running averages.
In the end of training use `swap_swa_sgd` method to set the optimized
variables to the computed averages.
Args:
swa_freq (int): number of steps between subsequent updates of
SWA running averages in automatic mode; if None, manual mode is
selected (default: None)
swa_lr (float): learning rate to use starting from step swa_start
in automatic mode; if None, learning rate is not changed
(default: None)
Examples:
>>> # automatic mode
>>> base_opt = torch.optim.SGD(model.parameters(), lr=0.1)
>>> opt = SWA(base_opt, swa_start=10, swa_freq=5, swa_lr=0.05)
>>> for _ in range(100):
>>> opt.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> opt.step()
>>> opt.swap_swa_param()
>>> # manual mode
>>> opt = SWA(base_opt)
>>> for i in range(100):
>>> opt.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> opt.step()
>>> if i > 10 and i % 5 == 0:
>>> opt.update_swa()
>>> opt.swap_swa_param()
.. note::
SWA does not support parameter-specific values of :attr:`swa_start`,
:attr:`swa_freq` or :attr:`swa_lr`. In automatic mode SWA uses the
same :attr:`swa_start`, :attr:`swa_freq` and :attr:`swa_lr` for all
parameter groups. If needed, use manual mode with
:meth:`update_swa_group` to use different update schedules for
different parameter groups.
.. note::
Call :meth:`swap_swa_sgd` in the end of training to use the computed
running averages.
.. note::
If you are using SWA to optimize the parameters of a Neural Network
containing Batch Normalization layers, you need to update the
:attr:`running_mean` and :attr:`running_var` statistics of the
Batch Normalization module. You can do so by using
`torchcontrib.optim.swa.bn_update` utility.
.. note::
See the blogpost
https://pytorch.org/blog/stochastic-weight-averaging-in-pytorch/
for an extended description of this SWA implementation.
.. note::
The repo https://github.com/izmailovpavel/contrib_swa_examples
contains examples of using this SWA implementation.
.. _Averaging Weights Leads to Wider Optima and Better Generalization:
https://arxiv.org/abs/1803.05407
.. _Improving Consistency-Based Semi-Supervised Learning with Weight
Averaging:
https://arxiv.org/abs/1806.05594
"""
self._auto_mode, (self.swa_freq,) = self._check_params(swa_freq)
self.swa_lr_start = swa_lr_start
if self._auto_mode:
if swa_freq < 1:
raise ValueError("Invalid swa_freq: {}".format(swa_freq))
else:
if self.swa_lr_start is not None:
warnings.warn(
"Swa_freq is None, ignoring swa_lr")
# If not in auto mode make all swa parameters None
self.swa_lr_start = None
self.swa_freq = None
if self.swa_lr_start is not None and self.swa_lr_start < 0:
raise ValueError("Invalid SWA learning rate factor: {}".format(swa_lr_start))
self.optimizer = optimizer
self.defaults = self.optimizer.defaults
self.param_groups = self.optimizer.param_groups
self.state = defaultdict(dict)
self.opt_state = self.optimizer.state
for group in self.param_groups:
group['n_avg'] = 0
group['step_counter'] = 0
@staticmethod
def _check_params(swa_freq):
params = [swa_freq]
params_none = [param is None for param in params]
if not all(params_none) and any(params_none):
warnings.warn(
"Some of swa_start, swa_freq is None, ignoring other")
for i, param in enumerate(params):
if param is not None and not isinstance(param, int):
params[i] = int(param)
warnings.warn("Casting swa_start, swa_freq to int")
return not any(params_none), params
def reset_lr_to_swa(self):
for param_group in self.param_groups:
param_group['initial_lr'] = self.swa_lr_start #* param_group['lr']
def update_swa_group(self, group):
r"""Updates the SWA running averages for the given parameter group.
Arguments:
group (dict): Specifies for what parameter group SWA running
averages should be updated
Examples:
>>> # automatic mode
>>> base_opt = torch.optim.SGD([{'params': [x]},
>>> {'params': [y], 'lr': 1e-3}], lr=1e-2, momentum=0.9)
>>> opt = torchcontrib.optim.SWA(base_opt)
>>> for i in range(100):
>>> opt.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> opt.step()
>>> if i > 10 and i % 5 == 0:
>>> # Update SWA for the second parameter group
>>> opt.update_swa_group(opt.param_groups[1])
>>> opt.swap_swa_param()
"""
for p in group['params']:
param_state = self.state[p]
if 'swa_buffer' not in param_state:
param_state['swa_buffer'] = torch.zeros_like(p.data)
buf = param_state['swa_buffer']
virtual_decay = 1 / float(group["n_avg"] + 1)
diff = (p.data - buf) * virtual_decay
buf.add_(diff)
group["n_avg"] += 1
def update_swa(self):
r"""Updates the SWA running averages of all optimized parameters.
"""
for group in self.param_groups:
self.update_swa_group(group)
def swap_swa_param(self):
r"""Swaps the values of the optimized variables and swa buffers.
It's meant to be called in the end of training to use the collected
swa running averages. It can also be used to evaluate the running
averages during training; to continue training `swap_swa_sgd`
should be called again.
"""
for group in self.param_groups:
for p in group['params']:
param_state = self.state[p]
if 'swa_buffer' not in param_state:
# If swa wasn't applied we don't swap params
warnings.warn(
"SWA wasn't applied to param {}; skipping it".format(p))
continue
buf = param_state['swa_buffer']
tmp = torch.empty_like(p.data)
tmp.copy_(p.data)
p.data.copy_(buf)
buf.copy_(tmp)
def step(self, closure=None):
r"""Performs a single optimization step.
In automatic mode also updates SWA running averages.
"""
loss = self.optimizer.step(closure)
for group in self.param_groups:
group["step_counter"] += 1
steps = group["step_counter"]
if self._auto_mode:
if steps % self.swa_freq == 0:
self.update_swa_group(group)
return loss
def state_dict(self):
r"""Returns the state of SWA as a :class:`dict`.
It contains three entries:
* opt_state - a dict holding current optimization state of the base
optimizer. Its content differs between optimizer classes.
* swa_state - a dict containing current state of SWA. For each
optimized variable it contains swa_buffer keeping the running
average of the variable
* param_groups - a dict containing all parameter groups
"""
opt_state_dict = self.optimizer.state_dict()
swa_state = {(id(k) if isinstance(k, torch.Tensor) else k): v
for k, v in self.state.items()}
opt_state = opt_state_dict["state"]
param_groups = opt_state_dict["param_groups"]
return {"opt_state": opt_state, "swa_state": swa_state,
"param_groups": param_groups}
def load_state_dict(self, state_dict):
r"""Loads the optimizer state.
Args:
state_dict (dict): SWA optimizer state. Should be an object returned
from a call to `state_dict`.
"""
swa_state_dict = {"state": state_dict["swa_state"],
"param_groups": state_dict["param_groups"]}
opt_state_dict = {"state": state_dict["opt_state"],
"param_groups": state_dict["param_groups"]}
super(SWA, self).load_state_dict(swa_state_dict)
self.optimizer.load_state_dict(opt_state_dict)
self.opt_state = self.optimizer.state
def add_param_group(self, param_group):
r"""Add a param group to the :class:`Optimizer` s `param_groups`.
This can be useful when fine tuning a pre-trained network as frozen
layers can be made trainable and added to the :class:`Optimizer` as
training progresses.
Args:
param_group (dict): Specifies what Tensors should be optimized along
with group specific optimization options.
"""
param_group['n_avg'] = 0
param_group['step_counter'] = 0
self.optimizer.add_param_group(param_group)
| [
"chengpeng8@jd.com"
] | chengpeng8@jd.com |
fc1a2897b55e9c6109a9729b245562e9d13b8022 | 347c70d4851b568e03e83387f77ae81071ab739e | /older/rc-query-rest/tests/test_rest_query.py | 5974c1291876236f288ae59b86951e2be8b4d673 | [
"MIT"
] | permissive | neetinkandhare/resilient-community-apps | 59d276b5fb7a92872143ce2b94edd680738693ce | 3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f | refs/heads/master | 2021-12-27T09:05:36.563404 | 2021-09-29T13:04:56 | 2021-09-29T13:04:56 | 159,804,866 | 1 | 0 | MIT | 2021-08-03T19:45:45 | 2018-11-30T10:07:32 | Python | UTF-8 | Python | false | false | 2,446 | py | """System Integration Tests for REST Query component"""
from __future__ import print_function
import os.path
import pytest
from circuits.core.handlers import handler
data_dir = os.path.join(os.path.dirname(__file__), "rest_sample_data")
config_data = """[rest]
queue = rest
query_definitions_dir = %s
test_endpoint = http://httpbin.org/post
""" % (data_dir)
@pytest.mark.usefixtures("configure_resilient")
class TestRESTIntegrationTests:
""" System tests for the REST Query component """
# Appliance Configuration Requirements
destinations = ("rest",)
automatic_actions = {"Payload String Test": ("rest", "Incident",
({u"value": u"Payload Is String",
u"field_name": u"incident.name",
u"method": u"equals"},)),
"Payload Dict Test": ("rest", "Incident",
({u"value": u"Payload Is Dict",
u"field_name": u"incident.name",
u"method": u"equals"},))}
payload_testdata = [pytest.param("Payload Is String", "payload_string_test",
id="string_payload"),
pytest.param("Payload Is Dict", "payload_dict_test",
id="dict_payload")]
@pytest.mark.parametrize("inc_name,rule_name", payload_testdata)
def test_payload_string_or_dict(self, inc_name, rule_name, circuits_app, new_incident):
""" http-body is a string to render or a dict"""
# Incident data will be posted to HTTP Bin and then the incident name will be
# changed to the incident ID that was posted.
new_incident["name"] = inc_name
inc = circuits_app.app.action_component.rest_client().post("/incidents", new_incident)
event = circuits_app.watcher.wait(rule_name + "_success", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
event = circuits_app.watcher.wait("QueryEvent", timeout=10, channel='actions.rest')
assert event
pytest.wait_for(event, "complete", True)
updated_inc = circuits_app.app.action_component.rest_client().get("/incidents/%d" % inc["id"])
assert updated_inc["name"] == str(inc["id"])
| [
"hpyle@us.ibm.com"
] | hpyle@us.ibm.com |
92abee3a30aa9cc1fdf66e54449b2607302a919b | d3631edd8ec21471a361d8ce5561e9e0441b4cc2 | /src/event_replayer/lambda_function.py | adcee635c342576e94161636f1ab566b71b21e37 | [
"MIT"
] | permissive | fernandogoncalves-me/serverless-datalake | f62591933602ffb4405a53868480133f863a00a1 | 75eba43f41d64fe20dbe4a1890830a75a0825316 | refs/heads/master | 2023-04-13T03:20:19.778752 | 2020-04-13T14:37:31 | 2020-04-13T14:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,717 | py |
import boto3
import datetime
import json
import logging
import os
TABLE_NAME = os.getenv('TABLE_NAME')
QUEUE_URL = os.getenv('QUEUE_URL')
logger = logging.getLogger()
logger.setLevel(os.getenv('LOGLEVEL') or 'INFO')
def get_metadata_from_catalog(source, interval_start, interval_end):
ddb = boto3.client('dynamodb')
metadata = ddb.query(
TableName=TABLE_NAME,
KeyConditions={
'Source': {
'AttributeValueList': [
{
'S': source
}
],
'ComparisonOperator': 'EQ'
},
'Timestamp': {
'AttributeValueList': [
{
'S': interval_start
},
{
'S': interval_end
}
],
'ComparisonOperator': 'BETWEEN'
}
}
)
return metadata['Items']
def send_messages(objects_metadata):
sqs = boto3.client('sqs')
for object_metada in objects_metadata:
sqs.send_message(
QueueUrl=QUEUE_URL,
MessageBody=json.dumps(
{
'Replay': object_metada
}
)
)
def lambda_handler(event, context):
logger.info("Received event: {}".format(event))
replay = json.loads(event['body'])
objects_metadata = get_metadata_from_catalog(
replay['Source'], replay['IntervalStart'], replay['IntervalEnd'])
send_messages(objects_metadata)
return {
"headers": {
"Content-Type": "application/json"
},
"statusCode": 202
}
| [
"fernandosg88@gmail.com"
] | fernandosg88@gmail.com |
891495601cb3a5d90cb691273f4b2eaf72768690 | 26ec25767ecc5f24f62a0778c3d127b5abc70c18 | /lib/charmhelpers/core/templating.py | 2c63885319aef318789efea8b5e37eec8e502ef4 | [] | no_license | AdamIsrael/reddit-charm | 892e39291b367ee506e54078b23825d0ba02ca87 | a41f1e76b205a1bf916a138a689940d1ea4c4bc1 | refs/heads/master | 2021-01-19T15:27:27.265332 | 2015-03-21T03:10:33 | 2015-03-21T03:10:33 | 21,801,514 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | import os
from charmhelpers.core import host
from charmhelpers.core import hookenv
def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
"""
Render a template.
The `source` path, if not absolute, is relative to the `templates_dir`.
The `target` path should be absolute.
The context should be a dict containing the values to be replaced in the
template.
The `owner`, `group`, and `perms` options will be passed to `write_file`.
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
Note: Using this requires python-jinja2; if it is not installed, calling
this will attempt to use charmhelpers.fetch.apt_install to install it.
"""
try:
from jinja2 import FileSystemLoader, Environment, exceptions
except ImportError:
try:
from charmhelpers.fetch import apt_install
except ImportError:
hookenv.log('Could not import jinja2, and could not import '
'charmhelpers.fetch to install it',
level=hookenv.ERROR)
raise
apt_install('python-jinja2', fatal=True)
from jinja2 import FileSystemLoader, Environment, exceptions
if templates_dir is None:
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
loader = Environment(loader=FileSystemLoader(templates_dir))
try:
source = source
template = loader.get_template(source)
except exceptions.TemplateNotFound as e:
hookenv.log('Could not load template %s from %s.' %
(source, templates_dir),
level=hookenv.ERROR)
raise e
content = template.render(context)
host.mkdir(os.path.dirname(target))
host.write_file(target, content, owner, group, perms)
| [
"adam@adamisrael.com"
] | adam@adamisrael.com |
6947ec91cff5c0f614614ebc46ea20f20d4175cd | 812b4819aff718230c978c01f3633fe1f701e160 | /BalancedForest.py | 867f1bd449136a6d734e99ddf865a30ec6698115 | [] | no_license | Santhilata/Python-scripts | e9bda666841078db575cb1e6709bed4a55cc3836 | 42e6047b158b88eb5df666ee94e7d05ba334c27d | refs/heads/main | 2023-01-08T16:40:53.006333 | 2020-11-02T09:00:57 | 2020-11-02T09:00:57 | 309,295,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 654 | py | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 08:49:35 2020
@author: santhilata
input
2
5
1 2 2 1 1
1 2
1 3
3 5
1 4
3
1 3 5
1 3
1 2
output
2
-1
"""
def balancedForest(c, edges):
if (len(edges) < 2):
return -1 # The tree is far too small
n = len(c)
for node in range(n):
if __name__ == '__main__':
q = int(input())
for q_itr in range(q):
n = int(input())
c = list(map(int, input().rstrip().split()))
edges = []
for _ in range(n - 1):
edges.append(list(map(int, input().rstrip().split())))
result = balancedForest(c, edges)
| [
"santhilata.venkata@nationalarchives.gov.uk"
] | santhilata.venkata@nationalarchives.gov.uk |
6fe927c97dff9f30388d7e764421f05e3e9ffa5d | a671cd15450f3becbfb7af7ab190af2a840e6fb2 | /7_type_casting_str.py | 6e03ef3c7ef07d5ef426ded8584f4158e066268e | [] | no_license | mohammedaasem/PythonBasicPrograms | c9cc467d7d334b99886f5782a1711e203b6da5da | e681dcdfbcb5843abe9e6b6aa8fae5e709c69545 | refs/heads/master | 2020-07-01T07:39:16.198957 | 2019-08-21T11:14:13 | 2019-08-21T11:14:13 | 201,092,309 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | a=1
b=7.6
c=True
print(str(a))
print(str(b))
print(str(c)) | [
"imohammedaasem@gmail.com"
] | imohammedaasem@gmail.com |
1734d731ff48e8fcb9e921086572cb3c89830ab7 | 86df126a63d476298faff3096d2e0f925dee70c4 | /media/chinmoy/AlgorithmsArchive/LinkedList.py | 51c1729bce038821972b97855c36f431765d7e50 | [] | no_license | SabikAbtahee/BYTE-PRO | 0b76f32efaa47855d2386c2d3abc9ce96099f25e | a84ad04241d63e9d2104c554319d2023c4864b0c | refs/heads/master | 2022-12-25T19:30:22.742603 | 2019-04-27T07:37:13 | 2019-04-27T07:37:13 | 124,196,353 | 4 | 3 | null | 2022-12-11T06:00:11 | 2018-03-07T07:26:17 | JavaScript | UTF-8 | Python | false | false | 11,343 | py |
<<<<<<<<<< Sabik
class Node():
def __init__(self,data):
self.data=data
self.nextNode= None
self.prevNode = None
def getData(self):
return self.data
def setData(self,data):
self.data=data
def getNextNode(self):
return self.nextNode
def setNextNode(self,nextNode):
self.nextNode=nextNode
def getPrevNode(self):
return self.prevNode
def setPrevNode(self,prevNode):
self.prevNode=prevNode
class LinkedList:
def __init__(self):
self.root = None
self.size = 0
def addNodeInTail(self,data):
tempNode = self.root
tempNode2 = self.root
if(tempNode == None):
self.root = Node(data)
self.root.setPrevNode(None)
self.root.setNextNode(None)
else:
while(tempNode.getNextNode() != None):
tempNode2 = tempNode
tempNode = tempNode.getNextNode()
tempNode.setPrevNode(tempNode2)
tempNode.setNextNode(Node(data))
tempNode.getNextNode().setPrevNode(tempNode)
tempNode.getNextNode().setNextNode(None)
# tempNode.setNextNode(None)
# tempNode2.setNextNode(tempNode.getNextNode())
self.size+=1
def addNodeInTheHead(self,data):
tempNode = self.root
if(tempNode==None):
self.root = Node(data)
self.root.setPrevNode(None)
self.root.setNextNode(None)
else:
newNode = Node(data)
newNode.setNextNode(self.root)
newNode.setPrevNode(None)
self.root=newNode
tempNode.setPrevNode(self.root)
self.size+=1
def InsertNth(self, head, data, position):
index = position
newNode = Node(data)
tempNode = self.root
if(tempNode==None):
self.root = newNode
else:
if(index == 0):
newNode.setNextNode(self.root)
self.root.setPrevNode(newNode)
self.root = newNode
else:
count = 0
flag = False
while tempNode.getNextNode() != None:
if (count == index):
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
if (flag == False):
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
def insertNodeAtIndex(self, index,data):
if(index>=0 and index<self.size):
tempNode = self.root
flag = False
count = 0
newNode = Node(data)
if (index == 0):
newNode.setNextNode(self.root)
self.root.setPrevNode(newNode)
self.root = newNode
else:
# it will execute index 1 to size-1 . but index 1 will not be counted
while tempNode.getNextNode() != None:
# print(count,index)
if (count == index):
# print(tempNode.)
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
print(tempNode.getData())
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
elif(index >= self.size): print("Index must be in 0 -", self.size-1, "range.")
else: print("Index must be in positive")
def insertNodeSorted(self, data):
tempNode = self.root
flag = False
count = 0
newNode = Node(data)
if(tempNode==None):
self.root = newNode
self.size += 1
else:
if (newNode.getData() < tempNode.getData()):
newNode.setNextNode(self.root)
self.root.setPrevNode(newNode)
self.root = newNode
else:
# it will execute index 1 to size-1 . but index 1 will not be counted
while tempNode.getNextNode() != None:
# print(count,index)
if (data < tempNode.getData()):
# print(tempNode.)
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
print(tempNode.getData())
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
def getSize(self):
return self.size
def showList(self):
tempNode = self.root
print("[",end=' ')
while(tempNode.getNextNode()!=None):
print(tempNode.getData(),end=', ')
tempNode = tempNode.getNextNode()
print(tempNode.getData(),end='')
print(" ]")
def printList(self):
tempNode = self.root
while (tempNode.getNextNode() != None):
print(tempNode.getData(),end=' ')
tempNode = tempNode.getNextNode()
print(tempNode.getData())
def getHead(self): #return data only
return self.root
ll = LinkedList()
n = int(input())
for i in range(n):
n, m = input().strip().split(' ')
ll.InsertNth(None,int(n),int(m))
ll.printList()
# print('executed',int(n),int(m))
#################################
print('loop end')
# ll.showListReversely()
if (count == index):
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
if (flag == False):
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
def insertNodeAtIndex(self, index,data):
if(index>=0 and index<self.size):
tempNode = self.root
flag = False
count = 0
newNode = Node(data)
if (index == 0):
newNode.setNextNode(self.root)
self.root.setPrevNode(newNode)
self.root = newNode
else:
# it will execute index 1 to size-1 . but index 1 will not be counted
while tempNode.getNextNode() != None:
# print(count,index)
if (count == index):
# print(tempNode.)
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
print(tempNode.getData())
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
elif(index >= self.size): print("Index must be in 0 -", self.size-1, "range.")
else: print("Index must be in positive")
def insertNodeSorted(self, data):
tempNode = self.root
flag = False
count = 0
newNode = Node(data)
if(tempNode==None):
self.root = newNode
self.size += 1
else:
if (newNode.getData() < tempNode.getData()):
newNode.setNextNode(self.root)
self.root.setPrevNode(newNode)
self.root = newNode
else:
# it will execute index 1 to size-1 . but index 1 will not be counted
while tempNode.getNextNode() != None:
# print(count,index)
if (data < tempNode.getData()):
# print(tempNode.)
newNode.setNextNode(tempNode)
tempNode.getPrevNode().setNextNode(newNode)
tempNode.setPrevNode(newNode)
newNode.setPrevNode(tempNode.getPrevNode())
flag = True
tempNode = tempNode.getNextNode()
count += 1
if (flag == False):
print(tempNode.getData())
newNode.setPrevNode(tempNode)
tempNode.setNextNode(newNode)
self.size += 1
def getSize(self):
return self.size
def showList(self):
tempNode = self.root
print("[",end=' ')
while(tempNode.getNextNode()!=None):
print(tempNode.getData(),end=', ')
tempNode = tempNode.getNextNode()
print(tempNode.getData(),end='')
print(" ]")
def printList(self):
tempNode = self.root
while (tempNode.getNextNode() != None):
print(tempNode.getData(),end=' ')
tempNode = tempNode.getNextNode()
print(tempNode.getData())
def getHead(self): #return data only
return self.root
ll = LinkedList()
n = int(input())
for i in range(n):
n, m = input().strip().split(' ')
ll.InsertNth(None,int(n),int(m))
ll.printList()
# print('executed',int(n),int(m))
#################################
print('loop end')
# ll.showListReversely()
>>>>>>>>>>
| [
"chinmoyacharjee15@gmail.com"
] | chinmoyacharjee15@gmail.com |
7128c64d41dbd9f583a0740cc04da47b47c793ca | 990210dce02dde1ec7cebb325a1cc83ecff95eb9 | /examplenn/11linenn.py | 0771836d886057212f9895edc5b8444a4ab01852 | [] | no_license | mogwai/learningml | 689e5a3a349b75473f62dc8b2ffe6f10e599dc07 | 61961a612e06f180be7515b961f18512ecb03649 | refs/heads/master | 2020-03-19T04:27:52.975661 | 2018-06-04T15:04:43 | 2018-06-04T15:04:43 | 135,831,467 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | # https://iamtrask.github.io/2015/07/12/basic-python-network/
# Trains a NN
import numpy as np
print("Started")
X = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
y = np.array([[0, 1, 1, 0]]).T
syn0 = 2*np.random.random((3, 4)) - 1
syn1 = 2*np.random.random((4, 1)) - 1
for i in range(60000):
l1 = 1/(1+np.exp(-(np.dot(X, syn0))))
l2 = 1/(1+np.exp(-(np.dot(l1, syn1))))
l2_delta = (y - l2)*(l2*(1-l2))
l1_delta = l2_delta.dot(syn1.T) * (l1 * (1-l1))
syn1 += l1.T.dot(l2_delta)
syn0 += X.T.dot(l1_delta)
print("Finished")
| [
"himion0@gmail.com"
] | himion0@gmail.com |
ea10f526b1385f7e048a418e5d749d1d83805f90 | 0a7c86329c509bf324ef165cdd8e839e4991fefa | /color.pyde | 5ff60dca2c68a84c5d8a2239d7e657a96c7e5c8b | [] | no_license | Beloved1/color | 82d8b54e6fddfe3fe2b946967fc0aae269b8f2a9 | 7e96d337c76c932d7000803aeb18e99ac65c5ce2 | refs/heads/master | 2020-03-23T11:18:52.380359 | 2018-07-18T22:31:36 | 2018-07-18T22:31:36 | 141,496,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | pyde | def setup():
size(400, 400)
dog = loadImage("dog.jpg")
image (dog, 0, 0)
loadPixels()
print(red(pixels[0]), green(pixels[0]), blue(pixels[0]))
print(red(pixels[57]), green(pixels[57]), blue(pixels[57]))
c = color(255, 255 , 0)
pixels[57] = c
updatePixels()
print(red(pixels[57]), green(pixels[57]), blue(pixels[57]))
| [
"obafemielegbede@gmail.com"
] | obafemielegbede@gmail.com |
58abc4b1b7819ca83c47d829f036934ed54e49e7 | bf7959048edc0005e04431a0864c719adc5ea9ea | /python版本/451-FrequencySort.py | def3b0ce4fd72584a4725058697bf09520d70677 | [] | no_license | Yohager/Leetcode | 7c24f490cfa5fd8e3cdb09e5a2305a134a064a93 | 585af82ff2c2d534053f6886714406019ed0c7d1 | refs/heads/master | 2022-12-07T23:51:16.347174 | 2022-11-28T02:30:53 | 2022-11-28T02:30:53 | 178,201,848 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 223 | py | class Solution:
def frequencySort(self, s: str) -> str:
c = collections.Counter(s)
n = len(c.keys())
ans = ''
for x in c.most_common(n):
ans += x[0] * x[1]
return ans | [
"guoyuhang0921@gmail.com"
] | guoyuhang0921@gmail.com |
1d1bce381708be4fc64b894ae43fcf0a22f2e34e | 6ee9a46a95a504cf91eb5031b180f2d6c6cc9d98 | /cut_rod.py | f4f900ef0683dad36b563fa62f8a127caac380dd | [] | no_license | rohitmungre/dynamic_programming | 8dc952f9f83e15a9b6eae8eef0e509da1c2add97 | 1d1f8036f5f6066bdc39436ace8132208466541e | refs/heads/master | 2020-08-01T22:37:25.817167 | 2019-11-20T05:33:11 | 2019-11-20T05:33:11 | 211,140,758 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 689 | py | rod = 7
sz = [1,2,3,4]
vl = [2,5,7,8]
def cut_rod_dp(sz, vl, rod, idx, memo):
if rod<= 0:
return 0
if idx <0:
return 0
tval = 0
varr = []
while rod >= 0:
varr.append(tval+cut_rod_dp(sz, vl, rod, idx-1, memo))
rod = rod - sz[idx]
tval = tval + vl[idx]
return max(varr)
def cut_rod(sz, vl, rod, idx):
if rod<= 0:
return 0
if idx <0:
return 0
tval = 0
varr = []
while rod >= 0:
varr.append(tval+cut_rod(sz, vl, rod, idx-1))
rod = rod - sz[idx]
tval = tval + vl[idx]
return max(varr)
print(cut_rod_dp(sz, vl, rod, 3, {}))
| [
"noreply@github.com"
] | noreply@github.com |
8b1e781ba56cc29faae63e1da5b0d6d1feb5f6f3 | f3159749142a9b143c622780bbb414d08f71fead | /pyban/tickets/migrations/0007_auto_20210606_1901.py | d88f46964b8f54f16add3497d1a724111439504e | [
"MIT"
] | permissive | abderrahmen-hadjadj-aoul/pyban | 2a7e72ffb274f3b0487830c7762c90ad585b5032 | 82fe3f0bcf36880b710bbf617f2a7e6b1097f80c | refs/heads/main | 2023-06-02T14:30:11.990771 | 2021-06-07T11:02:21 | 2021-06-07T11:02:21 | 373,596,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 382 | py | # Generated by Django 3.1.2 on 2021-06-06 19:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tickets', '0006_ticket_board'),
]
operations = [
migrations.AlterField(
model_name='ticket',
name='description',
field=models.TextField(blank=True),
),
]
| [
"abder78@gmail.com"
] | abder78@gmail.com |
53db8753d8c4e718450caf4aedd4c34c6bf8bbe6 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /BitPim/rev2895-2929/rev2895-2929/playlist.py | 80a59694380a0966cc13538dfbfcc9752a490f64 | [] | no_license | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 13,937 | py | """
Code to handle Playlist items.
The playlist data includes 2 components: the list of available songs, and
the playlist items.
The format of the Playlist items is standardized. It is a list of dict which
has the following standard fields:
name: string=the name of the play list
type: string=the type of this play list. Current supported types are mp3 and wma.
songs: [ 'song name', ... ]
To implement Playlist read/write for a phone module:
1. Add 2 entries into Profile._supportedsyncs:
...
('playlist', 'read', 'OVERWRITE'),
('playlist', 'write', 'OVERWRITE'),
2. Implement the following 2 methods in your Phone class:
def getplaylist(self, result)
def saveplaylist(self, result, merge)
The result dict should have:
results[playlist.masterlist_key]=['song name 1', 'song name 2', ...]
results[playlist.playlist_key]=[playlist.PlaylistEntry, playlist.PlaylistEntry, ...]
"""
import copy

import wx
import wx.gizmos as gizmos

import database
import helpids
playlist_key='playlist'
masterlist_key='masterlist'
playlists_list='playlists'
mp3_type='mp3'
wma_type='wma'
playlist_type=(mp3_type, wma_type)
class MasterListDataObject (database.basedataobject) :
_knownproperties=[]
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update({ 'masterlist': ['name'] })
def __init__(self, data=None):
if data is None or not isinstance(data, (list, tuple)):
return
self.update({'masterlist': [{ 'name': x } for x in data] })
_knownlistproperties.update({ 'masterlist': ['name'] })
masterlistobjectfactory=database.dataobjectfactory(MasterListDataObject)
class PlaylistDataObject (database.basedataobject) :
_knownproperties=[]
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update( { 'playlist': ['name'] })
def __init__(self, data=None):
if data is None or not isinstance(data, (list, tuple)):
return
self.update({'playlist': [{'name': x} for x in data]})
_knownlistproperties.update( { 'playlist': ['name'] })
playlistobjectfactory=database.dataobjectfactory(PlaylistDataObject)
class PlaylistEntryDataObject (database.basedataobject) :
_knownproperties=['type']
_knownlistproperties=database.basedataobject._knownlistproperties.copy()
_knownlistproperties.update({ 'songs': ['name']})
def __init__(self, data=None):
if data is None or not isinstance(data, PlaylistEntry):
return
self.update(data.get_db_dict())
_knownlistproperties.update({ 'songs': ['name']})
playlistentryobjectfactory=database.dataobjectfactory(PlaylistEntryDataObject)
class PlaylistEntry(object):
    """One named playlist: a type tag plus an ordered list of song names.

    All state lives in the ``_data`` dict so the entry can round-trip
    through the database record layout via get_db_dict/set_db_dict.
    """

    def __init__(self):
        self._data = {'serials': []}

    def get(self):
        """Return a deep copy of the raw state dict."""
        return copy.deepcopy(self._data, {})

    def set(self, d):
        """Replace the raw state with a shallow copy of *d*."""
        self._data = {}
        self._data.update(d)

    def get_db_dict(self):
        """Render this entry in the database record layout."""
        return {'type': self.pl_type,
                'songs': [{'name': song} for song in self.songs]}

    def set_db_dict(self, d):
        """Load this entry from a database record dict."""
        self.pl_type = d.get('type', None)
        self.songs = [item['name'] for item in d.get('songs', [])]

    def _set_or_del(self, key, value, empty_values=()):
        # Store *value* under *key*, or drop the key when the value counts
        # as empty (None or listed in *empty_values*).
        if value is None or value in empty_values:
            if key in self._data:
                del self._data[key]
        else:
            self._data[key] = value

    def _get_name(self):
        return self._data.get('name', '')

    def _set_name(self, value):
        self._set_or_del('name', value, [''])
    name = property(fget=_get_name, fset=_set_name)

    def _get_type(self):
        return self._data.get('type', '')

    def _set_type(self, value):
        self._set_or_del('type', value, [''])
    pl_type = property(fget=_get_type, fset=_set_type)

    def _get_songs(self):
        return self._data.get('songs', [])

    def _set_songs(self, value):
        self._set_or_del('songs', value, [[]])
    songs = property(fget=_get_songs, fset=_set_songs)
class PlaylistWidget(wx.Panel):
    """BitPim tab for editing the phone's playlists.

    Three panes: the playlists themselves, the selected playlist's songs,
    and the master list of available songs.  populate()/getdata() exchange
    data with the phone code; populatefs()/getfromfs() persist the same
    data through the BitPim database.
    """

    def __init__(self, mainwindow, parent):
        super(PlaylistWidget, self).__init__(parent, -1)
        self._mw = mainwindow
        self._data = []      # list of PlaylistEntry objects
        self._master = []    # master list of song names
        self.ignoredirty = False
        self.dirty = False
        vbs = wx.BoxSizer(wx.VERTICAL)
        hbs = wx.BoxSizer(wx.HORIZONTAL)
        # Pane 1: the playlists (rename/add/delete, no manual reordering).
        self._item_list = gizmos.EditableListBox(self, -1, 'Play Lists:',
                                                 style=gizmos.EL_ALLOW_NEW |
                                                 gizmos.EL_ALLOW_EDIT |
                                                 gizmos.EL_ALLOW_DELETE)
        self._item_list.GetUpButton().Show(False)
        self._item_list.GetDownButton().Show(False)
        self._item_list_w = self._item_list.GetListCtrl()
        hbs.Add(self._item_list, 1, wx.EXPAND | wx.ALL, border=5)
        hbs.Add(wx.StaticLine(self, -1, style=wx.LI_VERTICAL), 0,
                wx.EXPAND | wx.ALL, 5)
        # Pane 2: songs in the currently selected playlist.
        hbs1 = wx.BoxSizer(wx.HORIZONTAL)
        self._pl_list = gizmos.EditableListBox(self, -1, "Play List Content:",
                                               style=gizmos.EL_ALLOW_DELETE)
        self._pl_list_w = self._pl_list.GetListCtrl()
        hbs1.Add(self._pl_list, 1, wx.EXPAND | wx.ALL, 5)
        _add_btn = wx.Button(self, -1, '<-Add')
        hbs1.Add(_add_btn, 0, wx.ALL, 5)
        # Pane 3: read-only master song list.
        self._master_list = gizmos.EditableListBox(self, -1, 'Available Songs:', style=0)
        self._master_list_w = self._master_list.GetListCtrl()
        self._master_list.GetUpButton().Show(False)
        self._master_list.GetDownButton().Show(False)
        hbs1.Add(self._master_list, 1, wx.EXPAND | wx.ALL, 5)
        hbs.Add(hbs1, 3, wx.EXPAND | wx.ALL, 5)
        # Save / Help / Revert button row.
        hbs1 = wx.BoxSizer(wx.HORIZONTAL)
        self._save_btn = wx.Button(self, wx.NewId(), "Save")
        self._revert_btn = wx.Button(self, wx.NewId(), "Revert")
        help_btn = wx.Button(self, wx.ID_HELP, "Help")
        hbs1.Add(self._save_btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        hbs1.Add(help_btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        hbs1.Add(self._revert_btn, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        vbs.Add(hbs, 1, wx.EXPAND | wx.ALL, 5)
        vbs.Add(wx.StaticLine(self, -1), 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)
        vbs.Add(hbs1, 0, wx.ALIGN_CENTRE | wx.ALL, 5)
        self.SetSizer(vbs)
        self.SetAutoLayout(True)
        vbs.Fit(self)
        # Event wiring.
        wx.EVT_LIST_ITEM_SELECTED(self._item_list, self._item_list_w.GetId(),
                                  self.OnPlaylistSelected)
        wx.EVT_LIST_BEGIN_LABEL_EDIT(self._item_list, self._item_list_w.GetId(),
                                     self.OnStartLabelChanged)
        wx.EVT_LIST_END_LABEL_EDIT(self._item_list, self._item_list_w.GetId(),
                                   self.OnLabelChanged)
        wx.EVT_BUTTON(self, _add_btn.GetId(), self.OnAdd2Playlist)
        wx.EVT_BUTTON(self, self._save_btn.GetId(), self.OnSave)
        wx.EVT_BUTTON(self, self._revert_btn.GetId(), self.OnRevert)
        wx.EVT_LIST_DELETE_ITEM(self._item_list, self._item_list_w.GetId(),
                                self.OnMakeDirty)
        wx.EVT_LIST_DELETE_ITEM(self._pl_list, self._pl_list_w.GetId(),
                                self.OnMakeDirty)
        wx.EVT_BUTTON(self, wx.ID_HELP,
                      lambda _: wx.GetApp().displayhelpid(helpids.ID_TAB_PLAYLIST))
        self._populate()
        self.setdirty(False)

    def setdirty(self, val):
        """Set/clear the dirty flag and enable the matching controls."""
        if self.ignoredirty:
            return
        self.dirty = val
        self._item_list.Enable(not self.dirty)
        self._save_btn.Enable(self.dirty)
        self._revert_btn.Enable(self.dirty)

    def _clear(self, clear_master=True):
        self._item_list_w.DeleteAllItems()
        self._pl_list_w.DeleteAllItems()
        if clear_master:
            self._master_list_w.DeleteAllItems()

    def _populate_master(self):
        self._master_list.SetStrings(self._master)

    def _populate_pl_list(self):
        self._item_list_w.DeleteAllItems()
        if self._data:
            self._item_list.SetStrings([e.name for e in self._data])
        else:
            self._item_list.SetStrings([])

    def _name2idx(self, name):
        # Index of the playlist called *name* in self._data, or None.
        for i, e in enumerate(self._data):
            if e.name == name:
                return i

    def _populate_each(self, name):
        # Show the songs of playlist *name* without tripping the dirty flag.
        self._pl_list_w.DeleteAllItems()
        if name is None:
            return
        self.ignoredirty = True
        _list_idx = self._name2idx(name)
        if _list_idx is not None:
            self._pl_list.SetStrings(self._data[_list_idx].songs)
        self.ignoredirty = False
        if not self.dirty:
            self.setdirty(False)

    def _populate(self):
        self._populate_master()
        self._populate_pl_list()

    def populate(self, dict):
        # *dict* maps playlist_key -> [PlaylistEntry], masterlist_key -> [name].
        self._data = dict.get(playlist_key, [])
        self._master = dict.get(masterlist_key, [])
        self._clear()
        self._populate()

    def _save_to_db(self, dict):
        """Write the master list, playlist names and per-playlist records."""
        db_rr = {masterlist_key: MasterListDataObject(dict.get(masterlist_key, []))}
        database.ensurerecordtype(db_rr, masterlistobjectfactory)
        self._mw.database.savemajordict(masterlist_key, db_rr)
        _pl_list = dict.get(playlist_key, [])
        db_rr = {playlists_list: PlaylistDataObject([x.name for x in _pl_list])}
        database.ensurerecordtype(db_rr, playlistobjectfactory)
        self._mw.database.savemajordict(playlists_list, db_rr)
        db_rr = {}
        for e in _pl_list:
            db_rr[e.name] = PlaylistEntryDataObject(e)
        database.ensurerecordtype(db_rr, playlistentryobjectfactory)
        self._mw.database.savemajordict(playlist_key, db_rr)

    def populatefs(self, dict):
        self._save_to_db(dict)
        return dict

    def getfromfs(self, result):
        """Load the master list and playlists from the database into *result*."""
        _master_dict = self._mw.database.getmajordictvalues(masterlist_key,
                                                            masterlistobjectfactory)
        _master_dict = _master_dict.get(masterlist_key, {})
        result.update({masterlist_key:
                       [x['name'] for x in _master_dict.get(masterlist_key, [])]})
        _pl_list_dict = self._mw.database.getmajordictvalues(playlists_list,
                                                             playlistobjectfactory)
        _pl_list_dict = _pl_list_dict.get(playlists_list, {})
        _pl_entries_dict = self._mw.database.getmajordictvalues(playlist_key,
                                                                playlistentryobjectfactory)
        _pl_list = []
        for e in _pl_list_dict.get(playlist_key, []):
            _pl_entry = _pl_entries_dict.get(e['name'], None)
            if _pl_entry:
                _entry = PlaylistEntry()
                _entry.name = e['name']
                # BUG FIX: the original assigned "_entry.type", which set a
                # stray attribute and silently dropped the playlist type on
                # load; the PlaylistEntry property is pl_type.
                _entry.pl_type = _pl_entry['type']
                _entry.songs = [x['name'] for x in _pl_entry['songs']]
                _pl_list.append(_entry)
        result.update({playlist_key: _pl_list})
        return result

    def OnMakeDirty(self, _=None):
        """A public function you can call that will set the dirty flag"""
        if self.dirty or self.ignoredirty:
            return
        # (leftover debug print removed here)
        self.setdirty(True)

    def OnPlaylistSelected(self, evt):
        self._populate_each(evt.GetLabel())
        evt.Skip()

    def OnDirty(self, _):
        self.setdirty(True)

    def _change_playlist_name(self, new_name):
        # Rename the entry that was being edited (noted in OnStartLabelChanged).
        for e in self._data:
            if e.name == self._old_name:
                e.name = new_name

    def _add_playlist_name(self, new_name):
        _entry = PlaylistEntry()
        _entry.name = new_name
        self._data.append(_entry)

    def OnStartLabelChanged(self, evt):
        self._old_name = evt.GetLabel()

    def OnLabelChanged(self, evt):
        # An empty old label means the edit created a brand-new playlist.
        _new_name = evt.GetLabel()
        if _new_name:
            self.setdirty(True)
            if self._old_name:
                self._change_playlist_name(_new_name)
            else:
                self._add_playlist_name(_new_name)
        evt.Skip()

    def OnAdd2Playlist(self, _):
        # Append the selected master-list song to the selected playlist.
        _pl_idx = self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
        _master_idx = self._master_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
        if _pl_idx == -1 or _master_idx == -1:
            return  # need a selection on both sides
        _entry_idx = self._name2idx(self._item_list_w.GetItemText(_pl_idx))
        if _entry_idx is not None:
            self.setdirty(True)
            self._pl_list.SetStrings(self._pl_list.GetStrings() +
                                     [self._master_list_w.GetItemText(_master_idx)])

    def _build_playlist(self):
        # PlaylistEntry objects in on-screen order, skipping blank names.
        _pl_list = []
        for _name in self._item_list.GetStrings():
            if _name:
                _idx = self._name2idx(_name)
                if _idx is not None:
                    _pl_list.append(self._data[_idx])
        return _pl_list

    def OnSave(self, _):
        # Push the edited song list back into the selected entry first.
        _pl_idx = self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
        if _pl_idx != -1:
            _entry_idx = self._name2idx(self._item_list_w.GetItemText(_pl_idx))
            if _entry_idx is not None:
                self._data[_entry_idx].songs = self._pl_list.GetStrings()
        self._save_to_db({masterlist_key: self._master_list.GetStrings(),
                          playlist_key: self._build_playlist()})
        self.setdirty(False)

    def OnRevert(self, _):
        # Discard edits and reload from the database, restoring selection.
        _pl_idx = self._item_list_w.GetNextItem(-1, state=wx.LIST_STATE_SELECTED)
        _res = {}
        self.getfromfs(_res)
        self.populate(_res)
        if _pl_idx != -1:
            self._item_list_w.SetItemState(_pl_idx, wx.LIST_STATE_SELECTED,
                                           wx.LIST_MASK_STATE)
        self.setdirty(False)

    def getdata(self, dict):
        dict[masterlist_key] = self._master_list.GetStrings()
        dict[playlist_key] = self._build_playlist()
        return dict
| [
"joliebig@fim.uni-passau.de"
] | joliebig@fim.uni-passau.de |
b83ad2d4e1821a822a0a025c4c8ac3d98b9ceca2 | e87aec694108cb1f76716260daf569bcb8091958 | /fluo/db/backends/postgresql_psycopg2.py | 0dc6fcb482eacb73871660aaf300340fe45c5048 | [
"MIT"
] | permissive | rsalmaso/django-fluo | a283b8f75769ac6e57fa321c607819899e0c31c8 | 340e3b4f9c1b4b09feccefb9b3ab2d26d59fac2b | refs/heads/master | 2023-01-12T01:37:06.975318 | 2020-12-01T17:13:11 | 2020-12-01T17:13:11 | 48,948,936 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,233 | py | # Copyright (C) 2007-2020, Raffaele Salmaso <raffaele@salmaso.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .postgresql import Backend as Postgresql
__all__ = ["Backend"]
class Backend(Postgresql):
    """PostgreSQL backend accessed via the psycopg2 driver.

    Identical to the base ``postgresql`` backend; presumably kept as a
    separate module so engine paths ending in ``postgresql_psycopg2``
    keep working — confirm against the project's settings conventions.
    """
    pass
| [
"raffaele@salmaso.org"
] | raffaele@salmaso.org |
0ce83cdf66c9e20b1fde60f7629d8e5ee1e222f4 | f6fc115dfe298305b8fab3585014fd880db283c0 | /auth/messageQueue.py | 31ac2ef7f768fafcc5f760d5bfc818e5a6ce6537 | [] | no_license | yakovdom/DC | 120f7410260c0944e72bfa8c333c1216a258e3b4 | d416905155be23436270359271c5ecc179ed4c61 | refs/heads/master | 2023-08-21T00:30:44.358627 | 2020-06-13T07:21:28 | 2020-06-13T07:21:28 | 245,479,807 | 0 | 0 | null | 2023-08-14T22:10:08 | 2020-03-06T17:29:11 | Python | UTF-8 | Python | false | false | 1,295 | py | import pika
from config import get_config
config = get_config()

# RabbitMQ connection settings, all read from the application config.
QUEUE_HOST = config['rabbitmq']['host']
QUEUE_PORT = config['rabbitmq']['port']
QUEUE_LOGIN = config['rabbitmq']['user']
QUEUE_PSWD = config['rabbitmq']['password']
QUEUE_NAME = config['rabbitmq']['queue']
class MessageQueue:
    """Lazily connected publisher for the configured RabbitMQ queue.

    The broker connection is established (and re-established) on demand by
    :meth:`send_message`, so constructing the object never touches the
    network.
    """

    def __init__(self):
        credentials = pika.PlainCredentials(QUEUE_LOGIN, QUEUE_PSWD)
        self.parameters = pika.ConnectionParameters(QUEUE_HOST,
                                                    QUEUE_PORT,
                                                    '/',
                                                    credentials)
        # The original left these unset and relied on an AttributeError in
        # send_message to trigger the first connect; make the state explicit.
        self.connection = None
        self.channel = None

    def _connect(self):
        # (Re)open the blocking connection and make sure the queue exists.
        self.connection = pika.BlockingConnection(self.parameters)
        self.channel = self.connection.channel()
        self.channel.queue_declare(queue=QUEUE_NAME)

    def send_message(self, message, retries=3):
        """Publish *message*, reconnecting and retrying on broker errors.

        Raises the last error after *retries* failed attempts instead of
        recursing without bound as the previous implementation could.
        """
        last_error = None
        for _ in range(retries):
            try:
                if self.channel is None:
                    self._connect()
                self.channel.basic_publish(exchange='',
                                           routing_key=QUEUE_NAME,
                                           body=message)
                return
            except Exception as ex:
                from sys import stdout as st
                st.write('\n\ntrouble {} \n\n'.format(ex))
                st.flush()
                self.channel = None  # force a fresh connection next attempt
                last_error = ex
        raise last_error

    def close(self):
        """Close the broker connection if one was ever opened."""
        if self.connection is not None:
            self.connection.close()
"yack_domnicky@mail.ru"
] | yack_domnicky@mail.ru |
ec628d33783692cf774a1407c6b159164dae7a58 | 703d4b42e0998a4ecd07a5beab8b4e63a4ef8dc3 | /accounts/views.py | 0d7929a5f6799f4916c2bca862c6a5c708f0e7d8 | [
"MIT"
] | permissive | Planik64/petstagram | d32095bdd8ad7873235acdcf09b7fc39c59b0a0b | 8dcf2966c1e19d9c6f36c39601cf8ff776fef8fb | refs/heads/main | 2023-04-13T00:08:54.976002 | 2021-04-20T21:50:08 | 2021-04-20T21:50:08 | 359,918,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | from django.contrib.auth import login, logout
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
# Create your views here.
from accounts.forms import UserProfileForm, SignUpForm
from accounts.models import UserProfile
def user_profile(request, pk=None):
    """Show a user's profile page, or save profile edits on POST.

    With no *pk* the page belongs to the requesting user; otherwise the
    user with that primary key is shown.
    """
    profile_owner = User.objects.get(pk=pk) if pk is not None else request.user
    if request.method != 'GET':
        # Bind the submitted data to the owner's profile and save when valid.
        # (Invalid submissions are silently discarded, as before.)
        form = UserProfileForm(request.POST, request.FILES,
                               instance=profile_owner.userprofile)
        if form.is_valid():
            form.save()
        return redirect('current user profile')
    context = {
        'profile_user': profile_owner,
        'profile': profile_owner.userprofile,
        'pets': profile_owner.pet_set.all(),
        'form': UserProfileForm(),
    }
    return render(request, 'accounts/user_profile.html', context)
def signup_user(request):
    """Render the sign-up form, or create the account on submission.

    On a valid submission the user and an empty UserProfile are created,
    the user is logged in, and the browser is redirected to their profile.
    """
    if request.method != 'GET':
        form = SignUpForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            UserProfile(user=new_user).save()
            login(request, new_user)
            return redirect('current user profile')
        # Re-render with the bound form so validation errors are shown.
        return render(request, 'accounts/signup.html', {'form': form})
    return render(request, 'accounts/signup.html', {'form': SignUpForm()})
def signout_user(request):
    # End the current session and send the visitor back to the landing page.
    logout(request)
    return redirect('index')
"plamen.b.nikolov@gmail.com"
] | plamen.b.nikolov@gmail.com |
73bbab25409bb3a778ef3dd83a746c1a3afa4f41 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/dnsresolver/azure-mgmt-dnsresolver/generated_samples/forwarding_rule_patch.py | ec4f075536336909b5c46cae450b85e6328d0b0b | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 1,788 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.dnsresolver import DnsResolverManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-dnsresolver
# USAGE
python forwarding_rule_patch.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    # Credentials are resolved from the environment (AZURE_CLIENT_ID,
    # AZURE_TENANT_ID, AZURE_CLIENT_SECRET) by DefaultAzureCredential.
    client = DnsResolverManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="abdd4249-9f34-4cc6-8e42-c2e32110603e",
    )
    # PATCH the forwarding rule: disable it and replace its metadata.
    response = client.forwarding_rules.update(
        resource_group_name="sampleResourceGroup",
        dns_forwarding_ruleset_name="sampleDnsForwardingRuleset",
        forwarding_rule_name="sampleForwardingRule",
        parameters={"properties": {"forwardingRuleState": "Disabled", "metadata": {"additionalProp2": "value2"}}},
    )
    print(response)


# x-ms-original-file: specification/dnsresolver/resource-manager/Microsoft.Network/stable/2022-07-01/examples/ForwardingRule_Patch.json
if __name__ == "__main__":
    main()
"noreply@github.com"
] | noreply@github.com |
c68617a69d1bfff66dcf169f3dd51fb76e870386 | f20e531a213f6892991653e7c6f2288f07c15f9c | /override/importer.py | b1de347f19b6a5b9845017b4ecd2c861cdef1048 | [
"Apache-2.0",
"MIT"
] | permissive | Clever/clever-python | 54ec744ef997d2c739ea056ae223aa35ef487876 | c123d339f9af16aa32405cdf4051ad4afbdc5bd2 | refs/heads/master | 2022-07-09T11:33:03.804757 | 2022-06-24T01:38:24 | 2022-06-24T01:38:24 | 7,064,597 | 18 | 21 | Apache-2.0 | 2022-06-24T01:38:35 | 2012-12-08T06:12:45 | Python | UTF-8 | Python | false | false | 950 | py | # Imports needed in setup.py and __init__.py
def import_json():
    """Return a usable JSON module (stdlib ``json`` or ``simplejson``).

    Prefers the standard-library ``json`` when it exposes ``loads`` (absent
    on some very old Pythons), otherwise falls back to ``simplejson``.
    Raises ImportError with installation hints when neither is usable.
    """
    stdlib_json_present = False
    try:
        import json
    except ImportError:
        pass
    else:
        if hasattr(json, 'loads'):
            return json
        # A 'json' module exists but lacks the expected interface.
        stdlib_json_present = True
    try:
        import simplejson
    except ImportError:
        if stdlib_json_present:
            raise ImportError("Clever requires a JSON library with the same interface as the Python 2.6 'json' library. You appear to have a 'json' library with a different interface. Please install the simplejson library. HINT: Try installing the python simplejson library via 'pip install simplejson' or 'easy_install simplejson'.")
        raise ImportError("Clever requires a JSON library, which you do not appear to have. Please install the simplejson library. HINT: Try installing the python simplejson library via 'pip install simplejson' or 'easy_install simplejson'.")
    return simplejson
| [
"amelia.jones@clever.com"
] | amelia.jones@clever.com |
99e3c192c64912e7758a796e19378225ef653ade | 91cee59084b5aae6e64cc98d36a5fe665ed910e0 | /智能软硬件 (python全栈系列)/Micropython_esp32/bluetooth(demo)/ble.py | 5eb63f213ee53f26d035b572ba9568512d35249e | [] | no_license | ekongyun/History | 46cabcec17a58720a67d3398b00b8aafe5645092 | 04ea8bc71dc87fa957670fbd21e25739291b1027 | refs/heads/master | 2020-07-30T04:02:20.568594 | 2019-09-22T02:23:14 | 2019-09-22T02:23:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 485 | py | import bluetooth
bt = bluetooth.Bluetooth()
bt.active(1)  # power the radio on
# NOTE(review): advertise() args look like (interval, name) — the interval
# unit is port-specific; confirm against this MicroPython port's docs.
bt.advertise(100, 'MicroPython')
print('----')
# Nordic UART Service (NUS) UUIDs: tx notifies/reads toward the central,
# rx accepts writes from it.
tx = bluetooth.Characteristic('6E400002-B5A3-F393-E0A9-E50E24DCCA9E', bluetooth.FLAG_READ|bluetooth.FLAG_NOTIFY)
rx = bluetooth.Characteristic('6E400003-B5A3-F393-E0A9-E50E24DCCA9E', bluetooth.FLAG_WRITE)
s = bt.add_service('6E400001-B5A3-F393-E0A9-E50E24DCCA9E', [tx, rx])
tx.write('foo')  # seed the readable value
def callback(char, data):
    # Invoked by the stack whenever the central writes to rx.
    print('on update data:', data)
rx.on_update(callback)
"ywzsunny@buaa.edu.cn"
] | ywzsunny@buaa.edu.cn |
2818d0529d9825314d6b9002411e1bf622ffdedd | dc55688007e03dff5292d3d2f72c0bfc28cc6f2f | /models.py | 69f8fd273d7063c97c09fc8e445a5be4d84585a4 | [] | no_license | pscohn/micro.py | 2d3cf5591b79ec6f55f36af2d8366aa6700573a1 | 451b2a791cac9f31f78a0e2f4c39bc9b7866e08c | refs/heads/master | 2016-09-05T20:38:18.724689 | 2015-02-10T22:50:24 | 2015-02-10T22:50:24 | 30,336,754 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,540 | py | import re
import feedparser
import orm as model
class User(model.Model):
    """An application account.

    ``default_user`` marks the single account the app opens by default;
    ``unread_only`` is that user's display preference.
    """
    username = model.Field()
    default_user = model.Field(default=False)
    unread_only = model.Field(default=False)

    def subscribed(self):
        """Return the Feed objects this user is subscribed to."""
        return [Feed.get(id=sub.feed.id)
                for sub in Subscription.all(user=self)]

    def set_default_user(self):
        """Make this user the default, demoting the current default user."""
        current = User.get(default_user=True)
        current.default_user = False
        current.save()
        self.default_user = True
        self.save()

    @classmethod
    def get_default_user(cls):
        """Return the user currently flagged as the default."""
        return User.get(default_user=True)
class Feed(model.Model):
    """A single RSS feed and the user who added it."""
    user = model.ForeignKey(User)
    title = model.Field()
    link = model.Field()
    rss = model.Field()

    def get_feed(self):
        """Download and parse this feed's RSS document."""
        return feedparser.parse(self.rss)

    @classmethod
    def feed_exists(cls, link):
        """True when a feed with this site link is already stored."""
        return not Feed.is_unique(link=link)
class Subscription(model.Model):
    # Join table recording which user follows which feed.
    feed = model.ForeignKey(Feed)
    user = model.ForeignKey(User)
class FeedList(model.Model):
    # A named grouping of feeds owned by one user.
    name = model.Field()
    user = model.ForeignKey(User)

    @classmethod
    def check_unique(cls, user, name):
        """True when *user* has no feed list called *name* (per is_unique)."""
        return FeedList.is_unique(user=user, name=name)
class FeedListItem(model.Model):
    """Join row placing one Feed inside one FeedList."""
    feed_list = model.ForeignKey(FeedList)
    feed = model.ForeignKey(Feed)

    @classmethod
    def get_feeds(cls, feed_list):
        """Return the Feed objects contained in *feed_list*."""
        return [item.feed for item in FeedListItem.all(feed_list=feed_list)]
class Entry(model.Model):
    """A single article/entry fetched from a feed."""
    feed = model.ForeignKey(Feed)
    user = model.ForeignKey(User)
    starred = model.Field(default=False)
    title = model.Field()
    updated = model.Field()
    created = model.Field()
    author = model.Field()
    content = model.Field()
    link = model.Field()

    def set_source(self):
        """Derive a short source domain (e.g. "example.com") from the link."""
        source_sub = re.sub('https?://', '', self.link)
        source = source_sub.split('/')[0]
        source = source.split('.')
        # Keep only the last two labels of the host name.
        source = '.'.join(source[-2:])
        self.source = source

    def get_source(self):
        """Compute and return the entry's source domain."""
        self.set_source()
        return self.source

    @classmethod
    def entry_exists(cls, link):
        """True when an entry with this link is already stored."""
        return not Entry.is_unique(link=link)

    @classmethod
    def get_starred(cls, user):
        """Return *user*'s starred entries as a list."""
        return list(Entry.all(user=user, starred=True))

    @classmethod
    def search_all(cls, user, query):
        """Return *user*'s entries whose title or content contains *query*.

        *query* is matched against lowercased text, so callers should pass
        it lowercased.
        """
        matches = []
        for e in Entry.all(user=user):
            # BUG FIX: the original tested `c.content` — `c` is undefined,
            # so any search raised NameError; it must be `e.content`.
            if query in e.title.lower() or query in e.content.lower():
                matches.append(e)
        return matches
class EntryList(model.Model):
    # A named, user-owned collection of saved entries.
    name = model.Field()
    user = model.ForeignKey(User)
entry_list = model.ForeignKey(EntryList)
entry = model.ForeignKey(Entry)
@classmethod
def get_articles(cls, entry_list):
articles = []
for i in EntryListItem.all(entry_list=entry_list):
articles.append(Entry.get(id=i.id))
return articles
class Filter(model.Model):
    # A named, user-owned filter; FilterItem links matching entries to it.
    name = model.Field()
    user = model.ForeignKey(User)
class FilterItem(model.Model):
    """Join row linking an Entry matched by a Filter to that Filter."""
    filter = model.ForeignKey(Filter)
    entry = model.ForeignKey(Entry)

    @classmethod
    def get_articles(cls, filter_, limit='ALL', offset=0):
        """Return the Entry objects matched by *filter_*, newest first."""
        items = FilterItem.all(separator='OR', order_by='-id', limit=limit,
                               offset=offset, filter=filter_)
        return [Entry.get(id=item.entry.id) for item in items]
| [
"pscohn@gmail.com"
] | pscohn@gmail.com |
4171d90d4b1c5c3de4ae8c53bd8bdef3775a16e4 | 421a9f7ba8e0b381e3c19468bd809ba298150bed | /main.py | 1ad56b79f8af1d020528db4f5cab10945123f8d0 | [] | no_license | wvzi/Thunder | 743ba502f5867213af28b568ecca91f25f12c164 | a53c8a5dfb6f46bda1561cbf966feea3cf7d5fcc | refs/heads/main | 2023-08-07T17:22:04.985555 | 2021-09-09T19:10:38 | 2021-09-09T19:10:38 | 404,780,123 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,415 | py |
thunder = """
████████╗██╗░░██╗██╗░░░██╗███╗░░██╗██████╗░███████╗██████╗░
╚══██╔══╝██║░░██║██║░░░██║████╗░██║██╔══██╗██╔════╝██╔══██╗
░░░██║░░░███████║██║░░░██║██╔██╗██║██║░░██║█████╗░░██████╔╝
░░░██║░░░██╔══██║██║░░░██║██║╚████║██║░░██║██╔══╝░░██╔══██╗
░░░██║░░░██║░░██║╚██████╔╝██║░╚███║██████╔╝███████╗██║░░██║
░░░╚═╝░░░╚═╝░░╚═╝░╚═════╝░╚═╝░░╚══╝╚═════╝░╚══════╝╚═╝░░╚═╝
"""
print(thunder)

import discord,json,os,random
from discord.ext import commands

# Load runtime settings from config.json.
with open("config.json") as file:
    info = json.load(file)

token = info["token"]        # Discord bot token
delete = info["autodelete"]  # seconds before a DM'd account message self-deletes
prefix = info["prefix"]      # command prefix, e.g. "!"

bot = commands.Bot(command_prefix=prefix)
@bot.event
async def on_ready():
    # Fired once the gateway connection is up and the bot is usable.
    print("Thunder is running smooth!")
@bot.command()  # Stock command
async def stock(ctx):
    """Reply with an embed listing how many accounts remain per category.

    Each file in the Accounts/ directory is one category; its line count
    is the remaining stock.
    """
    stockmenu = discord.Embed(title="Account Stock", description="")
    for filename in os.listdir("Accounts"):
        # os.path.join instead of the original hard-coded "Accounts\\"
        # so the bot also runs outside Windows.
        with open(os.path.join("Accounts", filename)) as f:
            ammount = len(f.read().splitlines())
        name = (filename[0].upper() + filename[1:].lower()).replace(".txt", "")
        stockmenu.description += f"*{name}* - {ammount}\n"
    await ctx.send(embed=stockmenu)
@bot.command()  # main gen command
async def gen(ctx, name=None):
    """DM the caller one random account of the requested type, then remove
    every matching line from that type's stock file."""
    if name is None:
        await ctx.send("Please, Specify what type of account you want")
        return
    name = name.lower() + ".txt"
    if name not in os.listdir("Accounts"):
        await ctx.send(f"Account does not exist! `{prefix}stock`")
        return
    # os.path.join instead of the hard-coded Windows-only "Accounts\\" path.
    path = os.path.join("Accounts", name)
    with open(path) as file:
        lines = file.read().splitlines()
    if len(lines) == 0:
        # (typo "accountsd" in the original message fixed)
        await ctx.send("Sorry! We do not have stock on these accounts")
        return
    account = random.choice(lines)
    try:
        await ctx.author.send(f"`{account}`\n\nMessage will delete in {delete} seconds!",
                              delete_after=delete)
    except Exception:  # was a bare except; closed DMs land here
        await ctx.send("Error, Please Turn on your DMs!")
    else:
        await ctx.send("Sent the account to your DMs!")
    # Consume the account: rewrite the file in a single pass instead of the
    # original truncate-then-append loop.  Like the original, every line
    # equal to the chosen account is removed (duplicates included).
    # NOTE(review): the original appears to consume the account even when
    # the DM failed; kept that behavior — confirm it is intended.
    with open(path, "w") as file:
        file.writelines(line + "\n" for line in lines if line != account)


bot.run(token)
"noreply@github.com"
] | noreply@github.com |
9368280d9ee457a1cfa73ecf4480b343a178dfb7 | cbe003e08110a88b33b953d4597da286cf2e8ea5 | /Models.py | 8d802ea8b19fd3fffd34ca3f5ea04f880dad3e2e | [] | no_license | nanoop14/IML411CS_Anoop_Nagabhushana | 3a35419c71289dcc94e828789667d45589fce3e2 | 8c825f85506a4cbcb9fb73a54c61663e3cf0c679 | refs/heads/master | 2020-04-10T06:14:21.522892 | 2018-12-08T05:17:33 | 2018-12-08T05:17:33 | 160,849,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,316 | py | import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import numpy as np
from sklearn.linear_model import LogisticRegression
import util
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
def run_nn(x_train, y_train, x_val, y_val):
    """Train a dense binary classifier (300-d inputs) and plot accuracy.

    Architecture: five 128-unit ReLU layers, one 64-unit ReLU layer, and a
    sigmoid output, all L2-regularized.  The trained model is saved under
    model_performance/Neural_network/nn and the Keras History is returned.
    """
    seed = 7
    # NOTE(review): this only seeds numpy, not TensorFlow, so runs are not
    # fully reproducible — confirm whether TF seeding is wanted.
    np.random.seed(seed)
    l2 = tf.keras.regularizers.l2
    model = tf.keras.Sequential()
    model.add(layers.Dense(128, kernel_regularizer=l2(0.001),
                           activation='relu', input_dim=300))
    # Four more identical 128-unit hidden layers.
    for _ in range(4):
        model.add(layers.Dense(128, kernel_regularizer=l2(0.001), activation='relu'))
    model.add(layers.Dense(64, kernel_regularizer=l2(0.001), activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train,
                        validation_data=(x_val, y_val),
                        epochs=15, batch_size=32, verbose=2)
    print(history.history.keys())
    print("saving nn model:~/model_performance/Neural_network/nn")
    model.save(filepath='model_performance/Neural_network/nn', overwrite=True)
    # Plot accuracy curves straight from the captured History (the original
    # re-read them via model.history and also had a dead, effect-free
    # `history.history.keys()` expression — both cleaned up).
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'Val'], loc='upper left')
    plt.show()
    return history
def run_lr(x_train, y_train, x_val, y_val):
    """Sweep inverse-regularization strengths for logistic regression and
    plot train vs. validation accuracy for each."""
    possible_c_values = [1, 2]  # tune the sweep here
    val_accuracy = []
    train_accuracy = []
    for c in possible_c_values:
        clf = LogisticRegression(solver='sag', random_state=200, C=c)
        clf.fit(x_train, y_train)
        print("For c: {0}".format(c))
        val_accuracy.append(
            util.getModelAccuracy(y_val, clf.predict(x_val),
                                  np.array([0, 1]), print_details=True))
        train_accuracy.append(
            util.getModelAccuracy(y_train, clf.predict(x_train),
                                  np.array([0, 1])))
    util.getPlot(possible_c_values, val_accuracy, "Lr c values",
                 "accuracy", "validation acc", colour='b')
    util.getPlot(possible_c_values, train_accuracy, "Lr c values",
                 "accuracy", "train acc", colour='r')
def run_adaboost(x_train, y_train, x_val, y_val):
    """Sweep estimator counts for AdaBoost and plot train vs. validation
    accuracy for each."""
    possible_c_values = [1, 2]  # tune the sweep here (typo fixed from "vlaues")
    ada_accuracy = []
    ada_train_accuracy = []
    for c in possible_c_values:
        ada = AdaBoostClassifier(n_estimators=c)
        ada.fit(x_train, y_train)
        print("For c: {0}".format(c))
        ada_accuracy.append(
            util.getModelAccuracy(y_val, ada.predict(x_val),
                                  np.array([0, 1]), print_details=True))
        ada_train_accuracy.append(
            util.getModelAccuracy(y_train, ada.predict(x_train),
                                  np.array([0, 1])))
    util.getPlot(possible_c_values, ada_accuracy, "ADABoost number of trees",
                 "accuracy", "validation acc", colour='b')
    # BUG FIX: the original omitted the "accuracy" y-label argument here,
    # shifting "train acc" into the ylabel slot (every sibling getPlot call
    # passes xlabel, ylabel, label).
    util.getPlot(possible_c_values, ada_train_accuracy, "ADABoost number of trees",
                 "accuracy", "train acc", colour='r')
def run_rf(x_train, y_train, x_val, y_val):
    """Sweep tree counts for a depth-7 random forest and plot train vs.
    validation accuracy for each."""
    possible_c_values = [1, 2]  # tune the sweep here
    val_accuracy = []
    train_accuracy = []
    for c in possible_c_values:
        forest = RandomForestClassifier(n_estimators=c, max_depth=7)
        forest.fit(x_train, y_train)
        print("For c: {0}".format(c))
        val_accuracy.append(
            util.getModelAccuracy(y_val, forest.predict(x_val),
                                  np.array([0, 1]), print_details=True))
        train_accuracy.append(
            util.getModelAccuracy(y_train, forest.predict(x_train),
                                  np.array([0, 1])))
    util.getPlot(possible_c_values, val_accuracy, "Rf c values for depth 7",
                 "accuracy", "validation acc", colour='b')
    util.getPlot(possible_c_values, train_accuracy, "Rf c values for depth 7",
                 "accuracy ", "train acc", colour='r')
| [
"noreply@github.com"
] | noreply@github.com |
598461f24de83038d812ad637e5b7f69e0d078df | 3fbd05e539e12e05fd0b75109a0d7d3e36c61946 | /app/migrations/0004_auto_20200402_0929.py | 13ab601e92dbe3440b64def61678bb7b9ae02c72 | [] | no_license | Diana-Toledo/Naturopatia | 1eaa8ed6a663f755a207c18d6266bba8a7708648 | 74c6d5063aef1ae46ade17209e705afacaf4117c | refs/heads/main | 2023-07-02T19:48:01.299930 | 2021-08-01T16:20:19 | 2021-08-01T16:20:19 | 391,036,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | # Generated by Django 2.2.11 on 2020-04-02 07:29
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace the old ``Inicio`` model with the new ``Info`` model
    (site title/text, logo image and contact phone)."""

    dependencies = [
        ('app', '0003_auto_20200331_1106'),
    ]

    operations = [
        # New table holding the site's display information.
        migrations.CreateModel(
            name='Info',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('titulo', models.CharField(max_length=80, null=True)),
                ('texto', models.TextField(null=True)),
                ('logo', models.ImageField(upload_to='static/img')),
                ('tel', models.TextField(null=True)),
            ],
        ),
        # Drop the model it supersedes.
        migrations.DeleteModel(
            name='Inicio',
        ),
    ]
| [
"diaelitg@gmail.com"
] | diaelitg@gmail.com |
e5f55207dd9a043e94437287cbd5b94a341aeb9a | 5ec3dc6d172d758f9f547686b68cbbe903ab3161 | /test/no_running_jobs_test.py | 7740a6977d3edacfdbd71a677bae1499bce61a23 | [] | no_license | dixudx/jenkinsflow | ea8bdf4b8abdfb06ab6e05f5c5a83a1c0744f849 | 2c07f8fc2951d9167dcd08ae2e1f6a8afc32f7f5 | refs/heads/master | 2020-12-26T00:25:27.092813 | 2015-05-19T19:41:47 | 2015-05-19T19:41:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,964 | py | # Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from pytest import raises
from jenkinsflow.flow import serial, JobNotIdleException
from jenkinsflow.mocked import hyperspeed
from .cfg import ApiType
from .framework import api_select
from .framework.utils import assert_lines_in
def test_no_running_jobs(capsys):
with api_select.api(__file__, login=True) as api:
api.flow_job()
api.job('j1', exec_time=50, max_fails=0, expect_invocations=1, expect_order=None, invocation_delay=0, unknown_result=True)
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as ctrl1:
ctrl1.invoke_unchecked('j1')
sout, _ = capsys.readouterr()
assert_lines_in(sout, "unchecked job: 'jenkinsflow_test__no_running_jobs__j1' UNKNOWN - RUNNING")
# Make sure job has actually started before entering new flow
hyperspeed.sleep(1)
with raises(JobNotIdleException) as exinfo:
with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as ctrl1:
ctrl1.invoke('j1')
assert "job: 'jenkinsflow_test__no_running_jobs__j1' is in state RUNNING. It must be IDLE." in exinfo.value.message
def test_no_running_jobs_unchecked(capsys):
    """Same as the checked variant: even an unchecked invoke requires the job to be IDLE."""
    with api_select.api(__file__, login=True) as api:
        api.flow_job()
        api.job('j1', exec_time=50, max_fails=0, expect_invocations=1, expect_order=None, invocation_delay=0, unknown_result=True)

        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as ctrl1:
            ctrl1.invoke_unchecked('j1')

        sout, _ = capsys.readouterr()
        assert_lines_in(sout, "unchecked job: 'jenkinsflow_test__no_running_jobs_unchecked__j1' UNKNOWN - RUNNING")

        # Give the first (unchecked) invocation time to actually start running.
        hyperspeed.sleep(1)

        with raises(JobNotIdleException) as exinfo:
            with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as ctrl1:
                ctrl1.invoke_unchecked('j1')

        assert "unchecked job: 'jenkinsflow_test__no_running_jobs_unchecked__j1' is in state RUNNING. It must be IDLE." in exinfo.value.message
def test_no_running_jobs_jobs_allowed():
    """With require_idle=False a second flow may invoke a job that is still RUNNING."""
    with api_select.api(__file__, login=True) as api:
        api.flow_job()
        # The MOCK api only sees a single invocation; a real Jenkins sees two.
        exp_invocations = 2 if api.api_type != ApiType.MOCK else 1
        unknown_result = False if api.api_type != ApiType.MOCK else True
        api.job('j1', exec_time=20, max_fails=0, expect_invocations=exp_invocations, expect_order=None,
                invocation_delay=0, unknown_result=unknown_result)

        with serial(api, timeout=70, job_name_prefix=api.job_name_prefix) as ctrl1:
            ctrl1.invoke_unchecked('j1')

        hyperspeed.sleep(1)

        # TODO
        if api.api_type != ApiType.MOCK:
            with serial(api, timeout=70, job_name_prefix=api.job_name_prefix, require_idle=False) as ctrl1:
                ctrl1.invoke('j1')
| [
"lhn@hupfeldtit.dk"
] | lhn@hupfeldtit.dk |
6922d6d41191fd0681fdcee505890f0d0c1cf1c4 | 960077138e79a6f28b2f8bfdd32664a06e6cbb13 | /sig_app.py | 1b79c0ae736d3326a136a8064bfac92d8b57d190 | [] | no_license | vscherbo/apps | 3c41ab07e0c84d4cff48915f161f82b8993bdbc8 | 9bdca690006ccbfe2e0985d5572d3f2ed3d2e587 | refs/heads/master | 2022-12-22T12:22:36.820765 | 2022-12-09T10:43:54 | 2022-12-09T10:43:54 | 246,818,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | #!/usr/bin/env python
""" Application with signal handler
"""
import time
import signal
import logging
class Application:
    """Base class for long-running applications that stop cleanly on SIGINT.

    Subclasses override ``_main`` with one unit of work; ``main_loop``
    repeats it until the process receives SIGINT (Ctrl-C), at which point
    ``terminated`` is set and the loop exits after the current iteration.
    """

    def __init__(self):
        # NullHandler keeps library logging silent unless the app configures logging.
        logging.getLogger(__name__).addHandler(logging.NullHandler())
        # Fixed: the lambda parameters used to be named ``signal``/``frame``,
        # shadowing the ``signal`` module inside the lambda body.
        signal.signal(signal.SIGINT, lambda signum, frame: self._signal_handler())
        self.terminated = False  # becomes True once SIGINT is received

    def _signal_handler(self):
        """Record the termination request; ``main_loop`` exits on its next check."""
        self.terminated = True
        logging.debug('%s signal_handler', __name__)

    def _main(self):
        """Default unit of work; subclasses are expected to override this."""
        logging.debug("Default _main() proc: %s", self.__dict__)
        time.sleep(3)

    def main_loop(self):
        """Run ``_main`` repeatedly until a SIGINT marks the app terminated."""
        while not self.terminated:
            self._main()

    def method_1(self):
        """ some method """
if __name__ == '__main__':
    # Run the default loop until the user interrupts with Ctrl-C (SIGINT).
    APP = Application()
    APP.main_loop()
    print("The app is terminated, exiting ...")
| [
"vscherbo@gmail.com"
] | vscherbo@gmail.com |
503291f3903d71c769695f844e81e4686d85cf7d | 0c4f5e5fa9c09cd87dae8e81d208a36354cf2d2d | /para_placement/config.py | c98bfefa873aab498e712dcf1b6f05162ed750c9 | [] | no_license | fnrg-nfv/flexchain | 67b1d4d781312f9805181b2465b1bbf7efa5be2d | 1bb5c9f8f054525f79a7fea34936a13f7a6a5419 | refs/heads/master | 2022-12-18T19:14:13.119054 | 2020-09-30T10:27:58 | 2020-09-30T10:27:58 | 196,699,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 944 | py | import random
import enum
def TOPO_CONFIG():
    """Bare function used purely as a namespace for topology settings."""
    return None


# Per-link latency value (callable so each query can draw a fresh value).
TOPO_CONFIG.latency = lambda: 0.001
# Per-link bandwidth value.
TOPO_CONFIG.bandwidth = lambda: 1000
# Per-server CPU capacity, drawn uniformly from [4000, 8000].
TOPO_CONFIG.cpu = lambda: random.randint(4000, 8000)
def SFC_CONFIG():
    """Bare function used purely as a namespace for SFC (service chain) settings."""
    return None


# Number of VNFs in a chain: uniform in [3, 7].
SFC_CONFIG.size = lambda: random.randint(3, 7)
# Required throughput: Gaussian(100, 50) clipped to [10, 200].
SFC_CONFIG.r_throughput = lambda: min(max(random.gauss(100, 50), 10), 200)
# Required end-to-end latency: uniform in [0.5, 1.5].
SFC_CONFIG.r_latency = lambda: random.uniform(0.5, 1.5)
# Per-VNF CPU overhead: uniform in [1000, 2000].
SFC_CONFIG.vnf_cpu = lambda: random.randint(1000, 2000)
# Per-VNF latency overhead: uniform in [0.045, 0.3].
SFC_CONFIG.vnf_latency = lambda: random.uniform(0.045, 0.3)
# NOTE(review): meanings inferred from names only — confirm against usage elsewhere.
K = 8000        # presumably an upper bound on candidate configurations kept
K_MIN = 128     # presumably the corresponding lower bound
GC_BFS = False  # presumably toggles a BFS-based garbage-collection/search variant
class Setting(enum.Enum):
    """Placement algorithm variants selectable via the module-level ``state``."""
    flexchain = 1
    nfp_naive = 2
    parabox_naive = 3
    no_para = 4


# Currently selected algorithm variant.
state = Setting.flexchain
| [
"virgilma28@gmail.com"
] | virgilma28@gmail.com |
b43f005983c986b9fbe055bd62c1254d8d65c0d0 | c8533fb7a247bfd7511b1dc86a9bba98602f0398 | /portfolio/views.py | 2092d09cc2ff6c869417de7b3044d048b9fec576 | [] | no_license | karanraghani/MySite | f9cf7fe34f8a05895931e65521021c66e882a6bc | 062dfd359e5cf27e5d34e98ad2b92ada9780c214 | refs/heads/master | 2020-03-18T17:46:01.777246 | 2018-06-03T08:28:06 | 2018-06-03T08:28:06 | 135,049,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,467 | py | from django.shortcuts import render, redirect
from portfolio.models import message
from portfolio.forms import contact_form
from django.template.loader import get_template
from django.core.mail import EmailMessage, send_mail
def home_page(request):
    """Render the portfolio landing page."""
    return render(request, 'portfolio/index.html', {})
#def resume(request):
def projects(request):
    """Render the projects listing page."""
    return render(request, 'portfolio/projects.html', {})
def contact_me(request):
    """Display the contact form; on valid POST, email the submission and redirect home."""
    form = contact_form
    if request.method == 'POST':
        form = contact_form(request.POST)
        if form.is_valid():
            name = request.POST.get('name','')
            email = request.POST.get('email','')
            website = request.POST.get('website','')
            text = request.POST.get('text','')
            # Render the notification body from a plain-text template.
            template = get_template('portfolio/email_template.txt')
            context = {
                'name': name,
                'email': email,
                'website': website,
                'text': text,
            }
            content = template.render(context)
            print (name,email,content)
            print ('email sending')
            '''
            email = EmailMessage(
                "New Message from Submission",
                content,
                "karanraghani.me",
                ['karanraghani14@gmail.com'],
            )
            email.send()
            '''
            # NOTE(review): the from_email argument looks like a domain,
            # not an email address — confirm this is intended.
            send_mail(
                "New Message from Submission",
                content,
                "karanraghani.me",
                ['karanraghani14@gmail.com'],
                fail_silently=False
            )
            print('email send')
            # send an email to me with the message
            return redirect(home_page)
    # NOTE(review): on GET, ``form`` is the form *class*, so ``form.errors``
    # here looks like it would fail — confirm against the template usage.
    return render(request, 'portfolio/contact_me.html', {'form': form, 'form_error':form.errors, })
"karanraghani14@gmail.com"
] | karanraghani14@gmail.com |
00d03cc2dd392561308b3019724e793ee7ea4fb1 | dff17b32038e14ad57e623b61b9e8aa5a540cf31 | /LOG_Analyzer/latency_analyzer.py | 2821e696295ba526ce394769a9e1dc2c02fbd498 | [] | no_license | supermt/log_analysis_rocksdb | 121649eaa328b34424d68d3f7dd291a00043ffec | 4ef995419d9e360513f6b8b9cf1d535e6aa51017 | refs/heads/master | 2020-07-26T20:47:39.402228 | 2019-09-30T04:50:27 | 2019-09-30T04:50:27 | 208,761,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,035 | py | #!/usr/bin/python3
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import random
def Average(lst):
    """Return the arithmetic mean of *lst*, rounded to two decimal places."""
    total = sum(lst)
    count = len(lst)
    return round(total / count, 2)
# Parse the run log, collecting per-key access counts and latency samples.
# Expected record format per line: "Point,user<KEY>,<LATENCY>" (latency in ns,
# per the axis labels below).
data = open('run_log_large.txt', 'r')
line = data.readline()
max_key = 0
min_key = 0xffffffffffffffff
values = []
x = 0
couting = {}    # key -> number of accesses
latencies = {}  # key -> list of observed latencies
while line:
    if line.split(',')[0] == "Point":
        value = line.split(',')[1]  # Delimiter is comma
        latency = int(line.split(',')[2].replace("\n", ""))
        value = int(value.replace("user", ""))
        if value > max_key:
            max_key = value
        if value < min_key:
            min_key = value
        couting[value] = couting.get(value, 0) + 1
        # latencies[value] = latencies.get(value,[]).append(latency)
        latencies[value] = latencies.get(value, [])
        latencies[value].append(latency)
    line = data.readline()

# Sort keys by ascending access count and build parallel lists of
# average/max/min latency for plotting.
x_r = []
y_r = []
avg_latency = []
max_latency = []
min_latency = []
temp = 0.0
for key, value in sorted(couting.items(), key=lambda item: item[1]):
    # temp += couting[key]
    x_r.append(key)
    y_r.append(value)
    avg_latency.append(Average(latencies[key]))
    max_latency.append(max(latencies[key]))
    min_latency.append(min(latencies[key]))
print(latencies[x_r[-1]])
# print range(min,max,(max-min)/100)
# y_r = np.array(y_r)
# y_r = y_r / temp

fig = plt.figure()
# creating a subplot
axes = fig.subplots(3)

# Row 0: average latency, overlaid with access counts on a log-scale twin axis.
axes[0].plot(avg_latency)
axes[0].set_ylabel("latency(ns)")
temp = axes[0].twinx()
temp.set_ylabel('access times')
temp.set_yscale('log')
temp.plot(y_r,'r')

# Row 1: maximum latency with the same overlay.
axes[1].plot(max_latency)
axes[1].set_ylabel("latency(ns)")
temp = axes[1].twinx()
temp.set_ylabel('access times')
temp.set_yscale('log')
temp.plot(y_r,'r')

# Row 2: minimum latency with the same overlay.
axes[2].plot(min_latency)
axes[2].set_ylabel("latency(ns)")
temp = axes[2].twinx()
temp.set_ylabel('access times')
temp.set_yscale('log')
temp.plot(y_r,'r')

# ax1.set_ylim(min(min_latency), max(max_latency))
# plt.title('Inserting data description')
plt.grid(True)
plt.show()
| [
"18007092757@163.com"
] | 18007092757@163.com |
8850dd5b3a9e29b53ecc9a07f7502c46a81ed2d7 | 2b79029e8c1c4650f5d80b21cd9297d13037c94c | /webapp/.ipynb_checkpoints/__init__-checkpoint.py | 986f4f66ded642f1dfe46f367c7fdda974e11ff1 | [
"MIT"
] | permissive | alxanderpierre/data-science | 9171c80c8e91cc3e1fdbc92ebee252dfea84af3a | deef7f949d1e227e716e7a59c1667cc1032e3157 | refs/heads/master | 2020-12-08T02:07:05.939277 | 2020-01-09T00:07:39 | 2020-01-09T00:07:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from .app import create_app
#APP is global variable
APP = create_app() | [
"kking1795@gmail.com"
] | kking1795@gmail.com |
348ec2eec4d21d7506aea88f63e6a2a997a674b6 | 131caeecc070839555b95382fe9c6ea77a618dce | /.history/Classiles/scynced_lights_20210615180248.py | a220c037ce3499a5a4636818e84bdd60366e17aa | [
"Unlicense"
] | permissive | minefarmer/Coding101-OOP | f128e34c95f5362b3d9a53bbac3d862c3f256263 | d5655977559e3bd1acf6a4f185a6121cc3b05ce4 | refs/heads/main | 2023-05-22T18:42:37.769345 | 2021-06-18T00:28:06 | 2021-06-18T00:28:06 | 376,620,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | """[Scynced Lights]
Class attributes are "shared"
"""
class Light:
    """A light; class attributes would be shared across all instances."""
    pass


a = Light()
b = Light()  # fixed: was ``Ligth()``, a typo raising NameError at import time
| [
"pgoldfarm@yahoo.com"
] | pgoldfarm@yahoo.com |
f2580f6105ea4a96c942696ed4662ee9691c1104 | 9763c31318311e8ffb0022a61184582e159411eb | /adagios/misc/forms.py | 15fb0effc1c9c650392f8983fe0b1638d26bd624 | [] | no_license | spil-sean/adagios | 832fa7bff9c4d7bb2238c450870274aa47bff42e | 03615724166d104196dbbda6a4025830ba58955b | refs/heads/master | 2021-01-16T19:12:17.024497 | 2013-05-30T13:05:44 | 2013-05-30T13:05:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,210 | py | # -*- coding: utf-8 -*-
#
# Copyright 2010, Pall Sigurdsson <palli@opensource.is>
#
# This script is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This script is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from django import forms
from django.core.mail import send_mail
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
import os.path
from adagios import settings
from pynag import Model, Control
from django.core.mail import EmailMultiAlternatives
import pynag.Parsers
import pynag.Control.Command
# (value, label) pairs for the ``topic`` field of ContactUsForm.
TOPIC_CHOICES = (
    ('general', 'General Suggestion'),
    ('bug', 'I think i have found a bug'),
    ('suggestion', 'I have a particular task in mind that i would like to do with Adagios'),
    ('easier', 'I have an idea how make a certain task easier to do'),
)
class ContactUsForm(forms.Form):
    """Feedback form; save() emails the submission to the adagios developers."""
    topic = forms.ChoiceField(choices=TOPIC_CHOICES)
    sender = forms.CharField(
        required=False,
        help_text="Optional email address if you want feedback from us",
    )
    message = forms.CharField(
        widget=forms.widgets.Textarea(attrs={'rows':15, 'cols':40}),
        help_text="See below for examples of good suggestions",
    )
    def save(self):
        """Send the feedback by mail; raises on delivery failure (fail_silently=False)."""
        from_address = 'adagios@adagios.opensource.is'
        to_address = ["palli@ok.is"]
        subject = "Suggestion from Adagios"
        sender = self.cleaned_data['sender']
        topic = self.cleaned_data['topic']
        message = self.cleaned_data['message']
        msg = """
topic: %s
from: %s
%s
""" % (topic,sender,message)
        send_mail(subject, msg, from_address, to_address, fail_silently=False)
class AdagiosSettingsForm(forms.Form):
    """Edits adagios' own configuration file (paths and feature toggles)."""
    nagios_config = forms.CharField(required=False, initial=settings.nagios_config, help_text="Path to nagios configuration file. i.e. /etc/nagios/nagios.cfg")
    destination_directory = forms.CharField(required=False, initial=settings.destination_directory, help_text="Where to save new objects that adagios creates.")
    nagios_url = forms.CharField(required=False, initial=settings.nagios_url, help_text="URL (relative or absolute) to your nagios webcgi. Adagios will use this to make it simple to navigate from a configured host/service directly to the cgi.")
    nagios_init_script = forms.CharField(help_text="Path to you nagios init script. Adagios will use this when stopping/starting/reloading nagios")
    nagios_binary = forms.CharField(help_text="Path to you nagios daemon binary. Adagios will use this to verify config with 'nagios -v nagios_config'")
    enable_githandler = forms.BooleanField(required=False, initial=settings.enable_githandler, help_text="If set. Adagios will commit any changes it makes to git repository.")
    enable_loghandler = forms.BooleanField(required=False, initial=settings.enable_loghandler, help_text="If set. Adagios will log any changes it makes to a file.")
    enable_authorization = forms.BooleanField(required=False, initial=settings.enable_authorization, help_text="If set. Users in Status view will only see hosts/services they are a contact for. Unset means everyone will see everything.")
    enable_status_view = forms.BooleanField(required=False, initial=settings.enable_status_view, help_text="If set. Enable status view which is an alternative to nagios legacy web interface. You will need to restart web server for the changes to take effect")
    warn_if_selinux_is_active = forms.BooleanField(required=False, help_text="Adagios does not play well with SElinux. So lets issue a warning if it is active. Only disable this if you know what you are doing.")
    pnp_filepath = forms.CharField(help_text="Full path to your pnp4nagios/index.php file. Adagios will use this to generate graphs")
    pnp_url = forms.CharField(help_text="Full or relative url to pnp4nagios web interface, adagios can use this to link directly to pnp")
    include = forms.CharField(required=False, help_text="Include configuration options from files matching this pattern")

    def save(self):
        """Persist every cleaned value into the adagios config file."""
        # First of all, if configfile does not exist, lets try to create it:
        if not os.path.isfile( settings.adagios_configfile ):
            open(settings.adagios_configfile, 'w').write("# Autocreated by adagios")
        for k,v in self.cleaned_data.items():
            Model.config._edit_static_file(attribute=k, new_value=v, filename=settings.adagios_configfile)
            self.adagios_configfile = settings.adagios_configfile
            #settings.__dict__[k] = v

    def __init__(self, *args,**kwargs):
        # Since this form is always bound, lets fetch current configfiles and prepare them as post:
        if 'data' not in kwargs or kwargs['data'] == '':
            kwargs['data'] = settings.__dict__
        super(self.__class__,self).__init__(*args,**kwargs)

    def clean_pnp_filepath(self):
        filename = self.cleaned_data['pnp_filepath']
        return self.check_file_exists(filename)

    def clean_destination_directory(self):
        filename = self.cleaned_data['destination_directory']
        return self.check_file_exists(filename)

    def clean_nagios_init_script(self):
        filename = self.cleaned_data['nagios_init_script']
        # "sudo <script>" is allowed; validate the script path after sudo.
        if filename.startswith('sudo'):
            self.check_file_exists(filename.split()[1])
        else:
            self.check_file_exists(filename)
        return filename

    def clean_nagios_binary(self):
        filename = self.cleaned_data['nagios_binary']
        return self.check_file_exists(filename)

    def clean_nagios_config(self):
        filename = self.cleaned_data['nagios_config']
        return self.check_file_exists(filename)

    def check_file_exists(self, filename):
        """ Raises validation error if filename does not exist """
        if not os.path.exists(filename):
            raise forms.ValidationError('No such file or directory')
        return filename

    def clean(self):
        cleaned_data = super(self.__class__, self).clean()
        for k,v in cleaned_data.items():
            # Convert all unicode to quoted strings
            if type(v) == type(u''):
                cleaned_data[k] = str('''"%s"''' % v)
            # Convert all booleans to True/False strings
            elif type(v) == type(False):
                cleaned_data[k] = str(v)
        return cleaned_data
class EditAllForm(forms.Form):
    """ This form intelligently modifies all attributes of a specific type.
    """
    def __init__(self, object_type, attribute, new_value, *args, **kwargs):
        self.object_type = object_type
        self.attribute = attribute
        self.new_value = new_value
        # NOTE(review): ``self`` is passed as a positional argument and
        # args/kwargs are not unpacked here — confirm this is intentional.
        super(self.__class__, self).__init__(self, args, kwargs)
        # Find all objects of this type whose attribute differs from new_value.
        filter = {}
        filter['object_type'] = object_type
        filter['%s__isnot' % attribute] = new_value
        items = Model.ObjectDefinition.objects.filter(**filter)
        # Only objects that define the attribute themselves (or have no
        # template) are interesting — inherited values are left alone.
        interesting_objects = []
        for i in items:
            if attribute in i._defined_attributes or i.use is None:
                interesting_objects.append( i )
        self.interesting_objects = interesting_objects
        # One opt-out checkbox per object that would be modified.
        for i in interesting_objects:
            self.fields[ 'modify_%s' % i.get_id() ] = forms.BooleanField(required=False,initial=True)
class PNPActionUrlForm(forms.Form):
    """ This form handles applying action_url to bunch of hosts and services """
    #apply_action_url = forms.BooleanField(required=False,initial=True,help_text="If set, apply action_url to every service object in nagios")
    action_url=forms.CharField(required=False,initial="/pnp4nagios/graph?host=$HOSTNAME$&srv=$SERVICEDESC$", help_text="Reset the action_url attribute of every service check in your nagios configuration with this one. ")
    def save(self):
        """Apply the action_url to every service defining its own value; tally failures."""
        action_url = self.cleaned_data['action_url']
        services = Model.Service.objects.filter(action_url__isnot=action_url)
        self.total_services = len(services)
        self.error_services = 0
        for i in services:
            # Skip services that merely inherit action_url from a template.
            if 'action_url' in i._defined_attributes or i.use is None:
                i.action_url = action_url
                try:
                    i.save()
                except Exception:
                    self.error_services += 1
class PNPTemplatesForm(forms.Form):
    """ This form manages your pnp4nagios templates """
    def __init__(self, *args,**kwargs):
        # Collect template directories (and the templates inside them) from
        # the pnp4nagios php configuration.
        self.template_directories = []
        self.templates = []
        tmp = Model.config._load_static_file('/etc/pnp4nagios/config.php')
        for k,v in tmp:
            if k == "$conf['template_dirs'][]":
                # strip all ' and " from directory
                directory = v.strip(";").strip('"').strip("'")
                self.template_directories.append( directory )
                if os.path.isdir(directory):
                    for file in os.listdir(directory):
                        self.templates.append( "%s/%s" % (directory,file))
                        #self.templates.append(file)
        super(self.__class__,self).__init__(*args,**kwargs)
# Choice lists for the log_level / log_type fields of PNPConfigForm.
pnp_loglevel_choices = [ ('0', '0 - Only Errors'), ('1', '1 - Little logging'), ('2', '2 - Log Everything'), ('-1','-1 Debug mode (log all and slower processing')]
pnp_log_type_choices = [('syslog','syslog'),('file','file')]
class PNPConfigForm(forms.Form):
    """ This form handles the npcd.cfg configuration file """
    user = forms.CharField(help_text="npcd service will have privileges of this group")
    group = forms.CharField(help_text="npcd service will have privileges of this user")
    log_type = forms.ChoiceField(widget=forms.RadioSelect, choices=pnp_log_type_choices, help_text="Define if you want to log to 'syslog' or 'file'")
    log_file = forms.CharField(help_text="If log_type is set to file. Log to this file")
    max_logfile_size = forms.IntegerField(help_text="Defines the maximum filesize (bytes) before logfile will rotate.")
    log_level = forms.ChoiceField(help_text="How much should we log?", choices=pnp_loglevel_choices)
    perfdata_spool_dir = forms.CharField(help_text="where we can find the performance data files")
    perfdata_file_run_cmd = forms.CharField(help_text="execute following command for each found file in perfdata_spool_dir")
    perfdata_file_run_cmd_args = forms.CharField(required=False, help_text="optional arguments to perfdata_file_run_cmd")
    identify_npcd = forms.ChoiceField(widget=forms.RadioSelect, choices=(('1','Yes'),('0', 'No')), help_text="If yes, npcd will append -n to the perfdata_file_run_cmd")
    npcd_max_threads = forms.IntegerField(help_text="Define how many parallel threads we should start")
    sleep_time = forms.IntegerField(help_text="How many seconds npcd should wait between dirscans")
    load_threshold = forms.FloatField(help_text="npcd won't start if load is above this threshold")
    pid_file = forms.CharField(help_text="Location of your pid file")
    perfdata_file = forms.CharField(help_text="Where should npcdmod.o write the performance data. Must not be same directory as perfdata_spool_dir")
    perfdata_spool_filename = forms.CharField(help_text="Filename for the spooled files")
    perfdata_file_processing_interval = forms.IntegerField(help_text="Interval between file processing")

    def __init__(self,initial={}, *args,**kwargs):
        my_initial = {}
        # Lets use PNPBrokerModuleForm to find sensible path to npcd config file
        broker_form = PNPBrokerModuleForm()
        self.npcd_cfg = broker_form.initial.get('config_file')
        # Pre-populate the form with the current values from npcd.cfg.
        npcd_values = Model.config._load_static_file(self.npcd_cfg)
        for k,v in npcd_values:
            my_initial[k] = v
        super(self.__class__,self).__init__(initial=my_initial,*args,**kwargs)

    def save(self):
        """Write back only the attributes the user actually changed."""
        for i in self.changed_data:
            Model.config._edit_static_file(attribute=i, new_value=self.cleaned_data[i], filename=self.npcd_cfg)
class EditFileForm(forms.Form):
    """ Manages editing of a single file """
    filecontent = forms.CharField( widget=forms.Textarea(attrs={ 'wrap':'off', 'rows':'50', 'cols':'2000'}) )
    def __init__(self,filename,initial={},*args,**kwargs):
        self.filename = filename
        my_initial = initial.copy()
        # Pre-fill the textarea with the current file contents unless supplied.
        if 'filecontent' not in my_initial:
            my_initial['filecontent'] = open(filename).read()
        super(self.__class__,self).__init__(initial=my_initial, *args,**kwargs)
    def save(self):
        """Overwrite the file, but only if the content actually changed."""
        if 'filecontent' in self.changed_data:
            data = self.cleaned_data['filecontent']
            open(self.filename,'w').write(data)
class PNPBrokerModuleForm(forms.Form):
    """ This form is responsible for configuring PNP4Nagios. """
    #enable_pnp= forms.BooleanField(required=False, initial=True,help_text="If set, PNP will be enabled and will graph Nagios Performance Data.")
    broker_module=forms.CharField(help_text="Full path to your npcdmod.o broker module that shipped with your pnp4nagios installation")
    config_file=forms.CharField(help_text="Full path to your npcd.cfg that shipped with your pnp4nagios installation")
    event_broker_options=forms.IntegerField(initial="-1", help_text="Nagios's default of -1 is recommended here. PNP Documentation says you will need at least bits 2 and 3. Only change this if you know what you are doing.")
    process_performance_data= forms.BooleanField(required=False, initial=True,help_text="PNP Needs the nagios option process_performance_data enabled to function. Make sure it is enabled.")
    #apply_action_url = forms.BooleanField(required=False,initial=True,help_text="If set, apply action_url to every service object in nagios")
    #action_url=forms.CharField(required=False,initial="/pnp4nagios/graph?host=$HOSTNAME$&srv=$SERVICEDESC$", help_text="Action url that your nagios objects can use to access perfdata")

    def clean_broker_module(self):
        """ Raises validation error if filename does not exist """
        filename = self.cleaned_data['broker_module']
        if not os.path.exists(filename):
            raise forms.ValidationError('File not found')
        return filename

    def clean_config_file(self):
        """ Raises validation error if filename does not exist """
        filename = self.cleaned_data['config_file']
        if not os.path.exists(filename):
            raise forms.ValidationError('File not found')
        return filename

    def __init__(self, initial={}, *args,**kwargs):
        my_initial = {}
        # Parse nagios.cfg and pick up the current npcdmod broker_module line
        # (if any) so the form starts out with the live values.
        Model.config.parse()
        maincfg_values=Model.config.maincfg_values
        self.nagios_configline = None
        for k,v in Model.config.maincfg_values:
            if k == 'broker_module' and v.find('npcdmod.o') > 0:
                self.nagios_configline=v
                v = v.split()
                my_initial['broker_module']=v.pop(0)
                # Remaining tokens may carry "config_file=<path>".
                for i in v:
                    if i.find('config_file=') > -1:
                        my_initial['config_file']=i.split('=',1)[1]
            elif k == "event_broker_options":
                my_initial[k] = v
        # If view specified any initial values, they overwrite ours
        for k,v in initial.items():
            my_initial[k] = v
        if 'broker_module' not in my_initial:
            my_initial['broker_module'] = self.get_suggested_npcdmod_path()
        if 'config_file' not in my_initial:
            my_initial['config_file'] = self.get_suggested_npcd_path()
        super(self.__class__,self).__init__(initial=my_initial,*args,**kwargs)

    def get_suggested_npcdmod_path(self):
        """ Returns best guess for full path to npcdmod.o file """
        possible_locations = [
            "/usr/lib/pnp4nagios/npcdmod.o",
            "/usr/lib64/nagios/brokers/npcdmod.o",
        ]
        for i in possible_locations:
            if os.path.isfile(i):
                return i
        # Falls back to the last candidate even when none exists.
        return i

    def get_suggested_npcd_path(self):
        """ Returns best guess for full path to npcd.cfg file """
        possible_locations = [
            "/etc/pnp4nagios/npcd.cfg"
        ]
        for i in possible_locations:
            if os.path.isfile(i):
                return i
        # Falls back to the last candidate even when none exists.
        return i

    def save(self):
        # Rewrite the broker_module line if either path changed or none existed.
        if 'broker_module' in self.changed_data or 'config_file' in self.changed_data or self.nagios_configline is None:
            v = "%s config_file=%s" % ( self.cleaned_data['broker_module'], self.cleaned_data['config_file'] )
            Model.config._edit_static_file(attribute="broker_module", new_value=v, old_value = self.nagios_configline, append=True)
        # We are supposed to handle process_performance_data attribute.. lets do that here
        process_performance_data = "1" if self.cleaned_data['process_performance_data'] else "0"
        Model.config._edit_static_file(attribute="process_performance_data", new_value=process_performance_data)
        # Update event broker only if it has changed
        name = "event_broker_options"
        if name in self.changed_data:
            Model.config._edit_static_file(attribute=name, new_value=self.cleaned_data[name])
class PerfDataForm(forms.Form):
    """Parses a raw nagios perfdata string into metrics via pynag."""
    perfdata = forms.CharField( widget=forms.Textarea(attrs={ 'wrap':'off', 'cols':'80'}) )
    def save(self):
        # Parse the submitted string; parsed metrics are exposed on self.results.
        from pynag import Model
        perfdata = self.cleaned_data['perfdata']
        perfdata = Model.PerfData(perfdata)
        self.results = perfdata.metrics
# Init-script subcommands supported by NagiosServiceForm.
COMMAND_CHOICES = [('reload','reload'), ('status','status'),('restart','restart'),('stop','stop'),('start','start')]


class NagiosServiceForm(forms.Form):
    """ Maintains control of the nagios service / reload / restart / etc """
    #path_to_init_script = forms.CharField(help_text="Path to your nagios init script", initial=NAGIOS_INIT)
    #nagios_binary = forms.CharField(help_text="Path to your nagios binary", initial=NAGIOS_BIN)
    #command = forms.ChoiceField(choices=COMMAND_CHOICES)
    def save(self):
        """Run the init script with the subcommand named by the submit button pressed."""
        #nagios_bin = self.cleaned_data['nagios_bin']
        if "reload" in self.data:
            command = "reload"
        elif "restart" in self.data:
            command = "restart"
        elif "stop" in self.data:
            command = "stop"
        elif "start" in self.data:
            command = "start"
        elif "status" in self.data:
            command = "status"
        # NOTE(review): if none of the known buttons is present in self.data,
        # ``command`` is unbound here and the next line raises NameError —
        # confirm callers always submit one of the expected buttons.
        self.command = command
        nagios_init = settings.nagios_init_script
        #command = self.cleaned_data['command']
        #from subprocess import Popen, PIPE
        command = "%s %s" % (nagios_init, command)
        #p = Popen(command.split(), stdout=PIPE, stderr=PIPE)
        code,stdout,stderr = pynag.Utils.runCommand(command)
        # Normalize empty output to None for the template's benefit.
        self.stdout = stdout or None
        self.stderr = stderr or None
        self.exit_code = code
class SendEmailForm(forms.Form):
    """ Form used to send email to one or more contacts regarding particular services
    """
    to = forms.CharField(
        required=True,
        help_text="E-mail address",
    )
    message = forms.CharField(
        widget=forms.widgets.Textarea(attrs={'rows':15, 'cols':40}),
        required = False,
        help_text="Message that is to be sent to recipients",
    )
    add_myself_to_cc = forms.BooleanField(
        required=False,
        help_text="If checked, you will be added automatically to CC"
    )
    acknowledge_all_problems = forms.BooleanField(
        required=False,
        help_text="If checked, also acknowledge all problems as they are sent"
    )

    def __init__(self, remote_user, *args, **kwargs):
        """ Create a new instance of SendEmailForm, contact name and email is used as from address.
        """
        self.remote_user = remote_user
        #self.contact_email = contact_email
        self.html_content = "There is now HTML content with this message."
        # List of service dicts; populated by the view before save() is called.
        self.services = []
        self._resolve_remote_user(self.remote_user)
        super(self.__class__,self).__init__(*args,**kwargs)

    def save(self):
        subject = "%s sent you a a message through adagios" % self.remote_user
        cc_address = []
        from_address = self._resolve_remote_user( self.remote_user )
        to_address = self.cleaned_data['to']
        # "to" may be a comma separated list of recipients.
        to_address = to_address.split(',')
        text_content = self.cleaned_data['message']
        # self.html_content is rendered in misc.views.mail()
        html_content = text_content + "<p></p>" + self.html_content
        if self.cleaned_data['add_myself_to_cc']:
            cc_address.append(from_address)
        if self.cleaned_data['acknowledge_all_problems']:
            comment = "Sent mail to %s" % self.cleaned_data['to']
            self.acknowledge_all_services(comment)
        # Here we actually send some email:
        msg = EmailMultiAlternatives(subject=subject, body=text_content, from_email=from_address, cc=cc_address, to=to_address)
        msg.attach_alternative(html_content, "text/html")
        msg.send()

    def acknowledge_all_services(self, comment):
        """ Acknowledge all problems in self.services()
        """
        for i in self.services:
            print "ack for %s" % i.get('description')
            host_name = i.get('host_name')
            service_description = i.get('description')
            sticky = "1"
            persistent = "0"
            notify = "0"
            author = self.remote_user
            pynag.Control.Command.acknowledge_svc_problem(host_name=host_name,
                                                          service_description=service_description,
                                                          sticky=sticky,
                                                          persistent=persistent,
                                                          notify=notify,
                                                          author=author,
                                                          comment=comment)

    def _resolve_remote_user(self, username):
        """ Returns a valid "Full Name <email@example.com>" for remote http authenticated user.
        If Remote user is a nagios contact, then return: Contact_Alias <contact_email>"
        Else if remote user is a valid email address, return that address
        Else return None
        """
        livestatus = pynag.Parsers.mk_livestatus(nagios_cfg_file=settings.nagios_config)
        try:
            contact = livestatus.get_contact( username )
            return "%s <%s>" % (contact.get('alias'), contact.get('email'))
        except IndexError:
            # If we get here, then remote_user does not exist as a contact.
            return username
"palli@opensource.is"
] | palli@opensource.is |
e4cf3d7b27e696c7ad9997e4ac9f817cb0f3f306 | 9cb5521f247a47963e229a021ece638c2dd6c2ea | /4_genEquTileGrid.py | d60a85541e4020968cedc4af1a3bb8f74fdbd1ed | [
"MIT"
] | permissive | crpurcell/CORNISH-S-PIPELINE | f97a677bb6047d6291daac184b9992c66a54c98e | 32ebaf694bbd1a46a68fa875f8557243280bfe10 | refs/heads/master | 2021-01-12T02:48:31.985664 | 2017-06-09T05:54:14 | 2017-06-09T05:54:14 | 78,108,522 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,072 | py | #!/usr/bin/env python
#=============================================================================#
# #
# NAME: 4_genEquTileGrid.py #
# #
# USAGE: ./4_genEquTileGrid.py #
# #
# PURPOSE: Read the pointing coordinates from the mosaic files and lay down #
# grid of Equatorial tiles for image testing purposes. #
# #
# MODIFIED: 05-Jun-2017 by C. Purcell #
# #
#=============================================================================#
# Hardcoded paths
dataRootDir = "../DATA"
# CORNISH-South border out to edge of data in Galactic coordinates
# Make this large enough to account for data out to the primary beam FWHM
bMax_deg = +1.2
bMin_deg = -1.2
lMax_deg = 350.2
lMin_deg = 294.8
# Tile parameters
imSize_px = [2000, 2000] # pixels [x, y] tile size
pixScale_deg = [0.60/3600, 0.60/3600] # pixel scale [dx, dy]
overlap_deg = [60.0/3600, 60.0/3600] # overlap between tiles [x, y] (deg)
#-----------------------------------------------------------------------------#
import os
import sys
import copy
import glob
import re
import math as m
import numpy as np
from pyslalib import slalib
import pylab as pl
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from matplotlib.patches import Ellipse, RegularPolygon, Polygon, Patch
from matplotlib.collections import PatchCollection
import sqlite3
from Imports.util_ATCA_PIPE import sort_nicely
# Constants
C = 2.998e8
#-----------------------------------------------------------------------------#
def main():
# Create a polygon describing the Galactic border of the survey
# Oversample each edge and combine into an ordered set of vertices
lBorLst_deg = np.linspace(lMin_deg, lMax_deg, 5500).tolist()
bBorLst_deg = np.linspace(bMin_deg, bMax_deg, 220).tolist()
borderPolyGalLst = zip(lBorLst_deg, [bMin_deg]*len(lBorLst_deg))
borderPolyGalLst += zip([lMax_deg]*len(bBorLst_deg), bBorLst_deg,)[1:]
borderPolyGalLst += zip(lBorLst_deg[::-1], [bMax_deg]*len(lBorLst_deg))[1:]
borderPolyGalLst += zip([lMin_deg]*len(bBorLst_deg), bBorLst_deg[::-1])
borderPolyGalArr = np.array(borderPolyGalLst)
lRange_deg = lMax_deg - lMin_deg
bRange_deg = bMax_deg - bMin_deg
# Convert the Galactic polygon vertices into Equatorial coordinates and
# determine the maximum and minimum RA and Dec. limits
borderPolyEquLst = []
for e in borderPolyGalLst:
ra_rad, dec_rad = slalib.sla_galeq(m.radians(e[0]), m.radians(e[1]))
borderPolyEquLst.append( (m.degrees(ra_rad), m.degrees(dec_rad)) )
borderPolyEquArr = np.array(borderPolyEquLst)
raMax_deg = np.max(borderPolyEquArr[:,0])
raMin_deg = np.min(borderPolyEquArr[:,0])
decMax_deg = np.max(borderPolyEquArr[:,1])
decMin_deg = np.min(borderPolyEquArr[:,1])
raRange_deg = raMax_deg - raMin_deg
decRange_deg = decMax_deg - decMin_deg
# Calculate the constant Dec (y) increment between tile centres
yIncr_deg = imSize_px[1] * pixScale_deg[1] - overlap_deg[1]
#------------------------------------------------------------------------#
# NOTE:
# Start at the bottom-left of the Equ grid and advance along a Dec. line
# setting down tiles. Starting coordinate = decMin_deg, raMin_deg.
# Note: Placing tiles on lines of constant Dec does not take into account
# the curvature of the Dec lines as we approach the equatorial pole,
# however, it should be good enough if the overlap between the tiles is
# enough and the cos(Dec) factor is calculated at the most negative Dec.
#------------------------------------------------------------------------#
raCentTileLst_deg = []
decCentTileLst_deg = []
vertexTileEquLst_deg = []
vertexTileGalLst_deg = []
# Loop through Dec rows until decMax reached
i = 0
while True:
# Calculate the Dec at the centre top and bottom of the current row
decTileCent_deg = decMin_deg + (yIncr_deg - 2 * overlap_deg[1]) * i
decTileTop_deg = decTileCent_deg - yIncr_deg/2.0
decTileBot_deg = decTileCent_deg + yIncr_deg/2.0
# Calculate the RA increment for this row
cosDecCent = m.cos(m.radians(decTileCent_deg))
cosDecTop = m.cos(m.radians(decTileTop_deg))
cosDecBot = m.cos(m.radians(decTileBot_deg))
cosDec = min(cosDecCent, cosDecTop, cosDecBot)
xIncr_deg = (imSize_px[0] * pixScale_deg[0] - 2*overlap_deg[0])/cosDec
i += 1
# Loop through the RAs until raMax reached
j = 0
while True:
# Calculate RA for this tile
raTileCent_deg = raMin_deg + xIncr_deg * j
raCentTileLst_deg.append(raTileCent_deg)
decCentTileLst_deg.append(decTileCent_deg)
j += 1
# Calculate the tile corner coorinates in Equ
xIncrTop_deg = (imSize_px[0] * pixScale_deg[0])/cosDecTop
xIncrBot_deg = (imSize_px[0] * pixScale_deg[0])/cosDecBot
x1y2 = (raTileCent_deg + xIncrTop_deg / 2.0, decTileTop_deg)
x2y2 = (raTileCent_deg - xIncrTop_deg / 2.0, decTileTop_deg)
x2y1 = (raTileCent_deg - xIncrBot_deg / 2.0, decTileBot_deg)
x1y1 = (raTileCent_deg + xIncrBot_deg / 2.0, decTileBot_deg)
vertexTileEquLst_deg.append(np.array([x1y1,x1y2,x2y2,x2y1]))
# Calculate the tile corner coordinates in Gal
lV_rad, bV_rad = slalib.sla_eqgal(m.radians(x1y2[0]),
m.radians(x1y2[1]))
x1y2 = (m.degrees(lV_rad), m.degrees(bV_rad))
lV_rad, bV_rad = slalib.sla_eqgal(m.radians(x2y2[0]),
m.radians(x2y2[1]))
x2y2 = (m.degrees(lV_rad), m.degrees(bV_rad))
lV_rad, bV_rad = slalib.sla_eqgal(m.radians(x2y1[0]),
m.radians(x2y1[1]))
x2y1 = (m.degrees(lV_rad), m.degrees(bV_rad))
lV_rad, bV_rad = slalib.sla_eqgal(m.radians(x1y1[0]),
m.radians(x1y1[1]))
x1y1 = (m.degrees(lV_rad), m.degrees(bV_rad))
vertexTileGalLst_deg.append(np.array([x1y1,x1y2,x2y2,x2y1]))
# End of RA While loop
if raTileCent_deg>=raMax_deg:
break
# End of Dec While loop
if decTileCent_deg>=decMax_deg:
break
# Convert the tile centre coordinates to Galactic
lCentTileLst_deg = []
bCentTileLst_deg = []
for i in range(len(raCentTileLst_deg)):
l_rad, b_rad = slalib.sla_eqgal(m.radians(raCentTileLst_deg[i]),
m.radians(decCentTileLst_deg[i]))
lCentTileLst_deg.append(m.degrees(l_rad))
bCentTileLst_deg.append(m.degrees(b_rad))
# Filter both Equ and Gal lists for tiles outside the survey area
# Must iterate from highest index when using 'pop' function
for i in range(len(raCentTileLst_deg)-1, -1, -1):
if not (lCentTileLst_deg[i]>=lMin_deg and
lCentTileLst_deg[i]<=lMax_deg and
bCentTileLst_deg[i]>=bMin_deg and
bCentTileLst_deg[i]<=bMax_deg):
lCentTileLst_deg.pop(i)
bCentTileLst_deg.pop(i)
raCentTileLst_deg.pop(i)
decCentTileLst_deg.pop(i)
vertexTileEquLst_deg.pop(i)
vertexTileGalLst_deg.pop(i)
# Sort the list of tiles into increasing RA
multiLst = zip(raCentTileLst_deg,
decCentTileLst_deg,
lCentTileLst_deg,
bCentTileLst_deg)
multiLst.sort()
(raCentTileLst_deg,
decCentTileLst_deg,
lCentTileLst_deg,
bCentTileLst_deg) = zip(*multiLst)
# Create the remaining columns (ID, pixel-scale, num-pixels
tileIDLst = range(1, len(lCentTileLst_deg)+1)
pixScaleXLst_asec = [pixScale_deg[0]*3600.0] * len(lCentTileLst_deg)
pixScaleYLst_asec = [pixScale_deg[1]*3600.0] * len(lCentTileLst_deg)
nPixXLst = [imSize_px[0]]* len(lCentTileLst_deg)
nPixYLst = [imSize_px[1]]* len(lCentTileLst_deg)
#------------------------------------------------------------------------#
# Upload the tile parameters into each database file in the data directory
dbFileLst = glob.glob(dataRootDir + '/*.sqlite')
# Loop through the database files
for dbFile in dbFileLst:
print ">> Writing tile_coords table to %s ..." % dbFile
# Connect to the database
conn = sqlite3.connect(dbFile)
cursor = conn.cursor()
# Drop old tile_coords table and create a new one
sql = "DROP TABLE IF EXISTS tile_coords"
cursor.execute(sql)
sql = """
CREATE TABLE tile_coords (
tileID INTEGER PRIMARY KEY,
RA_deg DOUBLE,
Dec_deg DOUBLE,
l_deg DOUBLE,
b_deg DOUBLE,
pixScaleX_asec DOUBLE,
pixscaleY_asec DOUBLE,
nPixX INTEGER,
nPixY INTEGER );
"""
cursor.execute(sql)
# Insert the entries into the table
for i in range(len(raCentTileLst_deg)):
sql = """
INSERT INTO tile_coords
(tileID,
RA_deg,
Dec_deg,
l_deg,
b_deg,
pixScaleX_asec,
pixscaleY_asec,
nPixX,
nPixY)
"""
sql += 'VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?) '
vals = [tileIDLst[i],
raCentTileLst_deg[i],
decCentTileLst_deg[i],
lCentTileLst_deg[i],
bCentTileLst_deg[i],
pixScaleXLst_asec[i],
pixScaleYLst_asec[i],
nPixXLst[i],
nPixYLst[i]]
cursor.execute(sql, vals)
# Commit changed to the database and close connection
conn.commit()
cursor.close()
conn.close()
#------------------------------------------------------------------------#
#------------------------------------------------------------------------#
# Plot the tile centres over the survey border
fig = plt.figure(figsize=(18.0, 10.0))
# EQUATORIAL PLOT -------------------------------------------------------#
ax1 = fig.add_axes([0.08, 0.4, 0.88, 0.58])
# Plot the tile centres and vertices
ax1.scatter(np.array(raCentTileLst_deg)/15.0, decCentTileLst_deg, s=2,
zorder=2)
squares = []
for vertex in vertexTileEquLst_deg:
vertex = np.array(vertex)
vertex[:,0]/=15.0
square = Polygon(xy=np.array(vertex), closed=True)
squares.append(square)
s = PatchCollection(squares, alpha=1.0, edgecolor='black',
facecolor='none', zorder=3)
ax1.add_collection(s)
# Plot the border and format the axis
ax1.plot(borderPolyEquArr[:,0]/15.0, borderPolyEquArr[:,1])
ax1.yaxis.grid(True, which='major')
ax1.xaxis.grid(True, which='major')
ax1.set_xlim((raMax_deg+0.01*raRange_deg)/15,
(raMin_deg-0.01*raRange_deg)/15)
ax1.set_ylim(decMin_deg-0.05*decRange_deg, decMax_deg+0.05*decRange_deg)
ax1.set_aspect(1.0/15.0/cosDec)
ax1.set_ylabel('Dec. (deg)')
ax1.set_xlabel('R.A. (hrs)')
# Annotate the Equatorial plot with tile numbers
# for i in range(len(raCentTileLst_deg)):
# ax1.annotate(str(tileIDLst[i]),
# xy=(raCentTileLst_deg[i]/15.0, decCentTileLst_deg[i]),
# horizontalalignment='center',
# verticalalignment='center',
# fontsize=6,
# textcoords='data',
# clip_on=True, backgroundcolor='w')
# GALACTIC PLOT ---------------------------------------------------------#
ax2 = fig.add_axes([0.08, 0.05, 0.88, 0.30])
ax2.plot(borderPolyGalArr[:,0], borderPolyGalArr[:,1])
# Plot the tile centres and vertices
#ax2.scatter(np.array(lCentTileLst_deg), bCentTileLst_deg, s=2,
# zorder=2)
squares = []
for vertex in vertexTileGalLst_deg:
square = Polygon(xy=np.array(vertex), closed=True)
squares.append(square)
s = PatchCollection(squares, alpha=1.0, edgecolor='black',
facecolor='none', zorder=3)
ax2.add_collection(s)
# Plot the border and format the axis
ax2.yaxis.set_major_locator(MaxNLocator(4))
ax2.yaxis.grid(True, which='major')
ax2.xaxis.grid(True, which='major')
ax2.set_xlim(lMax_deg+0.02*lRange_deg, lMin_deg-0.02*lRange_deg)
ax2.set_ylim(bMin_deg-0.19*bRange_deg, bMax_deg+0.19*bRange_deg)
ax2.set_aspect(1.0)
ax2.set_ylabel('Glong. (deg)')
ax2.set_xlabel('Glat. (deg)')
# Annotate the Galactic plot with tile numbers
for i in range(len(lCentTileLst_deg)):
ax2.annotate(str(tileIDLst[i]),
xy=(lCentTileLst_deg[i], bCentTileLst_deg[i]),
horizontalalignment='center',
verticalalignment='center',
fontsize=8,
textcoords='data',
clip_on=True)
fig.show()
fig.savefig('tile_layout.pdf')
print "Press <RETURN> to exit ..."
raw_input()
#-----------------------------------------------------------------------------#
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
| [
"cormac.r.purcell@gmail.com"
] | cormac.r.purcell@gmail.com |
9510e335e0222d984df0099559069d0d714c1cbf | ced309b456bbca6a6acd63fe134edf8c400bb3f5 | /Django/saver.py | f74f04445da41deb80fafc4c7f5c50797ae1fd8e | [
"MIT"
] | permissive | JOHNKYON/Data_save | 0f2e1e7393ab32b28f3aa85ee24aa9c543d053b5 | 8d6820e2d6923cf5ca038abd39da8f42793d9ad7 | refs/heads/master | 2021-01-24T18:08:34.521893 | 2017-03-15T05:18:16 | 2017-03-15T05:18:16 | 84,408,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 843 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import generators
from __future__ import nested_scopes
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
from django.http import HttpResponse
from django.shortcuts import render_to_response
import codecs
def test_transfer(request):
    """Serve the static test page; *request* is required by the Django view
    contract but is otherwise unused."""
    response = render_to_response('test_page.html')
    return response
def parameter_saver(request):
    """Append the request's GET or POST parameters to data/data.txt.

    Returns an HttpResponse naming the method that carried parameters
    ("GET" or "POST"), or "Hello World!" when neither did.

    BUGFIX: the original opened the file and returned before ever reaching
    output_file.close(), leaking the file handle on every GET/POST request.
    Using the file as a context manager guarantees it is closed on all paths.
    """
    with codecs.open('data/data.txt', 'a', encoding='utf8') as output_file:
        if request.GET:
            output_file.write(str(request.GET) + '\n')
            return HttpResponse("GET")
        elif request.POST:
            output_file.write(str(request.POST) + '\n')
            return HttpResponse("POST")
    return HttpResponse("Hello World!")
"wjfwzzc@gmail.com"
] | wjfwzzc@gmail.com |
7b2e7475bab47ca81e443274773121ef956d6117 | 0626388c2e9a1db06a49b90ea71420f4f7f30273 | /ai_blog/wsgi.py | 3889b1e9a9f261cc98c071e62f83aff08d3dc7ae | [] | no_license | Charnel2500/blog_ai_py385_django311 | 2382c1a4fdee4d528d5d37038efb35f2b85cd407 | 0f44cce326c3ccb3fcbf262d772a78aedada92df | refs/heads/master | 2023-01-01T23:39:54.703597 | 2020-10-27T23:49:56 | 2020-10-27T23:49:56 | 300,914,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for ai_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module unless the environment
# already specifies one.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ai_blog.settings')

# Module-level WSGI callable that application servers look up by name.
application = get_wsgi_application()
| [
"jakubrhryc@gmail.com"
] | jakubrhryc@gmail.com |
c1fa7ddb3cb26e3ba1673fd853a21c0bcecf5b52 | 865ee6eb8ee52c8056fbf406059c5481f365de6e | /openresty-win32-build/thirdparty/x86/pgsql/pgAdmin 4/venv/Lib/site-packages/typing.py | 693945365d671e3b1c522b0d19caa4eba538a5c8 | [
"MIT",
"LicenseRef-scancode-ssleay",
"BSD-3-Clause",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-pcre",
"LicenseRef-scancode-public-domain",
"Zlib",
"BSD-2-Clause"
] | permissive | nneesshh/openresty-oss | 76e119081ea06bc82b184f96d531cc756b716c9d | bfbb9d7526020eda1788a0ed24f2be3c8be5c1c3 | refs/heads/master | 2022-12-12T21:39:48.917622 | 2019-05-31T03:14:18 | 2019-05-31T03:14:18 | 184,213,410 | 1 | 0 | MIT | 2022-12-06T17:28:59 | 2019-04-30T07:28:45 | C | UTF-8 | Python | false | false | 71,396 | py | from __future__ import absolute_import, unicode_literals
import abc
from abc import abstractmethod, abstractproperty
import collections
import functools
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
import copy
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
# Please keep __all__ alphabetized within each category.
# Public API of the module.  Keep each category alphabetized.
__all__ = [
    # Super-special typing primitives.
    'Any',
    'Callable',
    'ClassVar',
    'Generic',
    'Optional',
    'Tuple',
    'Type',
    'TypeVar',
    'Union',

    # ABCs (from collections.abc).
    'AbstractSet',  # collections.abc.Set.
    'GenericMeta',  # subclass of abc.ABCMeta and a metaclass
                    # for 'Generic' and ABCs below.
    'ByteString',
    'Container',
    'ContextManager',
    'Hashable',
    'ItemsView',
    'Iterable',
    'Iterator',
    'KeysView',
    'Mapping',
    'MappingView',
    'MutableMapping',
    'MutableSequence',
    'MutableSet',
    'Sequence',
    'Sized',
    'ValuesView',

    # Structural checks, a.k.a. protocols.
    'Reversible',
    'SupportsAbs',
    'SupportsComplex',
    'SupportsFloat',
    'SupportsInt',

    # Concrete collection types.
    'Counter',
    'Deque',
    'Dict',
    'DefaultDict',
    'List',
    'Set',
    'FrozenSet',
    'NamedTuple',  # Not really a type.
    'Generator',

    # One-off things.
    'AnyStr',
    'cast',
    'get_type_hints',
    'NewType',
    'no_type_check',
    'no_type_check_decorator',
    'overload',
    'Text',
    'TYPE_CHECKING',
]
# The pseudo-submodules 're' and 'io' are part of the public
# namespace, but excluded from __all__ because they might stomp on
# legitimate imports of those modules.
def _qualname(x):
if sys.version_info[:2] >= (3, 3):
return x.__qualname__
else:
# Fall back to just name.
return x.__name__
def _trim_name(nm):
whitelist = ('_TypeAlias', '_ForwardRef', '_TypingBase', '_FinalTypingBase')
if nm.startswith('_') and nm not in whitelist:
nm = nm[1:]
return nm
class TypingMeta(type):
    """Metaclass for most types defined in typing module
    (not a part of public API).

    This also defines a dummy constructor (all the work for most typing
    constructs is done in __new__) and a nicer repr().
    """

    _is_protocol = False

    def __new__(cls, name, bases, namespace):
        # Coerce the class name to the native str type before delegating
        # to type.__new__ (required on Python 2 where name may be unicode).
        return super(TypingMeta, cls).__new__(cls, str(name), bases, namespace)

    @classmethod
    def assert_no_subclassing(cls, bases):
        # Raise if any base is itself a typing construct -- subclassing
        # special typing objects is not allowed.
        for base in bases:
            if isinstance(base, cls):
                raise TypeError("Cannot subclass %s" %
                                (', '.join(map(_type_repr, bases)) or '()'))

    def __init__(self, *args, **kwds):
        # Intentionally a no-op: all construction work happens in __new__.
        pass

    def _eval_type(self, globalns, localns):
        """Override this in subclasses to interpret forward references.

        For example, List['C'] is internally stored as
        List[_ForwardRef('C')], which should evaluate to List[C],
        where C is an object found in globalns or localns (searching
        localns first, of course).
        """
        return self

    def _get_type_vars(self, tvars):
        # Default: no type variables to contribute.
        pass

    def __repr__(self):
        qname = _trim_name(_qualname(self))
        return '%s.%s' % (self.__module__, qname)
class _TypingBase(object):
    """Internal indicator of special typing constructs."""

    __metaclass__ = TypingMeta
    __slots__ = ('__weakref__',)

    def __init__(self, *args, **kwds):
        # Intentionally a no-op; subclasses do their work in __init__
        # overrides or in __new__.
        pass

    def __new__(cls, *args, **kwds):
        """Constructor.

        This only exists to give a better error message in case
        someone tries to subclass a special typing object (not a good idea).
        """
        if (len(args) == 3 and
                isinstance(args[0], str) and
                isinstance(args[1], tuple)):
            # Close enough to a (name, bases, namespace) class-creation
            # call -- reject it.
            raise TypeError("Cannot subclass %r" % cls)
        return super(_TypingBase, cls).__new__(cls)

    # Things that are not classes also need these.
    def _eval_type(self, globalns, localns):
        return self

    def _get_type_vars(self, tvars):
        pass

    def __repr__(self):
        cls = type(self)
        qname = _trim_name(_qualname(cls))
        return '%s.%s' % (cls.__module__, qname)

    def __call__(self, *args, **kwds):
        raise TypeError("Cannot instantiate %r" % type(self))
class _FinalTypingBase(_TypingBase):
    """Internal mix-in class to prevent instantiation.

    Prevents instantiation unless _root=True is given in class call.

    It is used to create pseudo-singleton instances Any, Union, Optional, etc.
    """

    __slots__ = ()

    def __new__(cls, *args, **kwds):
        self = super(_FinalTypingBase, cls).__new__(cls, *args, **kwds)
        # Only the module itself may create instances, by passing _root=True.
        if '_root' in kwds and kwds['_root'] is True:
            return self
        raise TypeError("Cannot instantiate %r" % cls)

    def __reduce__(self):
        # Pickle by public name so the module-level singleton is restored.
        return _trim_name(type(self).__name__)
class _ForwardRef(_TypingBase):
    """Internal wrapper to hold a forward reference."""

    __slots__ = ('__forward_arg__', '__forward_code__',
                 '__forward_evaluated__', '__forward_value__')

    def __init__(self, arg):
        super(_ForwardRef, self).__init__(arg)
        if not isinstance(arg, basestring):
            raise TypeError('Forward reference must be a string -- got %r' % (arg,))
        try:
            # Pre-compile so evaluation errors surface at creation time.
            code = compile(arg, '<string>', 'eval')
        except SyntaxError:
            raise SyntaxError('Forward reference must be an expression -- got %r' %
                              (arg,))
        self.__forward_arg__ = arg
        self.__forward_code__ = code
        self.__forward_evaluated__ = False
        self.__forward_value__ = None

    def _eval_type(self, globalns, localns):
        # Evaluate (and cache) the referenced type; re-evaluate when a
        # distinct localns is supplied.
        if not self.__forward_evaluated__ or localns is not globalns:
            if globalns is None and localns is None:
                globalns = localns = {}
            elif globalns is None:
                globalns = localns
            elif localns is None:
                localns = globalns
            self.__forward_value__ = _type_check(
                eval(self.__forward_code__, globalns, localns),
                "Forward references must evaluate to types.")
            self.__forward_evaluated__ = True
        return self.__forward_value__

    def __eq__(self, other):
        if not isinstance(other, _ForwardRef):
            return NotImplemented
        return (self.__forward_arg__ == other.__forward_arg__ and
                self.__forward_value__ == other.__forward_value__)

    def __hash__(self):
        return hash((self.__forward_arg__, self.__forward_value__))

    def __instancecheck__(self, obj):
        raise TypeError("Forward references cannot be used with isinstance().")

    def __subclasscheck__(self, cls):
        raise TypeError("Forward references cannot be used with issubclass().")

    def __repr__(self):
        return '_ForwardRef(%r)' % (self.__forward_arg__,)
class _TypeAlias(_TypingBase):
    """Internal helper class for defining generic variants of concrete types.

    Note that this is not a type; let's call it a pseudo-type.  It cannot
    be used in instance and subclass checks in parameterized form, i.e.
    ``isinstance(42, Match[str])`` raises ``TypeError`` instead of returning
    ``False``.
    """

    __slots__ = ('name', 'type_var', 'impl_type', 'type_checker')

    def __init__(self, name, type_var, impl_type, type_checker):
        """Initializer.

        Args:
            name: The name, e.g. 'Pattern'.
            type_var: The type parameter, e.g. AnyStr, or the
                specific type, e.g. str.
            impl_type: The implementation type.
            type_checker: Function that takes an impl_type instance.
                and returns a value that should be a type_var instance.
        """
        assert isinstance(name, basestring), repr(name)
        assert isinstance(impl_type, type), repr(impl_type)
        assert not isinstance(impl_type, TypingMeta), repr(impl_type)
        assert isinstance(type_var, (type, _TypingBase)), repr(type_var)
        self.name = name
        self.type_var = type_var
        self.impl_type = impl_type
        self.type_checker = type_checker

    def __repr__(self):
        return "%s[%s]" % (self.name, _type_repr(self.type_var))

    def __getitem__(self, parameter):
        # Subscription is only allowed while the alias is still generic
        # (its type_var is an unresolved TypeVar).
        if not isinstance(self.type_var, TypeVar):
            raise TypeError("%s cannot be further parameterized." % self)
        if self.type_var.__constraints__ and isinstance(parameter, type):
            if not issubclass(parameter, self.type_var.__constraints__):
                raise TypeError("%s is not a valid substitution for %s." %
                                (parameter, self.type_var))
        if isinstance(parameter, TypeVar) and parameter is not self.type_var:
            raise TypeError("%s cannot be re-parameterized." % self)
        return self.__class__(self.name, parameter,
                              self.impl_type, self.type_checker)

    def __eq__(self, other):
        if not isinstance(other, _TypeAlias):
            return NotImplemented
        return self.name == other.name and self.type_var == other.type_var

    def __hash__(self):
        return hash((self.name, self.type_var))

    def __instancecheck__(self, obj):
        if not isinstance(self.type_var, TypeVar):
            raise TypeError("Parameterized type aliases cannot be used "
                            "with isinstance().")
        return isinstance(obj, self.impl_type)

    def __subclasscheck__(self, cls):
        if not isinstance(self.type_var, TypeVar):
            raise TypeError("Parameterized type aliases cannot be used "
                            "with issubclass().")
        return issubclass(cls, self.impl_type)
def _get_type_vars(types, tvars):
    """Accumulate type variables from *types* into the list *tvars* in place."""
    for tp in types:
        if isinstance(tp, (TypingMeta, _TypingBase)):
            tp._get_type_vars(tvars)
def _type_vars(types):
    """Return a tuple of the unique free type variables found in *types*."""
    found = []
    _get_type_vars(types, found)
    return tuple(found)
def _eval_type(t, globalns, localns):
    """Resolve forward references inside *t* when it is a typing construct;
    plain objects pass through unchanged."""
    if isinstance(t, (TypingMeta, _TypingBase)):
        return t._eval_type(globalns, localns)
    return t
def _type_check(arg, msg):
    """Check that the argument is a type, and return it (internal helper).

    As a special case, accept None and return type(None) instead.
    Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.

    The msg argument is a human-readable error message, e.g.

        "Union[arg, ...]: arg should be a type."

    We append the repr() of the actual value (truncated to 100 chars).
    """
    if arg is None:
        return type(None)
    if isinstance(arg, basestring):
        # Bare strings become forward references.
        arg = _ForwardRef(arg)
    if (
        isinstance(arg, _TypingBase) and type(arg).__name__ == '_ClassVar' or
        not isinstance(arg, (type, _TypingBase)) and not callable(arg)
    ):
        raise TypeError(msg + " Got %.100r." % (arg,))
    # Bare Union etc. are not valid as type arguments
    if (
        type(arg).__name__ in ('_Union', '_Optional') and
        not getattr(arg, '__origin__', None) or
        isinstance(arg, TypingMeta) and arg._gorg in (Generic, _Protocol)
    ):
        raise TypeError("Plain %s is not valid as type argument" % arg)
    return arg
def _type_repr(obj):
    """Return the repr() of an object, special-casing types (internal helper).

    Plain classes render as 'module.QualName' (builtins drop the module
    prefix), Ellipsis renders as '...', functions by name, and everything
    else falls back to repr().
    """
    if isinstance(obj, type) and not isinstance(obj, TypingMeta):
        if obj.__module__ == '__builtin__':
            return _qualname(obj)
        return '{0}.{1}'.format(obj.__module__, _qualname(obj))
    if obj is Ellipsis:
        return '...'
    if isinstance(obj, types.FunctionType):
        return obj.__name__
    return repr(obj)
class ClassVarMeta(TypingMeta):
    """Metaclass for _ClassVar"""

    def __new__(cls, name, bases, namespace):
        # Forbid subclassing of the ClassVar construct.
        cls.assert_no_subclassing(bases)
        self = super(ClassVarMeta, cls).__new__(cls, name, bases, namespace)
        return self
class _ClassVar(_FinalTypingBase):
    """Special type construct to mark class variables.

    An annotation wrapped in ClassVar indicates that a given
    attribute is intended to be used as a class variable and
    should not be set on instances of that class. Usage::

        class Starship:
            stats = {}  # type: ClassVar[Dict[str, int]] # class variable
            damage = 10 # type: int                      # instance variable

    ClassVar accepts only types and cannot be further subscribed.

    Note that ClassVar is not a class itself, and should not
    be used with isinstance() or issubclass().
    """

    __metaclass__ = ClassVarMeta
    __slots__ = ('__type__',)

    def __init__(self, tp=None, _root=False):
        # __type__ is None for the bare ClassVar singleton; a subscripted
        # ClassVar[X] carries X here.
        self.__type__ = tp

    def __getitem__(self, item):
        cls = type(self)
        if self.__type__ is None:
            return cls(_type_check(item,
                       '{} accepts only types.'.format(cls.__name__[1:])),
                       _root=True)
        raise TypeError('{} cannot be further subscripted'
                        .format(cls.__name__[1:]))

    def _eval_type(self, globalns, localns):
        # Resolve forward references inside the wrapped type.
        return type(self)(_eval_type(self.__type__, globalns, localns),
                          _root=True)

    def __repr__(self):
        r = super(_ClassVar, self).__repr__()
        if self.__type__ is not None:
            r += '[{}]'.format(_type_repr(self.__type__))
        return r

    def __hash__(self):
        return hash((type(self).__name__, self.__type__))

    def __eq__(self, other):
        if not isinstance(other, _ClassVar):
            return NotImplemented
        if self.__type__ is not None:
            return self.__type__ == other.__type__
        return self is other


# The public (pseudo-singleton) ClassVar instance.
ClassVar = _ClassVar(_root=True)
class AnyMeta(TypingMeta):
    """Metaclass for Any."""

    def __new__(cls, name, bases, namespace):
        # Forbid subclassing of the Any construct.
        cls.assert_no_subclassing(bases)
        self = super(AnyMeta, cls).__new__(cls, name, bases, namespace)
        return self
class _Any(_FinalTypingBase):
    """Special type indicating an unconstrained type.

    - Any is compatible with every type.
    - Any assumed to have all methods.
    - All values assumed to be instances of Any.

    Note that all the above statements are true from the point of view of
    static type checkers. At runtime, Any should not be used with instance
    or class checks.
    """

    __metaclass__ = AnyMeta
    __slots__ = ()

    def __instancecheck__(self, obj):
        raise TypeError("Any cannot be used with isinstance().")

    def __subclasscheck__(self, cls):
        raise TypeError("Any cannot be used with issubclass().")


# The public (pseudo-singleton) Any instance.
Any = _Any(_root=True)
class NoReturnMeta(TypingMeta):
    """Metaclass for NoReturn."""

    def __new__(cls, name, bases, namespace):
        # Forbid subclassing of the NoReturn construct.
        cls.assert_no_subclassing(bases)
        self = super(NoReturnMeta, cls).__new__(cls, name, bases, namespace)
        return self
class _NoReturn(_FinalTypingBase):
    """Special type indicating functions that never return.

    Example::

        from typing import NoReturn

        def stop() -> NoReturn:
            raise Exception('no way')

    This type is invalid in other positions, e.g., ``List[NoReturn]``
    will fail in static type checkers.
    """

    __metaclass__ = NoReturnMeta
    __slots__ = ()

    def __instancecheck__(self, obj):
        raise TypeError("NoReturn cannot be used with isinstance().")

    def __subclasscheck__(self, cls):
        raise TypeError("NoReturn cannot be used with issubclass().")


# The public (pseudo-singleton) NoReturn instance.
NoReturn = _NoReturn(_root=True)
class TypeVarMeta(TypingMeta):
    """Metaclass for TypeVar: forbids subclassing of type variables."""

    def __new__(cls, name, bases, namespace):
        cls.assert_no_subclassing(bases)
        return super(TypeVarMeta, cls).__new__(cls, name, bases, namespace)
class TypeVar(_TypingBase):
    """Type variable.

    Usage::

      T = TypeVar('T')  # Can be anything
      A = TypeVar('A', str, bytes)  # Must be str or bytes

    Type variables exist primarily for the benefit of static type
    checkers.  They serve as the parameters for generic types as well
    as for generic function definitions.  See class Generic for more
    information on generic types.  Generic functions work as follows:

      def repeat(x: T, n: int) -> List[T]:
          '''Return a list containing n references to x.'''
          return [x]*n

      def longest(x: A, y: A) -> A:
          '''Return the longest of two strings.'''
          return x if len(x) >= len(y) else y

    The latter example's signature is essentially the overloading
    of (str, str) -> str and (bytes, bytes) -> bytes.  Also note
    that if the arguments are instances of some subclass of str,
    the return type is still plain str.

    At runtime, isinstance(x, T) and issubclass(C, T) will raise TypeError.

    Type variables defined with covariant=True or contravariant=True
    can be used do declare covariant or contravariant generic types.
    See PEP 484 for more details. By default generic types are invariant
    in all type variables.

    Type variables can be introspected. e.g.:

      T.__name__ == 'T'
      T.__constraints__ == ()
      T.__covariant__ == False
      T.__contravariant__ = False
      A.__constraints__ == (str, bytes)
    """

    __metaclass__ = TypeVarMeta
    __slots__ = ('__name__', '__bound__', '__constraints__',
                 '__covariant__', '__contravariant__')

    def __init__(self, name, *constraints, **kwargs):
        super(TypeVar, self).__init__(name, *constraints, **kwargs)
        # Keyword-only options (Python 2 has no keyword-only syntax).
        bound = kwargs.get('bound', None)
        covariant = kwargs.get('covariant', False)
        contravariant = kwargs.get('contravariant', False)
        self.__name__ = name
        if covariant and contravariant:
            raise ValueError("Bivariant types are not supported.")
        self.__covariant__ = bool(covariant)
        self.__contravariant__ = bool(contravariant)
        if constraints and bound is not None:
            raise TypeError("Constraints cannot be combined with bound=...")
        if constraints and len(constraints) == 1:
            raise TypeError("A single constraint is not allowed")
        msg = "TypeVar(name, constraint, ...): constraints must be types."
        self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
        if bound:
            self.__bound__ = _type_check(bound, "Bound must be a type.")
        else:
            self.__bound__ = None

    def _get_type_vars(self, tvars):
        # A TypeVar contributes itself (once) to the free-variable list.
        if self not in tvars:
            tvars.append(self)

    def __repr__(self):
        # Variance prefix: '+' covariant, '-' contravariant, '~' invariant.
        if self.__covariant__:
            prefix = '+'
        elif self.__contravariant__:
            prefix = '-'
        else:
            prefix = '~'
        return prefix + self.__name__

    def __instancecheck__(self, instance):
        raise TypeError("Type variables cannot be used with isinstance().")

    def __subclasscheck__(self, cls):
        raise TypeError("Type variables cannot be used with issubclass().")
# Some unconstrained type variables. These are used by the container types.
# (These are not for export.)
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# A useful type variable with constraints. This represents string types.
# (This one *is* for export!)
AnyStr = TypeVar('AnyStr', bytes, unicode)
def _replace_arg(arg, tvars, args):
    """An internal helper function: replace arg if it is a type variable
    found in tvars with corresponding substitution from args or
    with corresponding substitution sub-tree if arg is a generic type.
    """
    if tvars is None:
        tvars = []
    # Generic types delegate to their own substitution tree.
    if hasattr(arg, '_subs_tree') and isinstance(arg, (GenericMeta, _TypingBase)):
        return arg._subs_tree(tvars, args)
    # Type variables are replaced positionally from tvars -> args.
    if isinstance(arg, TypeVar):
        for i, tvar in enumerate(tvars):
            if arg == tvar:
                return args[i]
    return arg
# Special typing constructs Union, Optional, Generic, Callable and Tuple
# use three special attributes for internal bookkeeping of generic types:
# * __parameters__ is a tuple of unique free type parameters of a generic
# type, for example, Dict[T, T].__parameters__ == (T,);
# * __origin__ keeps a reference to a type that was subscripted,
# e.g., Union[T, int].__origin__ == Union;
# * __args__ is a tuple of all arguments used in subscripting,
# e.g., Dict[T, int].__args__ == (T, int).
def _subs_tree(cls, tvars=None, args=None):
    """An internal helper function: calculate substitution tree
    for generic cls after replacing its type parameters with
    substitutions in tvars -> args (if any).

    Repeat the same following __origin__'s.

    Return a list of arguments with all possible substitutions
    performed. Arguments that are generic classes themselves are represented
    as tuples (so that no new classes are created by this function).
    For example: _subs_tree(List[Tuple[int, T]][str]) == [(Tuple, int, str)]
    """
    # Unsubscripted generics have nothing to substitute.
    if cls.__origin__ is None:
        return cls
    # Make of chain of origins (i.e. cls -> cls.__origin__)
    current = cls.__origin__
    orig_chain = []
    while current.__origin__ is not None:
        orig_chain.append(current)
        current = current.__origin__
    # Replace type variables in __args__ if asked ...
    tree_args = []
    for arg in cls.__args__:
        tree_args.append(_replace_arg(arg, tvars, args))
    # ... then continue replacing down the origin chain.
    for ocls in orig_chain:
        new_tree_args = []
        for arg in ocls.__args__:
            new_tree_args.append(_replace_arg(arg, ocls.__parameters__, tree_args))
        tree_args = new_tree_args
    return tree_args
def _remove_dups_flatten(parameters):
    """An internal helper for Union creation and substitution: flatten Union's
    among parameters, then remove duplicates and strict subclasses.
    """
    # Flatten out Union[Union[...], ...].
    params = []
    for p in parameters:
        if isinstance(p, _Union) and p.__origin__ is Union:
            params.extend(p.__args__)
        elif isinstance(p, tuple) and len(p) > 0 and p[0] is Union:
            # Substitution trees (see _subs_tree) represent a Union as a
            # tuple whose first item is Union itself.
            params.extend(p[1:])
        else:
            params.append(p)
    # Weed out strict duplicates, preserving the first of each occurrence.
    all_params = set(params)
    if len(all_params) < len(params):
        new_params = []
        for t in params:
            if t in all_params:
                new_params.append(t)
                all_params.remove(t)
        params = new_params
        assert not all_params, all_params
    # Weed out subclasses.
    # E.g. Union[int, Employee, Manager] == Union[int, Employee].
    # If object is present it will be sole survivor among proper classes.
    # Never discard type variables.
    # (In particular, Union[str, AnyStr] != AnyStr.)
    all_params = set(params)
    for t1 in params:
        if not isinstance(t1, type):
            continue
        if any(isinstance(t2, type) and issubclass(t1, t2)
               for t2 in all_params - {t1}
               if not (isinstance(t2, GenericMeta) and
                       t2.__origin__ is not None)):
            # Some surviving plain class t2 already covers t1; drop t1.
            all_params.remove(t1)
    return tuple(t for t in params if t in all_params)
def _check_generic(cls, parameters):
# Check correct count for parameters of a generic cls (internal helper).
if not cls.__parameters__:
raise TypeError("%s is not a generic class" % repr(cls))
alen = len(parameters)
elen = len(cls.__parameters__)
if alen != elen:
raise TypeError("Too %s parameters for %s; actual %s, expected %s" %
("many" if alen > elen else "few", repr(cls), alen, elen))
_cleanups = []
def _tp_cache(func):
maxsize = 128
cache = {}
_cleanups.append(cache.clear)
@functools.wraps(func)
def inner(*args):
key = args
try:
return cache[key]
except TypeError:
# Assume it's an unhashable argument.
return func(*args)
except KeyError:
value = func(*args)
if len(cache) >= maxsize:
# If the cache grows too much, just start over.
cache.clear()
cache[key] = value
return value
return inner
class UnionMeta(TypingMeta):
    """Metaclass for Union."""
    def __new__(cls, name, bases, namespace):
        # Union is final: user classes may not inherit from it.
        cls.assert_no_subclassing(bases)
        return super(UnionMeta, cls).__new__(cls, name, bases, namespace)
class _Union(_FinalTypingBase):
    """Union type; Union[X, Y] means either X or Y.

    To define a union, use e.g. Union[int, str].  Details:

    - The arguments must be types and there must be at least one.

    - None as an argument is a special case and is replaced by
      type(None).

    - Unions of unions are flattened, e.g.::

        Union[Union[int, str], float] == Union[int, str, float]

    - Unions of a single argument vanish, e.g.::

        Union[int] == int  # The constructor actually returns int

    - Redundant arguments are skipped, e.g.::

        Union[int, str, int] == Union[int, str]

    - When comparing unions, the argument order is ignored, e.g.::

        Union[int, str] == Union[str, int]

    - When two arguments have a subclass relationship, the least
      derived argument is kept, e.g.::

        class Employee: pass
        class Manager(Employee): pass
        Union[int, Employee, Manager] == Union[int, Employee]
        Union[Manager, int, Employee] == Union[int, Employee]
        Union[Employee, Manager] == Employee

    - Similar for object::

        Union[int, object] == object

    - You cannot subclass or instantiate a union.

    - You can use Optional[X] as a shorthand for Union[X, None].
    """
    __metaclass__ = UnionMeta
    __slots__ = ('__parameters__', '__args__', '__origin__', '__tree_hash__')
    def __new__(cls, parameters=None, origin=None, *args, **kwds):
        self = super(_Union, cls).__new__(cls, parameters, origin, *args, **kwds)
        if origin is None:
            # This instance is the unsubscripted Union singleton itself.
            self.__parameters__ = None
            self.__args__ = None
            self.__origin__ = None
            self.__tree_hash__ = hash(frozenset(('Union',)))
            return self
        if not isinstance(parameters, tuple):
            raise TypeError("Expected parameters=<tuple>")
        if origin is Union:
            parameters = _remove_dups_flatten(parameters)
            # It's not a union if there's only one type left.
            if len(parameters) == 1:
                return parameters[0]
        self.__parameters__ = _type_vars(parameters)
        self.__args__ = parameters
        self.__origin__ = origin
        # Pre-calculate the __hash__ on instantiation.
        # This improves speed for complex substitutions.
        subs_tree = self._subs_tree()
        if isinstance(subs_tree, tuple):
            self.__tree_hash__ = hash(frozenset(subs_tree))
        else:
            self.__tree_hash__ = hash(subs_tree)
        return self
    def _eval_type(self, globalns, localns):
        # Resolve forward references in all arguments; no-op for the
        # unsubscripted Union singleton (args are None).
        if self.__args__ is None:
            return self
        ev_args = tuple(_eval_type(t, globalns, localns) for t in self.__args__)
        ev_origin = _eval_type(self.__origin__, globalns, localns)
        if ev_args == self.__args__ and ev_origin == self.__origin__:
            # Everything is already evaluated.
            return self
        return self.__class__(ev_args, ev_origin, _root=True)
    def _get_type_vars(self, tvars):
        if self.__origin__ and self.__parameters__:
            _get_type_vars(self.__parameters__, tvars)
    def __repr__(self):
        if self.__origin__ is None:
            return super(_Union, self).__repr__()
        tree = self._subs_tree()
        if not isinstance(tree, tuple):
            # Collapsed to a single type; use that type's own repr.
            return repr(tree)
        return tree[0]._tree_repr(tree)
    def _tree_repr(self, tree):
        # Render a substitution tree (see _subs_tree) as Union[...] text.
        arg_list = []
        for arg in tree[1:]:
            if not isinstance(arg, tuple):
                arg_list.append(_type_repr(arg))
            else:
                arg_list.append(arg[0]._tree_repr(arg))
        return super(_Union, self).__repr__() + '[%s]' % ', '.join(arg_list)
    @_tp_cache
    def __getitem__(self, parameters):
        if parameters == ():
            raise TypeError("Cannot take a Union of no types.")
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        if self.__origin__ is None:
            msg = "Union[arg, ...]: each arg must be a type."
        else:
            msg = "Parameters to generic types must be types."
        parameters = tuple(_type_check(p, msg) for p in parameters)
        if self is not Union:
            # Re-subscripting an existing union must match its declared
            # type variables in number.
            _check_generic(self, parameters)
        return self.__class__(parameters, origin=self, _root=True)
    def _subs_tree(self, tvars=None, args=None):
        if self is Union:
            return Union  # Nothing to substitute
        tree_args = _subs_tree(self, tvars, args)
        tree_args = _remove_dups_flatten(tree_args)
        if len(tree_args) == 1:
            return tree_args[0]  # Union of a single type is that type
        return (Union,) + tree_args
    def __eq__(self, other):
        # Compare by pre-computed tree hash; a raw substitution tree
        # (a tuple) is also accepted on the right-hand side.
        if isinstance(other, _Union):
            return self.__tree_hash__ == other.__tree_hash__
        elif self is not Union:
            return self._subs_tree() == other
        else:
            return self is other
    def __hash__(self):
        return self.__tree_hash__
    def __instancecheck__(self, obj):
        raise TypeError("Unions cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        raise TypeError("Unions cannot be used with issubclass().")
Union = _Union(_root=True)  # The public (unsubscripted) Union singleton.
class OptionalMeta(TypingMeta):
    """Metaclass for Optional."""
    def __new__(cls, name, bases, namespace):
        # Optional is final: user classes may not inherit from it.
        cls.assert_no_subclassing(bases)
        return super(OptionalMeta, cls).__new__(cls, name, bases, namespace)
class _Optional(_FinalTypingBase):
    """Optional type.

    Optional[X] is equivalent to Union[X, None].
    """
    __metaclass__ = OptionalMeta
    __slots__ = ()
    @_tp_cache
    def __getitem__(self, arg):
        # Simply desugar Optional[X] into Union[X, NoneType].
        arg = _type_check(arg, "Optional[t] requires a single type.")
        return Union[arg, type(None)]
Optional = _Optional(_root=True)  # The public (unsubscripted) Optional singleton.
def _next_in_mro(cls):
    """Helper for Generic.__new__.

    Returns the class that follows the last occurrence of Generic or
    Generic[...] in cls.__mro__ (object if none is found).
    """
    result = object
    mro = cls.__mro__
    # Scan everything except the trailing `object`, remembering the class
    # that immediately follows the final Generic-derived entry.
    for index, candidate in enumerate(mro[:-1]):
        if isinstance(candidate, GenericMeta) and candidate._gorg is Generic:
            result = mro[index + 1]
    return result
def _make_subclasshook(cls):
    """Construct a __subclasshook__ callable that incorporates
    the associated __extra__ class in subclass checks performed
    against cls.
    """
    if isinstance(cls.__extra__, abc.ABCMeta):
        # The logic mirrors that of ABCMeta.__subclasscheck__.
        # Registered classes need not be checked here because
        # cls and its extra share the same _abc_registry.
        def __extrahook__(cls, subclass):
            # 1) Honor the extra's own hook if it gives a definite answer.
            res = cls.__extra__.__subclasshook__(subclass)
            if res is not NotImplemented:
                return res
            # 2) Direct inheritance from the extra class.
            if cls.__extra__ in getattr(subclass, '__mro__', ()):
                return True
            # 3) Inheritance via any non-generic subclass of the extra.
            for scls in cls.__extra__.__subclasses__():
                if isinstance(scls, GenericMeta):
                    continue
                if issubclass(subclass, scls):
                    return True
            return NotImplemented
    else:
        # For non-ABC extras we'll just call issubclass().
        def __extrahook__(cls, subclass):
            if cls.__extra__ and issubclass(subclass, cls.__extra__):
                return True
            return NotImplemented
    return classmethod(__extrahook__)
class GenericMeta(TypingMeta, abc.ABCMeta):
    """Metaclass for generic types.

    This is a metaclass for typing.Generic and generic ABCs defined in
    typing module. User defined subclasses of GenericMeta can override
    __new__ and invoke super().__new__. Note that GenericMeta.__new__
    has strict rules on what is allowed in its bases argument:
    * plain Generic is disallowed in bases;
    * Generic[...] should appear in bases at most once;
    * if Generic[...] is present, then it should list all type variables
      that appear in other bases.
    In addition, type of all generic bases is erased, e.g., C[int] is
    stripped to plain C.
    """
    def __new__(cls, name, bases, namespace,
                tvars=None, args=None, origin=None, extra=None, orig_bases=None):
        """Create a new generic class. GenericMeta.__new__ accepts
        keyword arguments that are used for internal bookkeeping, therefore
        an override should pass unused keyword arguments to super().
        """
        if tvars is not None:
            # Called from __getitem__() below.
            assert origin is not None
            assert all(isinstance(t, TypeVar) for t in tvars), tvars
        else:
            # Called from class statement.
            assert tvars is None, tvars
            assert args is None, args
            assert origin is None, origin
            # Get the full set of tvars from the bases.
            tvars = _type_vars(bases)
            # Look for Generic[T1, ..., Tn].
            # If found, tvars must be a subset of it.
            # If not found, tvars is it.
            # Also check for and reject plain Generic,
            # and reject multiple Generic[...].
            gvars = None
            for base in bases:
                if base is Generic:
                    raise TypeError("Cannot inherit from plain Generic")
                if (isinstance(base, GenericMeta) and
                        base.__origin__ is Generic):
                    if gvars is not None:
                        raise TypeError(
                            "Cannot inherit from Generic[...] multiple types.")
                    gvars = base.__parameters__
            if gvars is None:
                gvars = tvars
            else:
                tvarset = set(tvars)
                gvarset = set(gvars)
                if not tvarset <= gvarset:
                    raise TypeError(
                        "Some type variables (%s) "
                        "are not listed in Generic[%s]" %
                        (", ".join(str(t) for t in tvars if t not in gvarset),
                         ", ".join(str(g) for g in gvars)))
                tvars = gvars
        initial_bases = bases
        # Splice the __extra__ (the concrete/ABC counterpart, e.g. the
        # real `list` for List) into the bases so isinstance works.
        if extra is None:
            extra = namespace.get('__extra__')
        if extra is not None and type(extra) is abc.ABCMeta and extra not in bases:
            bases = (extra,) + bases
        # Type-erase generic bases: C[int] in bases becomes plain C.
        bases = tuple(b._gorg if isinstance(b, GenericMeta) else b for b in bases)
        # remove bare Generic from bases if there are other generic bases
        if any(isinstance(b, GenericMeta) and b is not Generic for b in bases):
            bases = tuple(b for b in bases if b is not Generic)
        namespace.update({'__origin__': origin, '__extra__': extra})
        self = super(GenericMeta, cls).__new__(cls, name, bases, namespace)
        # _gorg points at the original (unsubscripted) generic class.
        super(GenericMeta, self).__setattr__('_gorg',
                                             self if not origin else origin._gorg)
        self.__parameters__ = tvars
        # Be prepared that GenericMeta will be subclassed by TupleMeta
        # and CallableMeta, those two allow ..., (), or [] in __args___.
        self.__args__ = tuple(Ellipsis if a is _TypingEllipsis else
                              () if a is _TypingEmpty else
                              a for a in args) if args else None
        # Speed hack (https://github.com/python/typing/issues/196).
        self.__next_in_mro__ = _next_in_mro(self)
        # Preserve base classes on subclassing (__bases__ are type erased now).
        if orig_bases is None:
            self.__orig_bases__ = initial_bases
        # This allows unparameterized generic collections to be used
        # with issubclass() and isinstance() in the same way as their
        # collections.abc counterparts (e.g., isinstance([], Iterable)).
        if (
            '__subclasshook__' not in namespace and extra or
            # allow overriding
            getattr(self.__subclasshook__, '__name__', '') == '__extrahook__'
        ):
            self.__subclasshook__ = _make_subclasshook(self)
        if origin and hasattr(origin, '__qualname__'):  # Fix for Python 3.2.
            self.__qualname__ = origin.__qualname__
        # Pre-compute the hash used by __eq__/__hash__ below.
        self.__tree_hash__ = (hash(self._subs_tree()) if origin else
                              super(GenericMeta, self).__hash__())
        return self
    def __init__(self, *args, **kwargs):
        super(GenericMeta, self).__init__(*args, **kwargs)
        # Share ABC registry/caches with the extra (or origin) class so a
        # registration on either side is visible to both.
        if isinstance(self.__extra__, abc.ABCMeta):
            self._abc_registry = self.__extra__._abc_registry
            self._abc_cache = self.__extra__._abc_cache
        elif self.__origin__ is not None:
            self._abc_registry = self.__origin__._abc_registry
            self._abc_cache = self.__origin__._abc_cache
    # _abc_negative_cache and _abc_negative_cache_version
    # realised as descriptors, since GenClass[t1, t2, ...] always
    # share subclass info with GenClass.
    # This is an important memory optimization.
    @property
    def _abc_negative_cache(self):
        if isinstance(self.__extra__, abc.ABCMeta):
            return self.__extra__._abc_negative_cache
        return self._gorg._abc_generic_negative_cache
    @_abc_negative_cache.setter
    def _abc_negative_cache(self, value):
        # Only the unsubscripted class owns the writable cache slot.
        if self.__origin__ is None:
            if isinstance(self.__extra__, abc.ABCMeta):
                self.__extra__._abc_negative_cache = value
            else:
                self._abc_generic_negative_cache = value
    @property
    def _abc_negative_cache_version(self):
        if isinstance(self.__extra__, abc.ABCMeta):
            return self.__extra__._abc_negative_cache_version
        return self._gorg._abc_generic_negative_cache_version
    @_abc_negative_cache_version.setter
    def _abc_negative_cache_version(self, value):
        if self.__origin__ is None:
            if isinstance(self.__extra__, abc.ABCMeta):
                self.__extra__._abc_negative_cache_version = value
            else:
                self._abc_generic_negative_cache_version = value
    def _get_type_vars(self, tvars):
        if self.__origin__ and self.__parameters__:
            _get_type_vars(self.__parameters__, tvars)
    def _eval_type(self, globalns, localns):
        # Resolve forward references in origin and args; rebuild the class
        # only when something actually changed.
        ev_origin = (self.__origin__._eval_type(globalns, localns)
                     if self.__origin__ else None)
        ev_args = tuple(_eval_type(a, globalns, localns) for a
                        in self.__args__) if self.__args__ else None
        if ev_origin == self.__origin__ and ev_args == self.__args__:
            return self
        return self.__class__(self.__name__,
                              self.__bases__,
                              dict(self.__dict__),
                              tvars=_type_vars(ev_args) if ev_args else None,
                              args=ev_args,
                              origin=ev_origin,
                              extra=self.__extra__,
                              orig_bases=self.__orig_bases__)
    def __repr__(self):
        if self.__origin__ is None:
            return super(GenericMeta, self).__repr__()
        return self._tree_repr(self._subs_tree())
    def _tree_repr(self, tree):
        # Render a substitution tree (see _subs_tree) as Class[...] text.
        arg_list = []
        for arg in tree[1:]:
            if arg == ():
                arg_list.append('()')
            elif not isinstance(arg, tuple):
                arg_list.append(_type_repr(arg))
            else:
                arg_list.append(arg[0]._tree_repr(arg))
        return super(GenericMeta, self).__repr__() + '[%s]' % ', '.join(arg_list)
    def _subs_tree(self, tvars=None, args=None):
        if self.__origin__ is None:
            return self
        tree_args = _subs_tree(self, tvars, args)
        return (self._gorg,) + tuple(tree_args)
    def __eq__(self, other):
        # Unsubscripted classes compare by identity; subscripted ones by
        # the pre-computed substitution-tree hash.
        if not isinstance(other, GenericMeta):
            return NotImplemented
        if self.__origin__ is None or other.__origin__ is None:
            return self is other
        return self.__tree_hash__ == other.__tree_hash__
    def __hash__(self):
        return self.__tree_hash__
    @_tp_cache
    def __getitem__(self, params):
        if not isinstance(params, tuple):
            params = (params,)
        if not params and self._gorg is not Tuple:
            raise TypeError(
                "Parameter list to %s[...] cannot be empty" % _qualname(self))
        msg = "Parameters to generic types must be types."
        params = tuple(_type_check(p, msg) for p in params)
        if self is Generic:
            # Generic can only be subscripted with unique type variables.
            if not all(isinstance(p, TypeVar) for p in params):
                raise TypeError(
                    "Parameters to Generic[...] must all be type variables")
            if len(set(params)) != len(params):
                raise TypeError(
                    "Parameters to Generic[...] must all be unique")
            tvars = params
            args = params
        elif self in (Tuple, Callable):
            tvars = _type_vars(params)
            args = params
        elif self is _Protocol:
            # _Protocol is internal, don't check anything.
            tvars = params
            args = params
        elif self.__origin__ in (Generic, _Protocol):
            # Can't subscript Generic[...] or _Protocol[...].
            raise TypeError("Cannot subscript already-subscripted %s" %
                            repr(self))
        else:
            # Subscripting a regular Generic subclass.
            _check_generic(self, params)
            tvars = _type_vars(params)
            args = params
        # The unsubscripted class is prepended so that the subscripted
        # result is a subclass of (and proxy for) the original.
        prepend = (self,) if self.__origin__ is None else ()
        return self.__class__(self.__name__,
                              prepend + self.__bases__,
                              dict(self.__dict__),
                              tvars=tvars,
                              args=args,
                              origin=self,
                              extra=self.__extra__,
                              orig_bases=self.__orig_bases__)
    def __subclasscheck__(self, cls):
        if self.__origin__ is not None:
            # This should only be modules within the standard
            # library. singledispatch is the only exception, because
            # it's a Python 2 backport of functools.singledispatch.
            if sys._getframe(1).f_globals['__name__'] not in ['abc', 'functools',
                                                              'singledispatch']:
                raise TypeError("Parameterized generics cannot be used with class "
                                "or instance checks")
            return False
        if self is Generic:
            raise TypeError("Class %r cannot be used with class "
                            "or instance checks" % self)
        return super(GenericMeta, self).__subclasscheck__(cls)
    def __instancecheck__(self, instance):
        # Since we extend ABC.__subclasscheck__ and
        # ABC.__instancecheck__ inlines the cache checking done by the
        # latter, we must extend __instancecheck__ too. For simplicity
        # we just skip the cache check -- instance checks for generic
        # classes are supposed to be rare anyways.
        if not isinstance(instance, type):
            return issubclass(instance.__class__, self)
        return False
    def __setattr__(self, attr, value):
        # We consider all the subscripted generics as proxies for original class
        if (
            attr.startswith('__') and attr.endswith('__') or
            attr.startswith('_abc_')
        ):
            super(GenericMeta, self).__setattr__(attr, value)
        else:
            # Redirect user attribute writes to the original class.
            super(GenericMeta, self._gorg).__setattr__(attr, value)
def _copy_generic(self):
    """Hack to work around https://bugs.python.org/issue11480 on Python 2"""
    # Rebuild the (possibly subscripted) generic class from its parts.
    return self.__class__(self.__name__, self.__bases__, dict(self.__dict__),
                          self.__parameters__, self.__args__, self.__origin__,
                          self.__extra__, self.__orig_bases__)
# Teach copy.copy() how to duplicate GenericMeta instances.
copy._copy_dispatch[GenericMeta] = _copy_generic
# Prevent checks for Generic to crash when defining Generic.
Generic = None
def _generic_new(base_cls, cls, *args, **kwds):
# Assure type is erased on instantiation,
# but attempt to store it in __orig_class__
if cls.__origin__ is None:
return base_cls.__new__(cls)
else:
origin = cls._gorg
obj = base_cls.__new__(origin)
try:
obj.__orig_class__ = cls
except AttributeError:
pass
obj.__init__(*args, **kwds)
return obj
class Generic(object):
    """Abstract base class for generic types.

    A generic type is typically declared by inheriting from
    this class parameterized with one or more type variables.
    For example, a generic mapping type might be defined as::

      class Mapping(Generic[KT, VT]):
          def __getitem__(self, key: KT) -> VT:
              ...
          # Etc.

    This class can then be used as follows::

      def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
          try:
              return mapping[key]
          except KeyError:
              return default
    """
    __metaclass__ = GenericMeta
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        # Bare Generic is abstract; concrete subclasses are created via
        # _generic_new which erases the subscription.
        if cls._gorg is Generic:
            raise TypeError("Type Generic cannot be instantiated; "
                            "it can be used only as a base class")
        return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
class _TypingEmpty(object):
    """Internal placeholder for () or []. Used by TupleMeta and CallableMeta
    to allow empty list/tuple in specific places, without allowing them
    to sneak in where prohibited.
    """
    # Never instantiated; used purely as a marker inside __args__.
class _TypingEllipsis(object):
    """Internal placeholder for ... (ellipsis)."""
    # Never instantiated; marks Tuple[t, ...] / Callable[..., r] in __args__.
class TupleMeta(GenericMeta):
    """Metaclass for Tuple (internal)."""
    @_tp_cache
    def __getitem__(self, parameters):
        if self.__origin__ is not None or self._gorg is not Tuple:
            # Normal generic rules apply if this is not the first subscription
            # or a subscription of a subclass.
            return super(TupleMeta, self).__getitem__(parameters)
        if parameters == ():
            # Tuple[()] is the empty-tuple type, marked with _TypingEmpty.
            return super(TupleMeta, self).__getitem__((_TypingEmpty,))
        if not isinstance(parameters, tuple):
            parameters = (parameters,)
        if len(parameters) == 2 and parameters[1] is Ellipsis:
            # Homogeneous variable-length form: Tuple[t, ...].
            msg = "Tuple[t, ...]: t must be a type."
            p = _type_check(parameters[0], msg)
            return super(TupleMeta, self).__getitem__((p, _TypingEllipsis))
        msg = "Tuple[t0, t1, ...]: each t must be a type."
        parameters = tuple(_type_check(p, msg) for p in parameters)
        return super(TupleMeta, self).__getitem__(parameters)
    def __instancecheck__(self, obj):
        # Bare Tuple behaves like the builtin tuple in isinstance checks.
        if self.__args__ is None:
            return isinstance(obj, tuple)
        raise TypeError("Parameterized Tuple cannot be used "
                        "with isinstance().")
    def __subclasscheck__(self, cls):
        if self.__args__ is None:
            return issubclass(cls, tuple)
        raise TypeError("Parameterized Tuple cannot be used "
                        "with issubclass().")
copy._copy_dispatch[TupleMeta] = _copy_generic  # copy.copy() support (see _copy_generic).
class Tuple(tuple):
    """Tuple type; Tuple[X, Y] is the cross-product type of X and Y.

    Example: Tuple[T1, T2] is a tuple of two elements corresponding
    to type variables T1 and T2.  Tuple[int, float, str] is a tuple
    of an int, a float and a string.

    To specify a variable-length tuple of homogeneous type, use Tuple[T, ...].
    """
    __metaclass__ = TupleMeta
    __extra__ = tuple
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        # The bare generic cannot be instantiated; subclasses go through
        # _generic_new, which erases the subscription.
        if cls._gorg is Tuple:
            raise TypeError("Type Tuple cannot be instantiated; "
                            "use tuple() instead")
        return _generic_new(tuple, cls, *args, **kwds)
class CallableMeta(GenericMeta):
    """ Metaclass for Callable."""
    def __repr__(self):
        if self.__origin__ is None:
            return super(CallableMeta, self).__repr__()
        return self._tree_repr(self._subs_tree())
    def _tree_repr(self, tree):
        if self._gorg is not Callable:
            return super(CallableMeta, self)._tree_repr(tree)
        # For actual Callable (not its subclass) we override
        # super(CallableMeta, self)._tree_repr() for nice formatting.
        arg_list = []
        for arg in tree[1:]:
            if not isinstance(arg, tuple):
                arg_list.append(_type_repr(arg))
            else:
                arg_list.append(arg[0]._tree_repr(arg))
        if arg_list[0] == '...':
            return repr(tree[0]) + '[..., %s]' % arg_list[1]
        return (repr(tree[0]) +
                '[[%s], %s]' % (', '.join(arg_list[:-1]), arg_list[-1]))
    def __getitem__(self, parameters):
        """A thin wrapper around __getitem_inner__ to provide the latter
        with hashable arguments to improve speed.
        """
        if self.__origin__ is not None or self._gorg is not Callable:
            return super(CallableMeta, self).__getitem__(parameters)
        if not isinstance(parameters, tuple) or len(parameters) != 2:
            raise TypeError("Callable must be used as "
                            "Callable[[arg, ...], result].")
        args, result = parameters
        if args is Ellipsis:
            parameters = (Ellipsis, result)
        else:
            if not isinstance(args, list):
                raise TypeError("Callable[args, result]: args must be a list."
                                " Got %.100r." % (args,))
            # Convert the unhashable argument list to a tuple for caching.
            parameters = (tuple(args), result)
        return self.__getitem_inner__(parameters)
    @_tp_cache
    def __getitem_inner__(self, parameters):
        args, result = parameters
        msg = "Callable[args, result]: result must be a type."
        result = _type_check(result, msg)
        if args is Ellipsis:
            # Callable[..., result]: arguments are unconstrained.
            return super(CallableMeta, self).__getitem__((_TypingEllipsis, result))
        msg = "Callable[[arg, ...], result]: each arg must be a type."
        args = tuple(_type_check(arg, msg) for arg in args)
        parameters = args + (result,)
        return super(CallableMeta, self).__getitem__(parameters)
copy._copy_dispatch[CallableMeta] = _copy_generic  # copy.copy() support (see _copy_generic).
class Callable(object):
    """Callable type; Callable[[int], str] is a function of (int) -> str.

    The subscription syntax must always be used with exactly two
    values: the argument list and the return type.  The argument list
    must be a list of types or ellipsis; the return type must be a single type.

    There is no syntax to indicate optional or keyword arguments,
    such function types are rarely used as callback types.
    """
    __metaclass__ = CallableMeta
    __extra__ = collections_abc.Callable
    __slots__ = ()
    def __new__(cls, *args, **kwds):
        # The bare generic cannot be instantiated; subclasses go through
        # _generic_new, which erases the subscription.
        if cls._gorg is Callable:
            raise TypeError("Type Callable cannot be instantiated; "
                            "use a non-abstract subclass instead")
        return _generic_new(cls.__next_in_mro__, cls, *args, **kwds)
def cast(typ, val):
    """Cast a value to a type.

    The value is returned completely unchanged: this exists solely as a
    signal to static type checkers that *val* has type *typ*. No runtime
    check is performed, deliberately, so the call is as cheap as possible.
    """
    return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
code = func.__code__
pos_count = code.co_argcount
arg_names = code.co_varnames
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
def get_type_hints(obj, globalns=None, localns=None):
    """In Python 2 this is not supported and always returns None."""
    # Python 2 has no function annotations, so there is nothing to collect.
    return None
def no_type_check(arg):
    """Decorator to indicate that annotations are not type hints.

    The argument must be a class or function; if it is a class, it
    applies recursively to all methods and classes defined in that class
    (but not to methods defined in its superclasses or subclasses).

    This mutates the function(s) or class(es) in place.
    """
    if isinstance(arg, type):
        members = dict(arg.__dict__)
        # Drop entries that refer back to the class itself or its bases,
        # so we do not recurse into them.
        for name, member in arg.__dict__.items():
            if member in arg.__bases__ + (arg,):
                members.pop(name)
        for member in members.values():
            if isinstance(member, types.FunctionType):
                member.__no_type_check__ = True
            if isinstance(member, type):
                no_type_check(member)
    try:
        arg.__no_type_check__ = True
    except TypeError:  # built-in classes
        pass
    return arg
def no_type_check_decorator(decorator):
    """Decorator to give another decorator the @no_type_check effect.

    This wraps the decorator with something that wraps the decorated
    function in @no_type_check.
    """
    @functools.wraps(decorator)
    def wrapped_decorator(*args, **kwds):
        # Apply the wrapped decorator first, then mark its result.
        return no_type_check(decorator(*args, **kwds))
    return wrapped_decorator
def _overload_dummy(*args, **kwds):
"""Helper for @overload to raise when called."""
raise NotImplementedError(
"You should not call an overloaded function. "
"A series of @overload-decorated functions "
"outside a stub module should always be followed "
"by an implementation that is not @overload-ed.")
def overload(func):
    """Decorator for overloaded functions/methods.

    In a stub file, place two or more stub definitions for the same
    function in a row, each decorated with @overload.  For example:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...

    In a non-stub file (i.e. a regular .py file), do the same but
    follow it with an implementation.  The implementation should *not*
    be decorated with @overload.  For example:

      @overload
      def utf8(value: None) -> None: ...
      @overload
      def utf8(value: bytes) -> bytes: ...
      @overload
      def utf8(value: str) -> bytes: ...
      def utf8(value):
          # implementation goes here
    """
    # The original function is discarded: calling any overload stub raises.
    return _overload_dummy
class _ProtocolMeta(GenericMeta):
    """Internal metaclass for _Protocol.

    This exists so _Protocol classes can be generic without deriving
    from Generic.
    """
    def __instancecheck__(self, obj):
        if _Protocol not in self.__bases__:
            return super(_ProtocolMeta, self).__instancecheck__(obj)
        raise TypeError("Protocols cannot be used with isinstance().")
    def __subclasscheck__(self, cls):
        # Structural check: cls qualifies iff it provides every attribute
        # the protocol declares.
        if not self._is_protocol:
            # No structural checks since this isn't a protocol.
            return NotImplemented
        if self is _Protocol:
            # Every class is a subclass of the empty protocol.
            return True
        # Find all attributes defined in the protocol.
        attrs = self._get_protocol_attrs()
        for attr in attrs:
            if not any(attr in d.__dict__ for d in cls.__mro__):
                return False
        return True
    def _get_protocol_attrs(self):
        # Get all Protocol base classes.
        protocol_bases = []
        for c in self.__mro__:
            if getattr(c, '_is_protocol', False) and c.__name__ != '_Protocol':
                protocol_bases.append(c)
        # Get attributes included in protocol.
        attrs = set()
        for base in protocol_bases:
            for attr in base.__dict__.keys():
                # Include attributes not defined in any non-protocol bases.
                for c in self.__mro__:
                    if (c is not base and attr in c.__dict__ and
                            not getattr(c, '_is_protocol', False)):
                        break
                else:
                    # Exclude internal bookkeeping names; everything else is
                    # part of the protocol's structural contract.
                    if (not attr.startswith('_abc_') and
                            attr != '__abstractmethods__' and
                            attr != '_is_protocol' and
                            attr != '_gorg' and
                            attr != '__dict__' and
                            attr != '__args__' and
                            attr != '__slots__' and
                            attr != '_get_protocol_attrs' and
                            attr != '__next_in_mro__' and
                            attr != '__parameters__' and
                            attr != '__origin__' and
                            attr != '__orig_bases__' and
                            attr != '__extra__' and
                            attr != '__tree_hash__' and
                            attr != '__module__'):
                        attrs.add(attr)
        return attrs
class _Protocol(object):
    """Internal base class for protocol classes.

    This implements a simple-minded structural issubclass check
    (similar but more general than the one-offs in collections.abc
    such as Hashable).
    """
    __metaclass__ = _ProtocolMeta
    __slots__ = ()
    # Flag consulted by _ProtocolMeta.__subclasscheck__.
    _is_protocol = True
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Hashable = collections_abc.Hashable  # Not generic.
class Iterable(Generic[T_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.Iterable
class Iterator(Iterable[T_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.Iterator
class SupportsInt(_Protocol):
    # Structural protocol: matched by any class defining __int__.
    __slots__ = ()
    @abstractmethod
    def __int__(self):
        pass
class SupportsFloat(_Protocol):
    # Structural protocol: matched by any class defining __float__.
    __slots__ = ()
    @abstractmethod
    def __float__(self):
        pass
class SupportsComplex(_Protocol):
    # Structural protocol: matched by any class defining __complex__.
    __slots__ = ()
    @abstractmethod
    def __complex__(self):
        pass
class SupportsAbs(_Protocol[T_co]):
    # Generic structural protocol: matched by any class defining __abs__.
    __slots__ = ()
    @abstractmethod
    def __abs__(self):
        pass
if hasattr(collections_abc, 'Reversible'):
    class Reversible(Iterable[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Reversible
else:
    # Older collections.abc versions lack Reversible; fall back to a
    # structural protocol requiring __reversed__.
    class Reversible(_Protocol[T_co]):
        __slots__ = ()
        @abstractmethod
        def __reversed__(self):
            pass
Sized = collections_abc.Sized  # Not generic (no element type to parameterize).
class Container(Generic[T_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.Container
# Callable was defined earlier.
class AbstractSet(Sized, Iterable[T_co], Container[T_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.Set
class MutableSet(AbstractSet[T]):
    __slots__ = ()
    # Invariant element type, since elements can be both read and added.
    __extra__ = collections_abc.MutableSet
# NOTE: It is only covariant in the value type.
class Mapping(Sized, Iterable[KT], Container[KT], Generic[KT, VT_co]):
    __slots__ = ()
    # Covariant only in the value type (keys are used for lookup too).
    __extra__ = collections_abc.Mapping
class MutableMapping(Mapping[KT, VT]):
    __slots__ = ()
    # Invariant value type, since values can be both read and written.
    __extra__ = collections_abc.MutableMapping
if hasattr(collections_abc, 'Reversible'):
    class Sequence(Sized, Reversible[T_co], Container[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Sequence
else:
    # Without a Reversible ABC, Sequence can only promise iteration.
    class Sequence(Sized, Iterable[T_co], Container[T_co]):
        __slots__ = ()
        __extra__ = collections_abc.Sequence
class MutableSequence(Sequence[T]):
    __slots__ = ()
    # Invariant element type, since elements can be both read and written.
    __extra__ = collections_abc.MutableSequence
class ByteString(Sequence[int]):
    pass
# On Python 2, both str and bytearray are sequences of bytes.
ByteString.register(str)
ByteString.register(bytearray)
class List(list, MutableSequence[T]):
    __slots__ = ()
    __extra__ = list
    def __new__(cls, *args, **kwds):
        # Bare List is a type annotation, not a constructor; subclasses
        # instantiate through _generic_new, which erases subscription.
        if cls._gorg is List:
            raise TypeError("Type List cannot be instantiated; "
                            "use list() instead")
        return _generic_new(list, cls, *args, **kwds)
class Deque(collections.deque, MutableSequence[T]):
    __slots__ = ()
    __extra__ = collections.deque
    def __new__(cls, *args, **kwds):
        # Unlike List/Set/Dict, instantiating bare Deque simply produces
        # a plain collections.deque.
        if cls._gorg is Deque:
            return collections.deque(*args, **kwds)
        return _generic_new(collections.deque, cls, *args, **kwds)
class Set(set, MutableSet[T]):
    __slots__ = ()
    __extra__ = set
    def __new__(cls, *args, **kwds):
        # Bare Set is a type annotation, not a constructor; subclasses
        # instantiate through _generic_new, which erases subscription.
        if cls._gorg is Set:
            raise TypeError("Type Set cannot be instantiated; "
                            "use set() instead")
        return _generic_new(set, cls, *args, **kwds)
class FrozenSet(frozenset, AbstractSet[T_co]):
    __slots__ = ()
    __extra__ = frozenset
    def __new__(cls, *args, **kwds):
        # Bare FrozenSet is a type annotation, not a constructor.
        if cls._gorg is FrozenSet:
            raise TypeError("Type FrozenSet cannot be instantiated; "
                            "use frozenset() instead")
        return _generic_new(frozenset, cls, *args, **kwds)
class MappingView(Sized, Iterable[T_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.MappingView
class KeysView(MappingView[KT], AbstractSet[KT]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.KeysView
class ItemsView(MappingView[Tuple[KT, VT_co]],
                AbstractSet[Tuple[KT, VT_co]],
                Generic[KT, VT_co]):
    __slots__ = ()
    # Runtime isinstance/issubclass checks delegate to the collections ABC.
    __extra__ = collections_abc.ItemsView
class ValuesView(MappingView[VT_co]):
__slots__ = ()
__extra__ = collections_abc.ValuesView
class ContextManager(Generic[T_co]):
    """Generic fallback ABC for objects usable in a ``with`` statement."""

    __slots__ = ()

    def __enter__(self):
        return self

    @abc.abstractmethod
    def __exit__(self, exc_type, exc_value, traceback):
        return None

    @classmethod
    def __subclasshook__(cls, C):
        # Structural check: any class whose MRO defines both __enter__ and
        # __exit__ is considered a ContextManager subclass.
        if cls is ContextManager:
            # In Python 3.6+, it is possible to set a method to None to
            # explicitly indicate that the class does not implement an ABC
            # (https://bugs.python.org/issue25958), but we do not support
            # that pattern here because this fallback class is only used
            # in Python 3.5 and earlier.
            if (any("__enter__" in B.__dict__ for B in C.__mro__) and
                    any("__exit__" in B.__dict__ for B in C.__mro__)):
                return True
        return NotImplemented
# Generic aliases for the mapping containers.  Dict forbids direct
# instantiation of the bare alias; DefaultDict and Counter allow it and
# simply delegate to the underlying collections type.
class Dict(dict, MutableMapping[KT, VT]):
    __slots__ = ()
    __extra__ = dict

    def __new__(cls, *args, **kwds):
        if cls._gorg is Dict:
            raise TypeError("Type Dict cannot be instantiated; "
                            "use dict() instead")
        return _generic_new(dict, cls, *args, **kwds)


class DefaultDict(collections.defaultdict, MutableMapping[KT, VT]):
    __slots__ = ()
    __extra__ = collections.defaultdict

    def __new__(cls, *args, **kwds):
        if cls._gorg is DefaultDict:
            return collections.defaultdict(*args, **kwds)
        return _generic_new(collections.defaultdict, cls, *args, **kwds)


class Counter(collections.Counter, Dict[T, int]):
    __slots__ = ()
    __extra__ = collections.Counter

    def __new__(cls, *args, **kwds):
        if cls._gorg is Counter:
            return collections.Counter(*args, **kwds)
        return _generic_new(collections.Counter, cls, *args, **kwds)
# Determine what base class to use for Generator.
if hasattr(collections_abc, 'Generator'):
    # Sufficiently recent versions of 3.5 have a Generator ABC.
    _G_base = collections_abc.Generator
else:
    # Fall back on the exact type.
    _G_base = types.GeneratorType


class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co]):
    """Generic alias for generators (yield type, send type, return type)."""

    __slots__ = ()
    __extra__ = _G_base

    def __new__(cls, *args, **kwds):
        # Generators can only come from generator functions, so the bare
        # alias cannot be instantiated.
        if cls._gorg is Generator:
            raise TypeError("Type Generator cannot be instantiated; "
                            "create a subclass instead")
        return _generic_new(_G_base, cls, *args, **kwds)
# Internal type variable used for Type[].
CT_co = TypeVar('CT_co', covariant=True, bound=type)


# This is not a real generic class.  Don't use outside annotations.
class Type(Generic[CT_co]):
    """A special construct usable to annotate class objects.

    For example, suppose we have the following classes::

        class User: ...  # Abstract base for User classes
        class BasicUser(User): ...
        class ProUser(User): ...
        class TeamUser(User): ...

    And a function that takes a class argument that's a subclass of
    User and returns an instance of the corresponding class::

        U = TypeVar('U', bound=User)

        def new_user(user_class: Type[U]) -> U:
            user = user_class()
            # (Here we could write the user object to a database)
            return user

        joe = new_user(BasicUser)

    At this point the type checker knows that joe has type BasicUser.
    """

    __slots__ = ()
    __extra__ = type
def NamedTuple(typename, fields):
    """Typed version of namedtuple.

    Usage::

        Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)])

    This is equivalent to::

        Employee = collections.namedtuple('Employee', ['name', 'id'])

    The resulting class has one extra attribute: _field_types, giving a
    dict mapping field names to types.  (The field names are in the
    _fields attribute, which is part of the namedtuple API.)
    """
    # Materialize (and validate the pair shape of) the field specs once.
    field_pairs = [(field_name, field_type) for field_name, field_type in fields]
    nt_class = collections.namedtuple(typename, [pair[0] for pair in field_pairs])
    nt_class._field_types = dict(field_pairs)
    # Set the module to the caller's module (otherwise it'd be 'typing').
    try:
        caller_globals = sys._getframe(1).f_globals
        nt_class.__module__ = caller_globals.get('__name__', '__main__')
    except (AttributeError, ValueError):
        pass
    return nt_class
def NewType(name, tp):
    """NewType creates simple unique types with almost zero
    runtime overhead.  NewType(name, tp) is considered a subtype of tp
    by static type checkers.  At runtime, NewType(name, tp) returns
    a dummy function that simply returns its argument.  Usage::

        UserId = NewType('UserId', int)

        def name_by_id(user_id):
            # type: (UserId) -> str
            ...

        UserId('user')          # Fails type check
        name_by_id(42)          # Fails type check
        name_by_id(UserId(42))  # OK

        num = UserId(5) + 1     # type: int
    """
    def new_type(value):
        # Identity at runtime; only static checkers treat this as a type.
        return value

    # Some versions of Python 2 complain because of making all strings unicode
    new_type.__name__ = str(name)
    new_type.__supertype__ = tp
    return new_type
# Python-version-specific alias (Python 2: unicode; Python 3: str).
Text = unicode

# Constant that's True when type checking, but False here (at runtime).
TYPE_CHECKING = False
class IO(Generic[AnyStr]):
    """Generic base class for TextIO and BinaryIO.

    This is an abstract, generic version of the return of open().

    NOTE: This does not distinguish between the different possible
    classes (text vs. binary, read vs. write vs. read/write,
    append-only, unbuffered).  The TextIO and BinaryIO subclasses
    below capture the distinctions between text vs. binary, which is
    pervasive in the interface; however we currently do not offer a
    way to track the other distinctions in the type system.
    """

    __slots__ = ()

    # The members below mirror the io-module file-object interface; they are
    # abstract so only concrete file objects satisfy isinstance checks.
    @abstractproperty
    def mode(self):
        pass

    @abstractproperty
    def name(self):
        pass

    @abstractmethod
    def close(self):
        pass

    @abstractmethod
    def closed(self):
        pass

    @abstractmethod
    def fileno(self):
        pass

    @abstractmethod
    def flush(self):
        pass

    @abstractmethod
    def isatty(self):
        pass

    @abstractmethod
    def read(self, n=-1):
        pass

    @abstractmethod
    def readable(self):
        pass

    @abstractmethod
    def readline(self, limit=-1):
        pass

    @abstractmethod
    def readlines(self, hint=-1):
        pass

    @abstractmethod
    def seek(self, offset, whence=0):
        pass

    @abstractmethod
    def seekable(self):
        pass

    @abstractmethod
    def tell(self):
        pass

    @abstractmethod
    def truncate(self, size=None):
        pass

    @abstractmethod
    def writable(self):
        pass

    @abstractmethod
    def write(self, s):
        pass

    @abstractmethod
    def writelines(self, lines):
        pass

    # Files are context managers.
    @abstractmethod
    def __enter__(self):
        pass

    @abstractmethod
    def __exit__(self, type, value, traceback):
        pass
class BinaryIO(IO[bytes]):
    """Typed version of the return of open() in binary mode."""

    __slots__ = ()

    # Narrow write/__enter__ to the bytes-valued interface.
    @abstractmethod
    def write(self, s):
        pass

    @abstractmethod
    def __enter__(self):
        pass


class TextIO(IO[unicode]):
    """Typed version of the return of open() in text mode."""

    __slots__ = ()

    # Text-mode-only attributes of io.TextIOWrapper.
    @abstractproperty
    def buffer(self):
        pass

    @abstractproperty
    def encoding(self):
        pass

    @abstractproperty
    def errors(self):
        pass

    @abstractproperty
    def line_buffering(self):
        pass

    @abstractproperty
    def newlines(self):
        pass

    @abstractmethod
    def __enter__(self):
        pass


class io(object):
    """Wrapper namespace for IO generic classes."""

    __all__ = ['IO', 'TextIO', 'BinaryIO']
    IO = IO
    TextIO = TextIO
    BinaryIO = BinaryIO


# Expose the namespace as an importable pseudo-module "typing.io" (Python 2
# module names are byte strings, hence the b'.io' concatenation).
io.__name__ = __name__ + b'.io'
sys.modules[io.__name__] = io
# Type aliases for compiled regular expressions and match objects,
# parameterized over AnyStr.  The runtime types are taken from stdlib_re
# because they are not directly importable; the lambda extracts the pattern
# for error messages.
Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
                     lambda p: p.pattern)
Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
                   lambda m: m.re.pattern)


class re(object):
    """Wrapper namespace for re type aliases."""

    __all__ = ['Pattern', 'Match']
    Pattern = Pattern
    Match = Match


# Expose the namespace as an importable pseudo-module "typing.re" (Python 2
# module names are byte strings, hence the b'.re' concatenation).
re.__name__ = __name__ + b'.re'
sys.modules[re.__name__] = re
| [
"nneesshh@163.com"
] | nneesshh@163.com |
b4ce0ab8358e4b133a3e29c81f58f4af43d26e85 | 35da6e7db2872b5e75fb042244dc7ba403987dbe | /dataCache.py | 76e6b5f0e6a2d851b8785a1783e567eb6396b176 | [] | no_license | sudhakosuri/MIPS-Architecture-CPU-Analyzer | fec591aecc2ed7c071eb4a9625e76e84ff039d3b | 4fb67ae70c528f7f0f0439da42b3ff91aba77217 | refs/heads/master | 2020-12-11T15:55:33.527148 | 2020-01-14T17:00:35 | 2020-01-14T17:00:35 | 233,890,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,499 | py | import reg
# 2 sets x 8 slots: a 2-way set-associative data cache where each way holds
# one 4-word block (slots 0-3 = way 0, slots 4-7 = way 1, as filled by
# implement_setAssociativeCache).  A slot value of 0 marks "empty".
D_Cache = [[0 for x in range(8)] for y in range(2)]
# Scratch word addresses of the most recently formed 4-word block.
val1 = 0
val2 = 0
val3 = 0
val4 = 0
# Running count of data-cache hits.
dcache_hits = 0
def search(current_value, data_value, displacement, isDouble):
    """Probe the cache for the word address `current_value`.

    Returns False on a hit (after counting it); on a miss, loads the
    containing block via implement_setAssociativeCache() and returns True.
    """
    global dcache_hits
    # Linear scan of every slot in both sets.
    for row in range(0, 2):
        for d_value in range(0, 8):
            if (D_Cache[row][d_value] == current_value):
                dcache_hits += 1
                print(dcache_hits)
                print("Found!!")
                return False
    # Miss: bring the 4-word block containing this address into the cache.
    implement_setAssociativeCache(current_value,data_value, displacement, isDouble)
    #dcache_hits += 1
    return True
def isDataCacheMiss(data_value, displacement, isFirst, isDouble):
    """Return True on a data-cache miss for the access reg.R[n] + displacement.

    Assumed from usage -- confirm against callers:
        data_value: register name such as 'R5'; the numeric suffix indexes reg.R.
        displacement: byte offset added to the register value.
        isFirst: 0 for the very first access (compulsory-miss path).
        isDouble: True for a double-word access (second word at address + 4).
    """
    global dcache_hits
    if(isFirst == 0):
        # First access ever: unconditionally load the block and report a miss.
        register_index_value = int(data_value[1:])
        current_value = reg.R[register_index_value] + displacement
        implement_setAssociativeCache(current_value,data_value, displacement, isDouble)
        # NOTE(review): incrementing the hit counter on this compulsory-miss
        # path looks suspicious -- confirm the intended accounting.
        dcache_hits += 1
        return True
    else:
        register_index_value = int(data_value[1:])
        current_value = reg.R[register_index_value] + displacement
        if(isDouble == False):
            return search(current_value,data_value, displacement, isDouble)
        else:
            if(isDouble == True):
                # Double-word: the first word is counted as a hit and only the
                # second word (address + 4) is probed.  NOTE(review): verify
                # this is the intended policy.
                dcache_hits += 1
                return search(current_value+4,data_value, displacement, isDouble)
def implement_setAssociativeCache(current_value,data_value,displacement, isDouble):
    """Load the 4-word block containing word address `current_value` into
    the 2-way set-associative D_Cache."""
    global D_Cache
    # NOTE(review): computed but never used in this function.
    register_index_value = int(data_value[1:])
    word_address = current_value
    # 4-byte words: block index, offset within a 4-block group, target set.
    block_number = int(word_address/4)
    cache_word_address = block_number%4
    set_number = block_number%2
    # Compute the four word addresses (4 bytes apart) of the aligned group
    # containing word_address; these locals shadow the module-level val1..val4.
    if(cache_word_address == 1):
        val1 = word_address - 4
        val2 = val1+4
        val3 = val2+4
        val4 = val3+4
    if(cache_word_address == 2):
        val1 = word_address - 8
        val2 = word_address - 4
        val3 = word_address
        val4 = val3 + 4
    if(cache_word_address == 3):
        val1 = word_address - 12
        val2 = word_address - 8
        val3 = word_address - 4
        val4 = word_address
    if (cache_word_address == 0):
        val1 = word_address
        val2 = val1+4
        val3 = val2+4
        val4 = val3+4
    current = "current_MRU_None"
    # Fill way 0 (slots 0-3) if empty, else way 1 (slots 4-7) if empty.
    if(D_Cache[set_number][0] == 0):
        D_Cache[set_number][0] = val1
        D_Cache[set_number][1] = val2
        D_Cache[set_number][2] = val3
        D_Cache[set_number][3] = val4
        current = 'current_MRU_'+str(set_number)+'_0'
    elif (D_Cache[set_number][4] == 0):
        D_Cache[set_number][4] = val1
        D_Cache[set_number][5] = val2
        D_Cache[set_number][6] = val3
        D_Cache[set_number][7] = val4
        current = 'current_MRU_'+str(set_number)+'_1'
    else:
        # NOTE(review): `current` is a local reset to "current_MRU_None" on
        # every call, so none of the comparisons below can ever be true and
        # nothing is replaced once both ways of a set are full.  The MRU
        # marker presumably needs to persist at module scope -- confirm.
        if(set_number == 0 and current == "current_MRU_0_0"):
            D_Cache[set_number][4] = val1
            D_Cache[set_number][5] = val2
            D_Cache[set_number][6] = val3
            D_Cache[set_number][7] = val4
            current = "current_MRU_0_1"
        elif(set_number == 0 and current == "current_MRU_0_1"):
            D_Cache[set_number][0] = val1
            D_Cache[set_number][1] = val2
            D_Cache[set_number][2] = val3
            D_Cache[set_number][3] = val4
            current = "current_MRU_0_0"
        elif(set_number == 1 and current == "current_MRU_1_0"):
            D_Cache[set_number][4] = val1
            D_Cache[set_number][5] = val2
            D_Cache[set_number][6] = val3
            D_Cache[set_number][7] = val4
            current = "current_MRU_1_1"
        elif(set_number == 1 and current == "current_MRU_1_1"):
            D_Cache[set_number][0] = val1
            D_Cache[set_number][1] = val2
            D_Cache[set_number][2] = val3
            D_Cache[set_number][3] = val4
            current = "current_MRU_1_0"
    #print('Added succesfully !!')
'''print(isDataCacheMiss('R5', 12, 1,True))
print("heyyyyyyyyyyyyyyyyyyyyyyy")
print(D_Cache[1][0])
print(D_Cache[1][1])
print(D_Cache[1][2])
print(D_Cache[1][3])
print(D_Cache[1][4])
print(D_Cache[1][5])
print(D_Cache[1][6])
print(D_Cache[1][7])
print(D_Cache[0][0])
print(D_Cache[0][1])
print(D_Cache[0][2])
print(D_Cache[0][3])
print(D_Cache[0][4])
print(D_Cache[0][5])
print(D_Cache[0][6])
print(D_Cache[0][7])
print("hellooooooooooooooooooooooooooo")''' | [
"noreply@github.com"
] | noreply@github.com |
ead6973b197fb0173814eb97e7b5102034a3a425 | d486cbde3d3ef37e42ded11de2730c6dc1565319 | /lab4/task1.py | d1ef85fdc12d2ca088a5656345905d54068487ee | [] | no_license | dominika-olszewska/python-exercises | 13b401d5103aa50864c11ca79d523d54dde404cd | 552a0658ac0ef21201dc74dda17dbda9a10e5f2c | refs/heads/master | 2022-06-12T16:13:16.074241 | 2022-05-22T15:11:54 | 2022-05-22T15:11:54 | 173,439,317 | 0 | 0 | null | 2022-05-22T15:11:54 | 2019-03-02T11:25:06 | Jupyter Notebook | UTF-8 | Python | false | false | 696 | py | import numpy as np
import matplotlib.pyplot as plt
# TASKS (9p)
# 1 Looking at the Euler method above create your own function which takes:
# a (from x' = ax)
# h - step
# T time range
# as an input and plots the solution of a differential equation x' = ax (1p)
# TASK 1
def Euler(a, T, h):
initial_x = 1
t = np.arange(0, T, h) # start stop step
x = np.zeros(t.shape)
x[0] = initial_x
for i in range(t.size - 1):
x[i + 1] = x[i] + h * (a * x[i])
plt.plot(t, x, 'g')
plt.xlabel('t', fontsize=14)
plt.ylabel('x', fontsize=14)
plt.show()
# Module-level smoke test: run one integration and print (rather than crash
# on) the two failure modes the exercise anticipates.
try:
    Euler(1, 5, 0.1)
except TypeError as e:
    print(e)
except AttributeError as e:
    print(e)
| [
"dominika275@gmail.com"
] | dominika275@gmail.com |
706c4a133f112d01c765c80eac0083d6d5e90652 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/uhd_restpy/testplatform/sessions/ixnetwork/topology/rxsakpool_22340fe5cb5d81664cab595d3e6d08ef.py | 8aea7fbb4b72c3d049aa51d15c50a9fa0db81919 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,134 | py | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from uhd_restpy.base import Base
from uhd_restpy.files import Files
from typing import List, Any, Union
class RxSakPool(Base):
    """Rx Channels configuration.

    The RxSakPool class encapsulates a required rxSakPool resource which will be retrieved from the server every time the property is accessed.
    """

    __slots__ = ()
    _SDM_NAME = 'rxSakPool'
    # Maps the Python attribute names to the SDM/REST attribute names used
    # on the wire by the Base accessor machinery.
    _SDM_ATT_MAP = {
        'Count': 'count',
        'DescriptiveName': 'descriptiveName',
        'Name': 'name',
        'RxSak128': 'rxSak128',
        'RxSak256': 'rxSak256',
        'RxSalt': 'rxSalt',
        'RxSsci': 'rxSsci',
    }
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(RxSakPool, self).__init__(parent, list_op)

    @property
    def Count(self):
        # type: () -> int
        """
        Returns
        -------
        - number: Number of elements inside associated multiplier-scaled container object, e.g. number of devices inside a Device Group.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Count'])

    @property
    def DescriptiveName(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Longer, more descriptive name for element. It's not guaranteed to be unique like -name-, but may offer more context.
        """
        return self._get_attribute(self._SDM_ATT_MAP['DescriptiveName'])

    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Name of NGPF element, guaranteed to be unique in Scenario
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    @property
    def RxSak128(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 128 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
        """
        # Multivalue is imported lazily inside the accessor -- presumably to
        # avoid a circular import at module load time; confirm.
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak128']))

    @property
    def RxSak256(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 256 bit value of Secure Association Key with which DUT is expected to encrypt MACsec packets.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSak256']))

    @property
    def RxSalt(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 12 bytes Salt value for XPN cipher suites.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSalt']))

    @property
    def RxSsci(self):
        # type: () -> 'Multivalue'
        """
        Returns
        -------
        - obj(uhd_restpy.multivalue.Multivalue): 4 bytes Short SCI for XPN cipher suites.
        """
        from uhd_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['RxSsci']))

    def update(self, Name=None):
        # type: (str) -> RxSakPool
        """Updates rxSakPool resource on the server.

        This method has some named parameters with a type: obj (Multivalue).
        The Multivalue class has documentation that details the possible values for those named parameters.

        Args
        ----
        - Name (str): Name of NGPF element, guaranteed to be unique in Scenario

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def get_device_ids(self, PortNames=None, RxSak128=None, RxSak256=None, RxSalt=None, RxSsci=None):
        """Base class infrastructure that gets a list of rxSakPool device ids encapsulated by this object.

        Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.

        Args
        ----
        - PortNames (str): optional regex of port names
        - RxSak128 (str): optional regex of rxSak128
        - RxSak256 (str): optional regex of rxSak256
        - RxSalt (str): optional regex of rxSalt
        - RxSsci (str): optional regex of rxSsci

        Returns
        -------
        - list(int): A list of device ids that meets the regex criteria provided in the method parameters

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._get_ngpf_device_ids(locals())
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
e0a788cfb8f516327f28f0651f84f2a9ac5f2324 | 8f40340d7a023e14260ce9e239f94c61d13c92af | /bregression/notebooks/compareScale.py | b3b0b90ea3290de1f90f2760cd42256242d4898d | [] | no_license | mdonega/HHbbgg_ETH | 15bbc683feff77ad1daef8e30dc4c778957499b7 | 938a83a2b1c14f7075ac4b996e420f693e814d15 | refs/heads/master | 2020-04-04T03:00:12.174266 | 2019-03-31T21:05:02 | 2019-03-31T21:05:02 | 155,703,739 | 0 | 0 | null | 2018-11-01T11:17:04 | 2018-11-01T11:17:04 | null | UTF-8 | Python | false | false | 6,172 | py |
# coding: utf-8
# In[9]:
import os
import sys; sys.path.append("~/HHbbgg_ETH_devel/bregression/python") # to load packages
import training_utils as utils
import numpy as np
reload(utils)
import preprocessing_utils as preprocessing
reload(preprocessing)
import plotting_utils as plotting
reload(plotting)
import optimization_utils as optimization
reload(optimization)
import postprocessing_utils as postprocessing
reload(postprocessing)
from sklearn.externals import joblib
import pandas as pd
import root_pandas as rpd
import matplotlib.pyplot as plt
import training_utils as utils
import ROOT
from ROOT import gROOT
ntuples = 'heppy_05_10_2017'
# "%" sign allows to interpret the rest as a system command
get_ipython().magic(u'env data=$utils.IO.ldata$ntuples')
files = get_ipython().getoutput(u'ls $data | sort -t_ -k 3 -n')
#ttbar= [s for s in files if "ttbar_RegressionPerJet_heppy_energyRings3_forTesting.root" in s] #energy rings large and proper sample with Jet_e
#ttbar= ["../../bregression//output_root/treeScaleResolution20p70_full_quantile_regression_alpha.root" ]
ttbar= ["../../bregression//output_root/treeScaleResolution20p70_minmax_full_quantile_regression_alpha.root" ]
treeName = 'reducedTree'
utils.IO.add_target(ntuples,ttbar,1)
utils.IO.add_features(ntuples,ttbar,1)
for i in range(len(utils.IO.targetName)):
print "using target file n."+str(i)+": "+utils.IO.targetName[i]
for i in range(len(utils.IO.featuresName)):
print "using features file n."+str(i)+": "+utils.IO.featuresName[i]
branch_names = 'Jet_pt_reg,Jet_pt,Jet_eta,Jet_mcFlavour,Jet_mcPt,noexpand:Jet_mcPt/Jet_pt,rho,Jet_mt,Jet_leadTrackPt,Jet_leptonPtRel,Jet_leptonDeltaR,Jet_neHEF,Jet_neEmEF,Jet_vtxPt,Jet_vtxMass,Jet_vtx3dL,Jet_vtxNtrk,Jet_vtx3deL,Jet_energyRing_dR0_em_Jet_e,Jet_energyRing_dR1_em_Jet_e,Jet_energyRing_dR2_em_Jet_e,Jet_energyRing_dR3_em_Jet_e,Jet_energyRing_dR4_em_Jet_e,Jet_energyRing_dR0_neut_Jet_e,Jet_energyRing_dR1_neut_Jet_e,Jet_energyRing_dR2_neut_Jet_e,Jet_energyRing_dR3_neut_Jet_e,Jet_energyRing_dR4_neut_Jet_e,Jet_energyRing_dR0_ch_Jet_e,Jet_energyRing_dR1_ch_Jet_e,Jet_energyRing_dR2_ch_Jet_e,Jet_energyRing_dR3_ch_Jet_e,Jet_energyRing_dR4_ch_Jet_e,Jet_energyRing_dR0_mu_Jet_e,Jet_energyRing_dR1_mu_Jet_e,Jet_energyRing_dR2_mu_Jet_e,Jet_energyRing_dR3_mu_Jet_e,Jet_energyRing_dR4_mu_Jet_e,Jet_numDaughters_pt03,nPVs,Jet_leptonPt,b_scale,b_res_20p70'.split(",") #
features = 'Jet_pt,Jet_eta,rho,Jet_mt,Jet_leadTrackPt,Jet_leptonPtRel,Jet_leptonDeltaR,Jet_neHEF,Jet_neEmEF,Jet_vtxPt,Jet_vtxMass,Jet_vtx3dL,Jet_vtxNtrk,Jet_vtx3deL,Jet_energyRing_dR0_em_Jet_e,Jet_energyRing_dR1_em_Jet_e,Jet_energyRing_dR2_em_Jet_e,Jet_energyRing_dR3_em_Jet_e,Jet_energyRing_dR4_em_Jet_e,Jet_energyRing_dR0_neut_Jet_e,Jet_energyRing_dR1_neut_Jet_e,Jet_energyRing_dR2_neut_Jet_e,Jet_energyRing_dR3_neut_Jet_e,Jet_energyRing_dR4_neut_Jet_e,Jet_energyRing_dR0_ch_Jet_e,Jet_energyRing_dR1_ch_Jet_e,Jet_energyRing_dR2_ch_Jet_e,Jet_energyRing_dR3_ch_Jet_e,Jet_energyRing_dR4_ch_Jet_e,Jet_energyRing_dR0_mu_Jet_e,Jet_energyRing_dR1_mu_Jet_e,Jet_energyRing_dR2_mu_Jet_e,Jet_energyRing_dR3_mu_Jet_e,Jet_energyRing_dR4_mu_Jet_e,Jet_numDaughters_pt03'.split(",") #
features_cat = 'Jet_pt,Jet_eta,nPVs,Jet_mt,Jet_leadTrackPt,Jet_leptonPtRel,Jet_leptonPt,Jet_leptonDeltaR,Jet_neHEF,Jet_neEmEF,Jet_vtxPt,Jet_vtxMass,Jet_vtx3dL,Jet_vtxNtrk,Jet_vtx3deL'.split(",") #same as Caterina
base_cuts='(Jet_pt > 20) & (Jet_eta<2.5 & Jet_eta>-2.5) & (Jet_mcFlavour==5 | Jet_mcFlavour==-5) & (Jet_mcPt>0) & (Jet_mcPt<6000) & (Jet_pt_reg>0)'
branch_names = [c.strip() for c in branch_names]
features = [c.strip() for c in features]
features_cat = [c.strip() for c in features_cat]
#pt_regions = '(Jet_mcPt>0),(Jet_mcPt<100),(Jet_mcPt>=100 & Jet_mcPt<300),(Jet_mcPt>=300 & Jet_mcPt<700),(Jet_mcPt>700)'.split(",")
pt_regions = '(Jet_mcPt>0),(Jet_mcPt<100),(Jet_mcPt>=100 & Jet_mcPt<300),(Jet_mcPt>=300 & Jet_mcPt<400),(Jet_mcPt>=400 & Jet_mcPt<600),(Jet_mcPt>=600)'.split(",")
eta_regions_names = '|Jet_eta|<0.5,|Jet_eta|>=0.5 & |Jet_eta|<1.0,|Jet_eta|>=1.0 & |Jet_eta|<1.5,|Jet_eta|>=1.5 & |Jet_eta|<2.0,|Jet_eta|>=2.0'.split(",")
eta_regions = '(Jet_eta<0.5 & Jet_eta>-0.5),((Jet_eta>=0.5 & Jet_eta<1.0) |(Jet_eta<=-0.5 & Jet_eta>-1.0)),(( Jet_eta>=1.0 & Jet_eta<1.5)|(Jet_eta<=-1.0 & Jet_eta>-1.5)),( (Jet_eta>=1.5 & Jet_eta<2.0)|(Jet_eta<=-1.5 & Jet_eta>=-2.0 )),(Jet_eta>=2.0 | Jet_eta<=-2.0)'.split(",")
region_names = pt_regions+eta_regions_names
X_pred_res_compare = []
outTagComparison = 'resolution_axis'
for i_r,region in enumerate(pt_regions+eta_regions):
#if (1>0):
# cuts = base_cuts
cuts = base_cuts+'&'+region
data_frame = (rpd.read_root(utils.IO.featuresName[0],treeName, columns = branch_names)).query(cuts)
X_features = preprocessing.set_features(treeName,branch_names,features,cuts)
X_features_cat = (preprocessing.set_features(treeName,branch_names,features_cat,cuts))
X_test_features = preprocessing.get_test_sample(pd.DataFrame(X_features),0.)
nTot,dictVar = postprocessing.stackFeaturesReg(data_frame,branch_names,5)
X_pred_res = nTot[:,dictVar['b_res_20p70']]
plotting.plot_hist_region(X_pred_res,region,outTagComparison,False)
# outTags = ['full_sample_wo_weights_opt_onwo','20p70_full_quantile_regression']
# X_predictions_compare = []
# if (i_r==0):
# for num in range(len(outTags)):
# outTag = outTags[num]
# if ('quantile' not in outTag) :
# loaded_model = joblib.load(os.path.expanduser('~/HHbbgg_ETH_devel/bregression/output_files/regression_heppy_'+outTag+'.pkl'))
# X_pred_data = loaded_model.predict(X_test_features).astype(np.float64)
# else : X_pred_data = nTot[:,dictVar['b_scale']]
# X_predictions_compare.append(X_pred_data)
#
# print len(X_predictions_compare[0]),len(X_predictions_compare[1])
# print min(X_predictions_compare[0]),min(X_predictions_compare[1])
# print max(X_predictions_compare[0]),max(X_predictions_compare[1])
# comparison_tags = outTags
# plotting.plot_hist(X_predictions_compare,outTagComparison,True,['reg','quantile min-max'])
| [
"nadezda.chernyavskaya@cern.ch"
] | nadezda.chernyavskaya@cern.ch |
6b77d8e8260bf6dcb9f443b9a700a1dfa9e73bc2 | 4678c79ba53884b8a18383d3bf5a312d2408a20a | /adanet/core/estimator.py | b46b61c933221791b9569c7f56d2058a88c14e89 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | mlzxy/adanet | af902854b8ed79accf3f48121970524bd3283a82 | 5f30fd61457fd6fafea6e4fa9eef178e3de6b9fa | refs/heads/master | 2021-10-10T06:59:04.818230 | 2019-01-07T20:33:24 | 2019-01-07T22:35:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,691 | py | """An AdaNet estimator implementation in Tensorflow using a single graph.
Copyright 2018 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import errno
import os
import time
from adanet.core.candidate import _CandidateBuilder
from adanet.core.ensemble import _EnsembleBuilder
from adanet.core.ensemble import MixtureWeightType
from adanet.core.iteration import _IterationBuilder
from adanet.core.report_accessor import _ReportAccessor
from adanet.core.summary import _ScopedSummary
from adanet.core.timer import _CountDownTimer
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.ops import resources
class _StopAfterTrainingHook(tf.train.SessionRunHook):
  """Session hook that ends training once the AdaNet iteration completes."""

  def __init__(self, iteration, after_fn):
    """Creates a `_StopAfterTrainingHook`.

    Args:
      iteration: An `_Iteration` instance whose `is_over_fn` signals when
        training for the iteration should stop.
      after_fn: Zero-argument callable invoked once stop is requested.

    Returns:
      A `_StopAfterTrainingHook` instance.
    """
    self._tracked_iteration = iteration
    self._on_stop = after_fn

  def before_run(self, run_context):
    """See `SessionRunHook`."""
    del run_context  # Unused
    # Fetch the iteration's is-over signal alongside the training step.
    return tf.train.SessionRunArgs(self._tracked_iteration.is_over_fn())

  def after_run(self, run_context, run_values):
    """See `SessionRunHook`."""
    if run_values.results:
      # Iteration finished: stop the session loop, then run the callback.
      run_context.request_stop()
      self._on_stop()
class _EvalMetricSaverHook(tf.train.SessionRunHook):
  """A hook for writing evaluation metrics as summaries to disk."""

  def __init__(self, name, eval_metric_ops, output_dir):
    """Creates an `_EvalMetricSaverHook` instance.

    Args:
      name: String name of candidate owner of these metrics.
      eval_metric_ops: Dict of metric results keyed by name. The values of
        the dict are `(metric_tensor, update_op)` tuples as returned by
        metric functions; `metric_tensor` must be evaluable without side
        effects (no input fetching, no triggering of `update_op`).
      output_dir: Directory for writing evaluation summaries.

    Returns:
      An `_EvalMetricSaverHook` instance.
    """
    self._name = name
    self._eval_metric_ops = eval_metric_ops
    self._output_dir = output_dir

  def before_run(self, run_context):
    """See `SessionRunHook`."""
    del run_context  # Unused
    return tf.train.SessionRunArgs(self._eval_metric_ops)

  def _dict_to_str(self, dictionary):
    """Returns a `str` rendering of `dictionary`, sorted by key."""
    sorted_items = sorted(dictionary.items())
    return ", ".join("%s = %s" % pair for pair in sorted_items)

  def end(self, session):
    """See `SessionRunHook`."""
    # Forked from tensorflow/python/estimator/estimator.py function called
    # _write_dict_to_summary.
    # Evaluate only the metric tensors (index 0), never the update ops.
    metric_tensors = {
        key: metric[0] for key, metric in self._eval_metric_ops.items()
    }
    eval_dict, current_global_step = session.run(
        (metric_tensors, tf.train.get_global_step()))

    tf.logging.info("Saving candidate '%s' dict for global step %d: %s",
                    self._name, current_global_step,
                    self._dict_to_str(eval_dict))
    summary_writer = tf.summary.FileWriterCache.get(self._output_dir)
    summary_proto = tf.summary.Summary()
    for key, value in eval_dict.items():
      if isinstance(value, (np.float32, float)):
        summary_proto.value.add(tag=key, simple_value=float(value))
      elif isinstance(value, six.binary_type):
        # Serialized Summary: re-tag its values under this metric's key.
        nested_summary = tf.summary.Summary.FromString(value)
        for index, nested_value in enumerate(nested_summary.value):
          nested_value.tag = "%s/%d" % (key, index)
        summary_proto.value.extend(nested_summary.value)
      else:
        tf.logging.warn(
            "Skipping summary for %s, must be a float, np.float32, "
            "or a serialized string of Summary.", key)
    summary_writer.add_summary(summary_proto, current_global_step)
    summary_writer.flush()
class Estimator(tf.estimator.Estimator):
# pyformat: disable
r"""The AdaNet algorithm implemented as a :class:`tf.estimator.Estimator`.
AdaNet is as defined in the paper: https://arxiv.org/abs/1607.01097.
The AdaNet algorithm uses a weak learning algorithm to iteratively generate a
set of candidate subnetworks that attempt to minimize the loss function
defined in Equation (4) as part of an ensemble. At the end of each iteration,
the best candidate is chosen based on its ensemble's complexity-regularized
train loss. New subnetworks are allowed to use any subnetwork weights within
the previous iteration's ensemble in order to improve upon them. If the
complexity-regularized loss of the new ensemble, as defined in Equation (4),
is less than that of the previous iteration's ensemble, the AdaNet algorithm
continues onto the next iteration.
AdaNet attempts to minimize the following loss function to learn the mixture
weights 'w' of each subnetwork 'h' in the ensemble with differentiable
convex non-increasing surrogate loss function Phi:
Equation (4):
.. math::
F(w) = \frac{1}{m} \sum_{i=1}^{m} \Phi \left(\sum_{j=1}^{N}w_jh_j(x_i),
y_i \right) + \sum_{j=1}^{N} \left(\lambda r(h_j) + \beta \right) |w_j|
with :math:`\lambda >= 0` and :math:`\beta >= 0`.
This implementation uses an :class:`adanet.subnetwork.Generator` as its weak
learning algorithm for generating candidate subnetworks. These are trained in
parallel using a single graph per iteration. At the end of each iteration, the
estimator saves the sub-graph of the best subnetwork ensemble and its weights
as a separate checkpoint. At the beginning of the next iteration, the
estimator imports the previous iteration's frozen graph and adds ops for the
next candidates as part of a new graph and session. This allows the estimator
  to have the performance of TensorFlow's static graph constraint (minus the
performance hit of reconstructing a graph between iterations), while having
the flexibility of having a dynamic graph.
NOTE: Subclassing :class:`tf.estimator.Estimator` is only necessary to work
with :meth:`tf.estimator.train_and_evaluate` which asserts that the estimator
argument is a :class:`tf.estimator.Estimator` subclass. However, all training
is delegated to a separate :class:`tf.estimator.Estimator` instance. It is
responsible for supporting both local and distributed training. As such, the
:class:`adanet.Estimator` is only responsible for bookkeeping across
iterations.
Args:
head: A :class:`tf.contrib.estimator.Head` instance for computing loss and
evaluation metrics for every candidate.
subnetwork_generator: The :class:`adanet.subnetwork.Generator` which defines
the candidate subnetworks to train and evaluate at every AdaNet iteration.
max_iteration_steps: Total number of steps for which to train candidates per
iteration. If :class:`OutOfRange` or :class:`StopIteration` occurs in the
middle, training stops before `max_iteration_steps` steps.
mixture_weight_type: The :class:`adanet.MixtureWeightType` defining which
mixture weight type to learn in the linear combination of subnetwork
outputs:
      - :class:`SCALAR`: creates a rank 0 tensor mixture weight. It performs
          an element-wise multiplication with its subnetwork's logits. This
mixture weight is the simplest to learn, the quickest to train, and
most likely to generalize well.
- :class:`VECTOR`: creates a tensor with shape [k] where k is the
ensemble's logits dimension as defined by `head`. It is similar to
`SCALAR` in that it performs an element-wise multiplication with its
subnetwork's logits, but is more flexible in learning a subnetworks's
preferences per class.
- :class:`MATRIX`: creates a tensor of shape [a, b] where a is the
number of outputs from the subnetwork's `last_layer` and b is the
number of outputs from the ensemble's `logits`. This weight
matrix-multiplies the subnetwork's `last_layer`. This mixture weight
offers the most flexibility and expressivity, allowing subnetworks to
have outputs of different dimensionalities. However, it also has the
most trainable parameters (a*b), and is therefore the most sensitive
to learning rates and regularization.
mixture_weight_initializer: The initializer for mixture_weights. When
`None`, the default is different according to `mixture_weight_type`:
- :class:`SCALAR`: initializes to 1/N where N is the number of
subnetworks in the ensemble giving a uniform average.
- :class:`VECTOR`: initializes each entry to 1/N where N is the number
of subnetworks in the ensemble giving a uniform average.
- :class:`MATRIX`: uses :meth:`tf.zeros_initializer`.
warm_start_mixture_weights: Whether, at the beginning of an iteration, to
initialize the mixture weights of the subnetworks from the previous
ensemble to their learned value at the previous iteration, as opposed to
retraining them from scratch. Takes precedence over the value for
`mixture_weight_initializer` for subnetworks from previous iterations.
adanet_lambda: Float multiplier 'lambda' for applying L1 regularization to
subnetworks' mixture weights 'w' in the ensemble proportional to their
complexity. See Equation (4) in the AdaNet paper.
adanet_beta: Float L1 regularization multiplier 'beta' to apply equally to
all subnetworks' weights 'w' in the ensemble regardless of their
complexity. See Equation (4) in the AdaNet paper.
evaluator: An :class:`adanet.Evaluator` for candidate selection after all
subnetworks are done training. When `None`, candidate selection uses a
moving average of their :class:`adanet.Ensemble` AdaNet loss during
training instead. In order to use the *AdaNet algorithm* as described in
[Cortes et al., '17], the given :class:`adanet.Evaluator` must be created
with the same dataset partition used during training. Otherwise, this
framework will perform *AdaNet.HoldOut* which uses a holdout set for
candidate selection, but does not benefit from learning guarantees.
report_materializer: An :class:`adanet.ReportMaterializer`. Its reports are
made available to the `subnetwork_generator` at the next iteration, so
that it can adapt its search space. When `None`, the
`subnetwork_generator` :meth:`generate_candidates` method will receive
empty Lists for their `previous_ensemble_reports` and `all_reports`
arguments.
use_bias: Whether to add a bias term to the ensemble's logits. Adding a bias
allows the ensemble to learn a shift in the data, often leading to more
stable training and better predictions.
metric_fn: A function for adding custom evaluation metrics, which should
obey the following signature:
- `Args`:
Can only have the following three arguments in any order:
- `predictions`: Predictions `Tensor` or dict of `Tensor` created by
given `head`.
- `features`: Input `dict` of `Tensor` objects created by `input_fn`
which is given to `estimator.evaluate` as an argument.
- `labels`: Labels `Tensor` or dict of `Tensor` (for multi-head)
created by `input_fn` which is given to `estimator.evaluate` as an
argument.
- `Returns`: Dict of metric results keyed by name. Final metrics are a
union of this and `head's` existing metrics. If there is a name
conflict between this and `head`s existing metrics, this will override
the existing one. The values of the dict are the results of calling a
metric function, namely a `(metric_tensor, update_op)` tuple.
force_grow: Boolean override that forces the ensemble to grow by one
subnetwork at the end of each iteration. Normally at the end of each
iteration, AdaNet selects the best candidate ensemble according to its
performance on the AdaNet objective. In some cases, the best ensemble is
the `previous_ensemble` as opposed to one that includes a newly trained
subnetwork. When `True`, the algorithm will not select the
`previous_ensemble` as the best candidate, and will ensure that after n
iterations the final ensemble is composed of n subnetworks.
replicate_ensemble_in_training: Whether to rebuild the frozen subnetworks of
the ensemble in training mode, which can change the outputs of the frozen
subnetworks in the ensemble. When `False` and during candidate training,
the frozen subnetworks in the ensemble are in prediction mode, so
training-only ops like dropout are not applied to them. When `True` and
training the candidates, the frozen subnetworks will be in training mode
as well, so they will apply training-only ops like dropout. This argument
is useful for regularizing learning mixture weights, or for making
training-only side inputs available in subsequent iterations. For most
use-cases, this should be `False`.
adanet_loss_decay: Float decay for the exponential-moving-average of the
AdaNet objective throughout training. This moving average is a data-
      driven way of tracking the best candidate with only the training set.
worker_wait_timeout_secs: Float number of seconds for workers to wait for
chief to prepare the next iteration during distributed training. This is
needed to prevent workers waiting indefinitely for a chief that may have
crashed or been turned down. When the timeout is exceeded, the worker
exits the train loop. In situations where the chief job is much slower
than the worker jobs, this timeout should be increased.
model_dir: Directory to save model parameters, graph and etc. This can also
be used to load checkpoints from the directory into a estimator to
continue training a previously saved model.
report_dir: Directory where the `adanet.subnetwork.MaterializedReport`s
materialized by `report_materializer` would be saved. If
`report_materializer` is None, this will not save anything. If `None` or
empty string, defaults to "<model_dir>/report".
config: `RunConfig` object to configure the runtime settings.
**kwargs: Extra keyword args passed to the parent.
Returns:
An `Estimator` instance.
Raises:
ValueError: If `subnetwork_generator` is `None`.
ValueError: If `max_iteration_steps` is <= 0.
"""
# pyformat: enable
  class _Keys(object):
    # String constants used both as `params` keys that select a bookkeeping
    # phase in `_adanet_model_fn`, and as names of bookkeeping variables
    # stored in the checkpoint (e.g. CURRENT_ITERATION is read back via
    # `tf.contrib.framework.load_variable`).
    CURRENT_ITERATION = "current_iteration"
    EVALUATE_ENSEMBLES = "evaluate_ensembles"
    MATERIALIZE_REPORT = "materialize_report"
    INCREMENT_ITERATION = "increment_iteration"
    PREVIOUS_ENSEMBLE_ARCHITECTURE = "previous_ensemble_architecture"
    SUBNETWORK_GENERATOR = "subnetwork_generator"
  def __init__(self,
               head,
               subnetwork_generator,
               max_iteration_steps,
               mixture_weight_type=MixtureWeightType.SCALAR,
               mixture_weight_initializer=None,
               warm_start_mixture_weights=False,
               adanet_lambda=0.,
               adanet_beta=0.,
               evaluator=None,
               report_materializer=None,
               use_bias=False,
               metric_fn=None,
               force_grow=False,
               replicate_ensemble_in_training=False,
               adanet_loss_decay=.9,
               worker_wait_timeout_secs=7200,
               model_dir=None,
               report_dir=None,
               config=None,
               **kwargs):
    """Initializes the AdaNet `Estimator`.

    See the class docstring for full argument documentation.
    """
    # TODO: Add argument to specify how many frozen graph
    # checkpoints to keep.
    if subnetwork_generator is None:
      raise ValueError("subnetwork_generator can't be None.")
    if max_iteration_steps <= 0.:
      raise ValueError("max_iteration_steps must be > 0.")
    self._subnetwork_generator = subnetwork_generator
    self._adanet_loss_decay = adanet_loss_decay
    # Overwrite superclass's assert that members are not overwritten in order
    # to overwrite public methods. Note that we are doing something that is not
    # explicitly supported by the Estimator API and may break in the future.
    tf.estimator.Estimator._assert_members_are_not_overridden = staticmethod(  # pylint: disable=protected-access
        lambda _: None)
    self._evaluation_checkpoint_path = None
    self._evaluator = evaluator
    self._report_materializer = report_materializer
    self._force_grow = force_grow
    self._worker_wait_timeout_secs = worker_wait_timeout_secs
    self._evaluation_name = None
    # Toggled by `_train_loop_context` for the duration of `train`.
    self._inside_adanet_training_loop = False
    # This `Estimator` is responsible for bookkeeping across iterations, and
    # for training the subnetworks in both a local and distributed setting.
    # Subclassing improves future-proofing against new private methods being
    # added to `tf.estimator.Estimator` that are expected to be callable by
    # external functions, such as in b/110435640.
    super(Estimator, self).__init__(
        model_fn=self._adanet_model_fn,
        params={},
        config=config,
        model_dir=model_dir,
        **kwargs)
    # These are defined after base Estimator's init so that they can
    # use the same temporary model_dir as the underlying Estimator even if
    # model_dir is not provided.
    self._ensemble_builder = _EnsembleBuilder(
        head=head,
        mixture_weight_type=mixture_weight_type,
        mixture_weight_initializer=mixture_weight_initializer,
        warm_start_mixture_weights=warm_start_mixture_weights,
        checkpoint_dir=self._model_dir,
        adanet_lambda=adanet_lambda,
        adanet_beta=adanet_beta,
        use_bias=use_bias,
        metric_fn=metric_fn)
    candidate_builder = _CandidateBuilder(
        max_steps=max_iteration_steps,
        adanet_loss_decay=self._adanet_loss_decay)
    self._iteration_builder = _IterationBuilder(candidate_builder,
                                                self._ensemble_builder,
                                                replicate_ensemble_in_training)
    report_dir = report_dir or os.path.join(self._model_dir, "report")
    self._report_accessor = _ReportAccessor(report_dir)
def _latest_checkpoint_iteration_number(self):
"""Returns the iteration number from the latest checkpoint."""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if latest_checkpoint is None:
return 0
return tf.contrib.framework.load_variable(latest_checkpoint,
self._Keys.CURRENT_ITERATION)
  def _latest_checkpoint_architecture(self):
    """Returns the serialized ensemble architecture from the latest checkpoint.

    Returns an empty string when no checkpoint exists yet.
    """
    latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
    if latest_checkpoint is None:
      return ""
    return tf.contrib.framework.load_variable(
        latest_checkpoint, self._Keys.PREVIOUS_ENSEMBLE_ARCHITECTURE)
def _latest_checkpoint_global_step(self):
"""Returns the global step from the latest checkpoint."""
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
if latest_checkpoint is None:
return 0
return tf.contrib.framework.load_variable(latest_checkpoint,
tf.GraphKeys.GLOBAL_STEP)
@contextlib.contextmanager
def _train_loop_context(self):
"""Tracks where the context is within the AdaNet train loop."""
self._inside_adanet_training_loop = True
yield
self._inside_adanet_training_loop = False
  def train(self,
            input_fn,
            hooks=None,
            steps=None,
            max_steps=None,
            saving_listeners=None):
    """Trains the AdaNet loop. See `tf.estimator.Estimator.train`.

    Trains one AdaNet iteration at a time, running the bookkeeping phase
    between iterations, until `max_steps` (or `steps`) is reached or training
    stops for a reason other than the iteration completing.
    """
    if (steps is not None) and (max_steps is not None):
      raise ValueError("Can not provide both steps and max_steps.")
    if steps is not None and steps <= 0:
      raise ValueError("Must specify steps > 0, given: {}".format(steps))
    if steps is not None:
      # Convert the relative `steps` into an absolute `max_steps` anchored at
      # the latest checkpointed global step.
      max_steps = self._latest_checkpoint_global_step() + steps
    # NOTE(review): if both `steps` and `max_steps` are None, the
    # `>= max_steps` comparisons below compare against None and raise on
    # Python 3 — confirm callers always provide one of them.
    # Each iteration of this AdaNet loop represents an `_Iteration`. The
    # current iteration number is stored as a variable in the checkpoint so
    # that training can be stopped and started at anytime.
    with self._train_loop_context():
      while True:
        current_iteration = self._latest_checkpoint_iteration_number()
        tf.logging.info("Beginning training AdaNet iteration %s",
                        current_iteration)
        self._iteration_ended = False
        result = super(Estimator, self).train(
            input_fn=input_fn,
            hooks=hooks,
            max_steps=max_steps,
            saving_listeners=saving_listeners)
        tf.logging.info("Finished training Adanet iteration %s",
                        current_iteration)
        # If training ended because the maximum number of training steps
        # occurred, exit training.
        if self._latest_checkpoint_global_step() >= max_steps:
          return result
        # If training ended for any reason other than the iteration ending,
        # exit training.
        if not self._iteration_ended:
          return result
        tf.logging.info("Beginning bookkeeping phase for iteration %s",
                        current_iteration)
        # The chief prepares the next AdaNet iteration, and increments the
        # iteration number by 1.
        if self.config.is_chief:
          # As the chief, store the train hooks and make a placeholder input_fn
          # in order to use them when preparing the next iteration.
          self._train_hooks = hooks or ()
          self._prepare_next_iteration(input_fn)
        # This inner loop serves mainly for synchronizing the workers with the
        # chief during distributed training. Workers that finish training early
        # wait for the chief to prepare the next iteration and increment the
        # iteration number. Workers that are slow to finish training quickly
        # move onto the next iteration. And workers that go offline and return
        # online after training ended terminate gracefully.
        wait_for_chief = not self.config.is_chief
        timer = _CountDownTimer(self._worker_wait_timeout_secs)
        while wait_for_chief:
          # If the chief hits max_steps, it will stop training itself and not
          # increment the iteration number, so this is how the worker knows to
          # exit if it wakes up and the chief is gone.
          # TODO: Support steps parameter.
          if self._latest_checkpoint_global_step() >= max_steps:
            return result
          # In distributed training, a worker may end training before the chief
          # overwrites the checkpoint with the incremented iteration number. If
          # that is the case, it should wait for the chief to do so. Otherwise
          # the worker will get stuck waiting for its weights to be initialized.
          next_iteration = self._latest_checkpoint_iteration_number()
          if next_iteration > current_iteration:
            break
          # Check timeout when waiting for potentially downed chief.
          if timer.secs_remaining() == 0:
            tf.logging.error(
                "Chief job did not prepare next iteration after %s secs. It "
                "may have been preempted, been turned down, or crashed. This "
                "worker is now exiting training.",
                self._worker_wait_timeout_secs)
            return result
          tf.logging.info("Waiting for chief to finish")
          time.sleep(5)
        # Stagger starting workers to prevent training instability.
        if not self.config.is_chief:
          task_id = self.config.task_id or 0
          # Wait 5 secs more for each new worker up to 60 secs.
          delay_secs = min(60, task_id * 5)
          tf.logging.info("Waiting %d secs before starting training.",
                          delay_secs)
          time.sleep(delay_secs)
        tf.logging.info("Finished bookkeeping phase for iteration %s",
                        current_iteration)
def evaluate(self,
input_fn,
steps=None,
hooks=None,
checkpoint_path=None,
name=None):
if not checkpoint_path:
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
# Ensure that the read to get the iteration number and read to restore
# variable values come from the same checkpoint during evaluation.
self._evaluation_checkpoint_path = checkpoint_path
self._evaluation_name = name
result = super(Estimator, self).evaluate(
input_fn,
steps=steps,
hooks=hooks,
checkpoint_path=checkpoint_path,
name=name)
self._evaluation_checkpoint_path = None
return result
  def _call_adanet_model_fn(self, input_fn, mode, params):
    """Calls model_fn with the given mode and parameters.

    Builds the ops inside a fresh default graph rather than the training
    graph.

    Args:
      input_fn: An input_fn returning `(features, labels)`.
      mode: A `tf.estimator.ModeKeys` mode.
      params: `dict` of parameters forwarded to `_adanet_model_fn`.
    """
    with tf.Graph().as_default():
      tf.set_random_seed(self.config.tf_random_seed)
      # Create global step before calling model_fn as does superclass.
      tf.train.get_or_create_global_step()
      features, labels = input_fn()
      self._adanet_model_fn(features, labels, mode, params)
  def _prepare_next_iteration(self, train_input_fn):
    """Prepares the next iteration.

    This method calls model_fn up to three times:
      1. To evaluate all candidate ensembles to find the best one.
      2. To materialize reports and store them to disk (if report_materializer
         exists).
      3. To overwrite the model directory's checkpoint with the next
         iteration's ops.

    Args:
      train_input_fn: The input_fn used during training.
    """
    # First, evaluate and choose the best ensemble for this iteration.
    params = self.params.copy()
    params[self._Keys.EVALUATE_ENSEMBLES] = True
    if self._evaluator:
      evaluator_input_fn = self._evaluator.input_fn
    else:
      evaluator_input_fn = train_input_fn
    self._call_adanet_model_fn(evaluator_input_fn, tf.estimator.ModeKeys.EVAL,
                               params)
    # Then materialize and store the subnetwork reports.
    if self._report_materializer:
      params = self.params.copy()
      params[self._Keys.MATERIALIZE_REPORT] = True
      self._call_adanet_model_fn(self._report_materializer.input_fn,
                                 tf.estimator.ModeKeys.EVAL, params)
    # Clear the best ensemble index cached by the evaluation phase —
    # presumably recomputed during the next iteration; set elsewhere.
    self._best_ensemble_index = None
    # Finally, create the graph for the next iteration and overwrite the model
    # directory checkpoint with the expanded graph.
    params = self.params.copy()
    params[self._Keys.INCREMENT_ITERATION] = True
    self._call_adanet_model_fn(train_input_fn, tf.estimator.ModeKeys.TRAIN,
                               params)
def _architecture_filename(self, iteration_number):
"""Returns the filename of the given iteration's frozen graph."""
frozen_checkpoint = os.path.join(self.model_dir, "architecture")
return "{}-{}.txt".format(frozen_checkpoint, iteration_number)
  def _overwrite_checkpoint(self, current_iteration, iteration_number_tensor):
    """Overwrites the latest checkpoint with the current graph.

    This is necessary for two reasons:
    1. To add variables to the checkpoint that were newly created for the
    next iteration. Otherwise Estimator will raise an exception for having a
    checkpoint missing variables.
    2. To increment the current iteration number so that workers know when to
    begin training the next iteration.

    Args:
      current_iteration: Current `_Iteration` object.
      iteration_number_tensor: Int variable `Tensor` storing the current
        iteration number.
    """
    checkpoint_state = tf.train.get_checkpoint_state(self.model_dir)
    latest_checkpoint = checkpoint_state.model_checkpoint_path
    if not latest_checkpoint:
      return
    # Run train hook 'begin' methods which can add ops to the graph, so that
    # they are still present in the overwritten checkpoint.
    train_hooks = tuple(self._train_hooks) or ()
    for candidate in current_iteration.candidates:
      # A candidate without a subnetwork train op must not have an ensemble
      # train op either; it contributes no hooks.
      if not candidate.ensemble_spec.subnetwork_train_op:
        assert not candidate.ensemble_spec.ensemble_train_op
        continue
      train_hooks += candidate.ensemble_spec.subnetwork_train_op.chief_hooks
      train_hooks += candidate.ensemble_spec.subnetwork_train_op.hooks
      train_hooks += candidate.ensemble_spec.ensemble_train_op.chief_hooks
      train_hooks += candidate.ensemble_spec.ensemble_train_op.hooks
    for hook in train_hooks:
      hook.begin()
    global_step_tensor = tf.train.get_global_step()
    # Carry the global step value forward from the checkpoint being replaced.
    global_step = tf.contrib.framework.load_variable(latest_checkpoint,
                                                     tf.GraphKeys.GLOBAL_STEP)
    checkpoint_path = os.path.join(self.model_dir, "increment.ckpt")
    with tf.Session(target=self.config.master) as sess:
      init = tf.group(
          tf.global_variables_initializer(), tf.local_variables_initializer(),
          tf.tables_initializer(),
          resources.initialize_resources(resources.shared_resources()))
      sess.run(init)
      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(sess=sess, coord=coord)
      control_deps = [
          tf.assign(global_step_tensor, global_step),
          tf.assign(iteration_number_tensor, current_iteration.number),
      ]
      # NOTE(review): constructing the Saver inside this control-dependency
      # scope presumably makes its save op run the two assigns first, so the
      # saved checkpoint carries the restored global step and the incremented
      # iteration number — confirm against tf.control_dependencies semantics.
      with tf.control_dependencies(control_deps):
        saver = tf.train.Saver(
            sharded=True, max_to_keep=self.config.keep_checkpoint_max)
        saver.recover_last_checkpoints(
            checkpoint_state.all_model_checkpoint_paths)
        saver.save(sess, checkpoint_path, global_step=current_iteration.number)
      for hook in train_hooks:
        hook.end(sess)
def _get_best_ensemble_index(self, current_iteration):
"""Returns the best candidate ensemble's index in this iteration.
Evaluates the ensembles using an `Evaluator` when provided. Otherwise,
it returns the index of the best candidate as defined by the `_Iteration`.
Args:
current_iteration: Current `_Iteration`.
Returns:
Index of the best ensemble in the iteration's list of `_Candidates`.
"""
# Skip the evaluation phase when there is only one candidate subnetwork.
if len(current_iteration.candidates) == 1:
tf.logging.info(
"As the only candidate, '%s' is moving onto the next iteration.",
current_iteration.candidates[0].ensemble_spec.name)
return 0
# The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
if current_iteration.number > 0 and self._force_grow and (len(
current_iteration.candidates) == 2):
tf.logging.info(
"As the only candidate with `force_grow` enabled, '%s' is moving"
"onto the next iteration.",
current_iteration.candidates[1].ensemble_spec.name)
return 1
latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
tf.logging.info("Starting ensemble evaluation for iteration %s",
current_iteration.number)
with tf.Session() as sess:
init = tf.group(tf.global_variables_initializer(),
tf.local_variables_initializer(), tf.tables_initializer())
sess.run(init)
saver = tf.train.Saver(sharded=True)
saver.restore(sess, latest_checkpoint)
coord = tf.train.Coordinator()
tf.train.start_queue_runners(sess=sess, coord=coord)
if self._evaluator:
adanet_losses = [
c.ensemble_spec.adanet_loss for c in current_iteration.candidates
]
adanet_losses = self._evaluator.evaluate_adanet_losses(
sess, adanet_losses)
else:
adanet_losses = sess.run(
[c.adanet_loss for c in current_iteration.candidates])
values = []
for i in range(len(current_iteration.candidates)):
metric_name = "adanet_loss"
ensemble_name = current_iteration.candidates[i].ensemble_spec.name
values.append("{}/{} = {:.6f}".format(metric_name, ensemble_name,
adanet_losses[i]))
tf.logging.info("Computed ensemble metrics: %s", ", ".join(values))
if self._force_grow and current_iteration.number > 0:
tf.logging.info(
"The `force_grow` override is enabled, so the "
"the performance of the previous ensemble will be ignored.")
# NOTE: The zero-th index candidate at iteration t>0 is always the
# previous_ensemble.
adanet_losses = adanet_losses[1:]
index = np.argmin(adanet_losses) + 1
else:
index = np.argmin(adanet_losses)
tf.logging.info("Finished ensemble evaluation for iteration %s",
current_iteration.number)
tf.logging.info("'%s' at index %s is moving onto the next iteration",
current_iteration.candidates[index].ensemble_spec.name,
index)
return index
  def _materialize_report(self, current_iteration):
    """Generates reports as defined by `Builder`s.

    Materializes the Tensors and metrics defined in the `Builder`s'
    `build_subnetwork_report` method using `ReportMaterializer`, and stores
    them to disk using `_ReportAccessor`.

    Args:
      current_iteration: Current `_Iteration`.
    """
    latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
    tf.logging.info("Starting metric logging for iteration %s",
                    current_iteration.number)
    # The best ensemble index must already have been computed by the
    # preceding EVALUATE_ENSEMBLES phase.
    assert self._best_ensemble_index is not None
    best_candidate = current_iteration.candidates[self._best_ensemble_index]
    best_ensemble = best_candidate.ensemble_spec.ensemble
    # Only the most recently added subnetwork is marked as included, since
    # one subnetwork is added to the ensemble per iteration.
    best_name = best_ensemble.weighted_subnetworks[-1].name
    included_subnetwork_names = [best_name]
    with tf.Session() as sess:
      init = tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer(), tf.tables_initializer())
      sess.run(init)
      saver = tf.train.Saver(sharded=True)
      saver.restore(sess, latest_checkpoint)
      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(sess=sess, coord=coord)
      materialized_reports = (
          self._report_materializer.materialize_subnetwork_reports(
              sess, current_iteration.number,
              current_iteration.subnetwork_reports, included_subnetwork_names))
      self._report_accessor.write_iteration_report(current_iteration.number,
                                                   materialized_reports)
    tf.logging.info("Finished saving subnetwork reports for iteration %s",
                    current_iteration.number)
def _training_hooks(self, current_iteration, training):
"""Returns training hooks for this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
Returns:
A list of `tf.train.SessionRunHook` instances.
"""
if not training:
return []
def after_fn():
self._iteration_ended = True
training_hooks = list(current_iteration.estimator_spec.training_hooks) + [
_StopAfterTrainingHook(current_iteration, after_fn=after_fn)
]
for summary in current_iteration.summaries:
output_dir = self.model_dir
if summary.scope:
output_dir = os.path.join(output_dir, "candidate", summary.scope)
summary_saver_hook = tf.train.SummarySaverHook(
save_steps=self.config.save_summary_steps,
output_dir=output_dir,
summary_op=summary.merge_all())
training_hooks.append(summary_saver_hook)
return training_hooks
def _evaluation_hooks(self, current_iteration, training):
"""Returns evaluation hooks for this iteration.
Args:
current_iteration: Current `_Iteration`.
training: Whether in training mode.
Returns:
A list of `tf.train.SessionRunHook` instances.
"""
if training:
return []
evaluation_hooks = []
for candidate in current_iteration.candidates:
eval_subdir = "eval"
if self._evaluation_name:
eval_subdir = "eval_{}".format(self._evaluation_name)
eval_metric_hook = _EvalMetricSaverHook(
name=candidate.ensemble_spec.name,
eval_metric_ops=candidate.ensemble_spec.eval_metric_ops,
output_dir=os.path.join(self.model_dir, "candidate",
candidate.ensemble_spec.name, eval_subdir))
evaluation_hooks.append(eval_metric_hook)
return evaluation_hooks
def _save_architecture(self, filename, ensemble):
"""Persists the ensemble's architecture in a serialized format.
Writes to a text file with one subnetwork's iteration number and name
per line.
Args:
filename: String filename to persist the ensemble architecture.
ensemble: Target `adanet.Ensemble` instance.
"""
architecture = [
"{}:{}".format(w.iteration_number, w.name)
for w in ensemble.weighted_subnetworks
]
# Make directories since model_dir may not have been created yet.
tf.gfile.MakeDirs(os.path.dirname(filename))
with tf.gfile.GFile(filename, "w") as record_file:
record_file.write(os.linesep.join(architecture))
def _read_architecture(self, filename):
"""Reads an ensemble architecture from disk.
Assumes the file was written with `_save_architecture`.
Args:
filename: String filename where features were recorded.
Returns:
A list of <iteration_number>:<subnetwork name> strings.
Raises:
OSError: When file not found at `filename`.
"""
if not tf.gfile.Exists(filename):
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), filename)
architecture = []
with tf.gfile.GFile(filename, "r") as record_file:
for line in record_file:
feature_name = line.rstrip()
architecture.append(feature_name)
return architecture
# TODO: Refactor architecture building logic to its own module.
  def _architecture_ensemble_spec(self, architecture, features, mode, labels):
    """Returns an `_EnsembleSpec` with the given architecture.

    Creates the ensemble architecture by calling `generate_subnetworks` on
    `self._subnetwork_generator` and only calling `build_subnetwork` on
    `Builders` included in the architecture. Once their ops are created, their
    variables are restored from the checkpoint.

    Args:
      architecture: A list of <iteration_number>:<subnetwork name> strings.
      features: Dictionary of `Tensor` objects keyed by feature name.
      mode: Defines whether this is training, evaluation or prediction. See
        `ModeKeys`.
      labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
        (for multi-head). Can be `None`.

    Returns:
      An `EnsembleSpec` instance for the given architecture.

    Raises:
      ValueError: If a subnetwork from `architecture` is not found in the
        generated candidate `Builders` of the specified iteration.
    """
    previous_ensemble_spec = None
    previous_ensemble = None
    # Rebuild the ensemble one subnetwork at a time, in the order they were
    # originally added, chaining each rebuilt ensemble into the next step.
    for serialized_subnetwork in architecture:
      serialized_iteration_number, name = serialized_subnetwork.split(":")
      rebuild_iteration_number = int(serialized_iteration_number)
      previous_ensemble_reports, all_reports = [], []
      if self._report_materializer:
        previous_ensemble_reports, all_reports = (
            self._collate_subnetwork_reports(rebuild_iteration_number))
      generated_subnetwork_builders = (
          self._subnetwork_generator.generate_candidates(
              previous_ensemble=previous_ensemble,
              iteration_number=rebuild_iteration_number,
              previous_ensemble_reports=previous_ensemble_reports,
              all_reports=all_reports))
      # Locate the generated builder matching the serialized subnetwork name.
      rebuild_subnetwork_builder = None
      for builder in generated_subnetwork_builders:
        if builder.name == name:
          rebuild_subnetwork_builder = builder
          break
      if rebuild_subnetwork_builder is None:
        raise ValueError("Required subnetwork name is missing from "
                         "generated candidates: {}".format(name))
      previous_ensemble_summary = None
      if previous_ensemble_spec:
        # Always skip summaries when rebuilding previous architecture,
        # since they are not useful.
        previous_ensemble_summary = _ScopedSummary(
            previous_ensemble_spec.name, skip_summary=True)
      current_iteration = self._iteration_builder.build_iteration(
          iteration_number=rebuild_iteration_number,
          subnetwork_builders=[rebuild_subnetwork_builder],
          features=features,
          labels=labels,
          mode=mode,
          previous_ensemble_summary=previous_ensemble_summary,
          previous_ensemble_spec=previous_ensemble_spec,
          rebuilding=True)
      # The newest candidate holds the ensemble rebuilt so far; it seeds the
      # next loop step as the previous ensemble.
      previous_ensemble_spec = current_iteration.candidates[-1].ensemble_spec
      previous_ensemble = previous_ensemble_spec.ensemble
    return previous_ensemble_spec
def _collate_subnetwork_reports(self, iteration_number):
  """Prepares subnetwork.Reports to be passed to Generator.

  Reads subnetwork.MaterializedReports from past iterations, collates those
  that were included in previous_ensemble into previous_ensemble_reports as
  a List of subnetwork.MaterializedReports, and collates all reports from
  previous iterations into all_reports as another List of
  subnetwork.MaterializedReports.

  Args:
    iteration_number: Python integer AdaNet iteration number, starting from 0.

  Returns:
    (previous_ensemble_reports: List<subnetwork.MaterializedReport>,
     materialized_reports: List<MaterializedReport>)
  """
  materialized_reports_all = (self._report_accessor.read_iteration_reports())
  previous_ensemble_reports = []
  all_reports = []

  # Since the number of iteration reports changes after the
  # MATERIALIZE_REPORT phase, we need to make sure that we always pass the
  # same reports to the Generator in the same iteration,
  # otherwise the graph that is built in the FREEZE_ENSEMBLE phase would be
  # different from the graph built in the training phase.

  # Iteration 0 should have 0 iteration reports passed to the
  # Generator, since there are no previous iterations.
  # Iteration 1 should have 1 list of reports for Builders
  # generated in iteration 0.
  # Iteration 2 should have 2 lists of reports -- one for iteration 0,
  # one for iteration 1. Note that the list of reports for iteration >= 1
  # should contain "previous_ensemble", in addition to the
  # Builders at the start of that iteration.
  # Iteration t should have t lists of reports.
  for i, iteration_reports in enumerate(materialized_reports_all):
    # This ensures that the FREEZE_ENSEMBLE phase does not pass the reports
    # generated in the previous phase of the same iteration to the
    # Generator when building the graph.
    if i >= iteration_number:
      break
    # Assumes that only one subnetwork is added to the ensemble in
    # each iteration.
    # NOTE(review): the [0] below raises IndexError if an iteration has no
    # report flagged included_in_final_ensemble -- presumably guaranteed by
    # the bookkeeping phase; confirm upstream.
    chosen_subnetwork_in_this_iteration = [
        subnetwork_report for subnetwork_report in iteration_reports
        if subnetwork_report.included_in_final_ensemble
    ][0]
    previous_ensemble_reports.append(chosen_subnetwork_in_this_iteration)
    all_reports.extend(iteration_reports)

  return previous_ensemble_reports, all_reports
def _adanet_model_fn(self, features, labels, mode, params):
  """AdaNet model_fn.

  This model_fn is called at least three times per iteration:
  1. The first call generates, builds, and trains the candidate subnetworks
     to ensemble in this iteration.
  2. Once training is over, bookkeeping begins. The next call is to evaluate
     the best candidate ensembles according to the AdaNet objective.
  2.b. Optionally, when a report materializer is provided, another call
     creates the graph for producing subnetwork reports for the next
     iteration and other AdaNet runs.
  3. The final call is responsible for rebuilding the ensemble architecture
     from t-1 by regenerating the best builders and warm-starting their
     weights, adding ops and initialing the weights for the next candidate
     subnetworks, and overwriting the latest checkpoint with its graph and
     variables, so that first call of the next iteration has the right
     variables in the checkpoint.

  Args:
    features: Dictionary of `Tensor` objects keyed by feature name.
    labels: Labels `Tensor` or a dictionary of string label name to `Tensor`
      (for multi-head). Can be `None`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    params: A dict of parameters. Presence of the phase keys
      (EVALUATE_ENSEMBLES / MATERIALIZE_REPORT / INCREMENT_ITERATION)
      selects which bookkeeping branch runs.

  Returns:
    A `EstimatorSpec` instance.

  Raises:
    UserWarning: When calling model_fn directly in TRAIN mode.
  """
  training = mode == tf.estimator.ModeKeys.TRAIN
  # Training must go through Estimator.train() so the custom training loop
  # can drive the multi-call protocol described above.
  if training and not self._inside_adanet_training_loop:
    raise UserWarning(
        "The adanet.Estimator's model_fn should not be called directly in "
        "TRAIN mode, because its behavior is undefined outside the context "
        "of its `train` method. If you are trying to add custom metrics "
        "with `tf.contrib.estimator.add_metrics`, pass the `metric_fn` to "
        "this `Estimator's` constructor instead.")

  iteration_number = self._latest_checkpoint_iteration_number()

  # Use the evaluation checkpoint path to get both the iteration number and
  # variable values to avoid any race conditions between the first and second
  # checkpoint reads.
  if mode == tf.estimator.ModeKeys.EVAL and self._evaluation_checkpoint_path:
    iteration_number = tf.contrib.framework.load_variable(
        self._evaluation_checkpoint_path, self._Keys.CURRENT_ITERATION)

  if self._Keys.INCREMENT_ITERATION in params:
    iteration_number += 1

  # The architecture file of iteration t-1 describes which subnetworks make
  # up the ensemble this iteration starts from.
  architecture_filename = self._architecture_filename(iteration_number - 1)
  architecture = []
  if tf.gfile.Exists(architecture_filename):
    architecture = self._read_architecture(architecture_filename)
    tf.logging.info(
        "Importing architecture from %s: [%s].", architecture_filename,
        ", ".join(sorted(["'{}'".format(f) for f in architecture])))

  skip_summaries = mode == tf.estimator.ModeKeys.PREDICT
  with tf.variable_scope("adanet"):
    previous_ensemble_spec = None
    previous_ensemble = None
    previous_ensemble_summary = None
    if architecture:
      # Rebuild the previous ensemble so new candidates can extend it.
      previous_ensemble_spec = self._architecture_ensemble_spec(
          architecture, features, mode, labels)
      previous_ensemble = previous_ensemble_spec.ensemble
      previous_ensemble_summary = _ScopedSummary(
          previous_ensemble_spec.name, skip_summary=skip_summaries)
    if self._Keys.INCREMENT_ITERATION in params:
      # Restore trained weights into the freshly rebuilt graph.
      latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
      tf.train.warm_start(latest_checkpoint, vars_to_warm_start=[".*"])
    previous_ensemble_reports, all_reports = [], []
    if self._report_materializer:
      previous_ensemble_reports, all_reports = (
          self._collate_subnetwork_reports(iteration_number))
    subnetwork_builders = self._subnetwork_generator.generate_candidates(
        previous_ensemble=previous_ensemble,
        iteration_number=iteration_number,
        previous_ensemble_reports=previous_ensemble_reports,
        all_reports=all_reports)
    current_iteration = self._iteration_builder.build_iteration(
        iteration_number=iteration_number,
        subnetwork_builders=subnetwork_builders,
        features=features,
        labels=labels,
        mode=mode,
        previous_ensemble_summary=previous_ensemble_summary,
        previous_ensemble_spec=previous_ensemble_spec)

  # Variable which allows us to read the current iteration from a checkpoint.
  # NOTE(review): created outside the "adanet" variable scope so its
  # checkpoint name matches the bare key used by load_variable() above.
  iteration_number_tensor = tf.get_variable(
      self._Keys.CURRENT_ITERATION,
      shape=[],
      dtype=tf.int64,
      initializer=tf.zeros_initializer(),
      trainable=False)

  adanet_summary = _ScopedSummary("global", skip_summaries)
  adanet_summary.scalar("iteration/adanet/iteration", iteration_number_tensor)
  adanet_summary.scalar("iteration_step/adanet/iteration_step",
                        current_iteration.step)
  if current_iteration.estimator_spec.loss is not None:
    adanet_summary.scalar("loss", current_iteration.estimator_spec.loss)
    adanet_summary.scalar("loss/adanet/adanet_weighted_ensemble",
                          current_iteration.estimator_spec.loss)

  iteration_estimator_spec = current_iteration.estimator_spec
  estimator_spec = tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=iteration_estimator_spec.predictions,
      loss=iteration_estimator_spec.loss,
      train_op=iteration_estimator_spec.train_op,
      eval_metric_ops=iteration_estimator_spec.eval_metric_ops,
      training_chief_hooks=iteration_estimator_spec.training_chief_hooks,
      training_hooks=self._training_hooks(current_iteration, training),
      evaluation_hooks=self._evaluation_hooks(current_iteration, training),
      scaffold=tf.train.Scaffold(summary_op=adanet_summary.merge_all()),
      export_outputs=iteration_estimator_spec.export_outputs)

  # Bookkeeping phases (chief only), selected by the phase key in `params`.
  if self._Keys.EVALUATE_ENSEMBLES in params:
    assert self.config.is_chief
    self._best_ensemble_index = self._get_best_ensemble_index(
        current_iteration)
    ensemble = current_iteration.candidates[
        self._best_ensemble_index].ensemble_spec.ensemble
    new_architecture_filename = self._architecture_filename(iteration_number)
    self._save_architecture(new_architecture_filename, ensemble)
  elif self._Keys.MATERIALIZE_REPORT in params:
    assert self.config.is_chief
    assert self._best_ensemble_index is not None
    self._materialize_report(current_iteration)
  elif self._Keys.INCREMENT_ITERATION in params:
    assert self.config.is_chief
    latest_checkpoint = tf.train.latest_checkpoint(self.model_dir)
    tf.logging.info(
        "Overwriting checkpoint with new graph for iteration %s to %s",
        iteration_number, latest_checkpoint)
    self._overwrite_checkpoint(current_iteration, iteration_number_tensor)

  return estimator_spec
| [
"weill@google.com"
] | weill@google.com |
7d3fc3ee1fbadfbfdeae383c58c42296cb0e2128 | 73b158f51285300c1d3456b7af9163939ee206f2 | /DevOps/sprint03/t00_lambda/expression.py | 0ddfdb9345aef1a2a08f49f12d1afab8728d3beb | [] | no_license | nnocturnnn/DevOps | 2e332b3552a5b294b36d2af7de854aa18f2da46f | 173c75938e65be8fbbb5c02c3d655d09df9a2931 | refs/heads/master | 2023-06-11T07:21:14.097930 | 2021-06-30T13:58:15 | 2021-06-30T13:58:15 | 352,070,911 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py |
def divisible_by_both(a, b, n):
    """Return True when ``n`` is divisible by both ``a`` and ``b``.

    Parameter order (a, b, n) mirrors the original lambda so the call
    ``divisible_by_both(a, b, n)`` is a drop-in replacement.
    """
    return n % a == 0 and n % b == 0


def main():
    """Read n, a and b from stdin, then print the divisibility check."""
    n = int(input('n: '))
    a = int(input('a: '))
    b = int(input('b: '))
    # PEP 8 (E731): a named def replaces the original `result = lambda ...`.
    print(divisible_by_both(a, b, n))


if __name__ == '__main__':
    main()
"vikchehovich@gmail.com"
] | vikchehovich@gmail.com |
10f4f3a055566d43dc7c35839294dee6e6843fc8 | 8246502a8c7935ce0cd98352d6707041703b0b98 | /src/acquisition/cdcp/cdc_extract.py | faa4bf7e31128207b835787d6c02a8cbfd95b685 | [
"MIT"
] | permissive | jarad/delphi-epidata | 0ad7e62888d0570c8de5d11ee2e1f610219c1f29 | 6cbde914a055a86b5deba8fa49528604e4db0cc8 | refs/heads/master | 2021-06-23T14:38:18.018736 | 2017-09-07T04:08:20 | 2017-09-07T04:08:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,140 | py | """
===============
=== Purpose ===
===============
Extract a useful subset of the CDC data. Reads from `cdc` and `cdc_meta` (all
pages, and daily resolution) and writes to `cdc_extract` (selected pages, and
weekly resolution). The Epidata API is then used to read from `cdc_extract` and
update the `sensors` and `nowcasts` tables.
This is necessary because the `cdc` table is huge, and string matching is slow.
If runtime was not an issue, everything could (in theory) be done in pure SQL
using just the `cdc` and `cdc_meta` tables.
A similar preprocessing step is performed for the wikipedia dataset in
signal_update.py.
=======================
=== Data Dictionary ===
=======================
`cdc_extract` is the table where the data is stored.
+---------+---------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+---------+---------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| epiweek | int(11) | NO | MUL | NULL | |
| state | char(2) | NO | MUL | NULL | |
| num1 | int(11) | NO | | NULL | |
| num2 | int(11) | NO | | NULL | |
| num3 | int(11) | NO | | NULL | |
| num4 | int(11) | NO | | NULL | |
| num5 | int(11) | NO | | NULL | |
| num6 | int(11) | NO | | NULL | |
| num7 | int(11) | NO | | NULL | |
| num8 | int(11) | NO | | NULL | |
| total | int(11) | NO | | NULL | |
+---------+---------+------+-----+---------+----------------+
id: unique identifier for each record
epiweek: the epiweek during which the data was collected
state: where the data was collected (51 states, including DC)
num1: hits for pages like '%What You Should Know for the % Influenza Season%'
num2: hits for pages like '%What To Do If You Get Sick%'
num3: hits for pages like '%Flu Symptoms & Severity%'
num4: hits for pages like '%How Flu Spreads%'
num5: hits for pages like '%What You Should Know About Flu Antiviral Drugs%'
num6: hits for pages like '%Weekly US Map%'
num7: hits for pages like '%Basics%'
num8: hits for pages like '%Flu Activity & Surveillance%'
total: total number of hits for all CDC pages
=================
=== Changelog ===
=================
2017-02-23
* secrets and minor cleanup
2016-04-16
+ initial version
"""
# standard library
import argparse
import sys
# third party
import mysql.connector
import numpy as np
# first party
import cdc_upload
from epidate import EpiDate
import epiweek as flu
import secrets
def get_num_hits(cur, epiweek, state, page):
  """Return the weekly hit count for one state and page-title pattern.

  Executes a join of `cdc` and `cdc_meta` and reads the (single) summed
  row from the cursor. Returns 0 when the query produced no value.
  """
  sql = '''
    SELECT
      sum(c.`num`) `num`
    FROM
      `cdc` c
    JOIN
      `cdc_meta` m
    ON
      m.`date` = c.`date` AND m.`state` = c.`state`
    WHERE
      m.`epiweek` = %s AND c.`state` = %s AND c.`page` LIKE %s
  '''
  cur.execute(sql, (epiweek, state, page))
  num = None
  for row in cur:
    # Keep the last (only) row's single column.
    (num,) = row
  return 0 if num is None else num
def get_total_hits(cur, epiweek, state):
  """Return the total CDC page hits for a state and week.

  Unlike get_num_hits, a missing value is an error: every (week, state)
  pair is expected to have metadata, so None raises instead of defaulting.
  """
  sql = '''
    SELECT
      sum(m.`total`) `total`
    FROM
      `cdc_meta` m
    WHERE
      m.`epiweek` = %s AND m.`state` = %s
  '''
  cur.execute(sql, (epiweek, state))
  total = None
  for row in cur:
    (total,) = row
  if total is None:
    raise Exception('missing data for %d-%s' % (epiweek, state))
  return total
def store_result(cur, epiweek, state, num1, num2, num3, num4, num5, num6, num7, num8, total):
  """Upsert one weekly extraction row into `cdc_extract`."""
  sql = '''
    INSERT INTO
      `cdc_extract` (`epiweek`, `state`, `num1`, `num2`, `num3`, `num4`, `num5`, `num6`, `num7`, `num8`, `total`)
    VALUES
      (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
    ON DUPLICATE KEY UPDATE
      `num1` = %s,
      `num2` = %s,
      `num3` = %s,
      `num4` = %s,
      `num5` = %s,
      `num6` = %s,
      `num7` = %s,
      `num8` = %s,
      `total` = %s
  '''
  counts = (num1, num2, num3, num4, num5, num6, num7, num8, total)
  # The counts appear twice: once for the INSERT values, once for the
  # ON DUPLICATE KEY UPDATE clause.
  cur.execute(sql, (epiweek, state) + counts + counts)
def extract(first_week=None, last_week=None, test_mode=False):
  """Extract weekly per-state CDC page-hit counts into `cdc_extract`.

  For every (epiweek, state) pair in range, sums hits for the eight tracked
  page-title patterns plus the overall total, and upserts one row.

  Args:
    first_week: first epiweek to process; defaults to the latest epiweek
      already present in `cdc_extract`.
    last_week: last epiweek to process; defaults to the latest epiweek
      available in `cdc_meta`.
    test_mode: when True, changes are rolled back (the commit is skipped).
  """
  # page title templates
  pages = [
    '%What You Should Know for the % Influenza Season%',
    '%What To Do If You Get Sick%',
    '%Flu Symptoms & Severity%',
    '%How Flu Spreads%',
    '%What You Should Know About Flu Antiviral Drugs%',
    '%Weekly US Map%',
    '%Basics%',
    '%Flu Activity & Surveillance%',
  ]
  # location information
  states = sorted(cdc_upload.STATES.values())
  # connect
  u, p = secrets.db.epi
  cnx = mysql.connector.connect(user=u, password=p, database='epidata')
  cur = cnx.cursor()
  # weeks to update
  if first_week is None:
    cur.execute('SELECT max(`epiweek`) FROM `cdc_extract`')
    for (first_week,) in cur:
      pass
  if last_week is None:
    cur.execute('SELECT max(`epiweek`) FROM `cdc_meta`')
    for (last_week,) in cur:
      pass
  print('extracting %d--%d' % (first_week, last_week))
  # update each epiweek
  for epiweek in flu.range_epiweeks(first_week, last_week, inclusive=True):
    # update each state
    for state in states:
      try:
        # One query per pattern; a failure for one state does not abort
        # the rest of the extraction.
        num1 = get_num_hits(cur, epiweek, state, pages[0])
        num2 = get_num_hits(cur, epiweek, state, pages[1])
        num3 = get_num_hits(cur, epiweek, state, pages[2])
        num4 = get_num_hits(cur, epiweek, state, pages[3])
        num5 = get_num_hits(cur, epiweek, state, pages[4])
        num6 = get_num_hits(cur, epiweek, state, pages[5])
        num7 = get_num_hits(cur, epiweek, state, pages[6])
        num8 = get_num_hits(cur, epiweek, state, pages[7])
        total = get_total_hits(cur, epiweek, state)
        store_result(cur, epiweek, state, num1, num2, num3, num4, num5, num6, num7, num8, total)
        print(' %d-%s: %d %d %d %d %d %d %d %d (%d)' % (epiweek, state, num1, num2, num3, num4, num5, num6, num7, num8, total))
      except Exception as ex:
        print(' %d-%s: failed' % (epiweek, state), ex)
        #raise ex
      sys.stdout.flush()
  # disconnect
  cur.close()
  if not test_mode:
    cnx.commit()
  cnx.close()
def main():
  """Parse CLI arguments, validate the epiweek bounds, then run extract()."""
  # args and usage
  parser = argparse.ArgumentParser()
  parser.add_argument('--first', '-f', default=None, type=int, help='first epiweek override')
  parser.add_argument('--last', '-l', default=None, type=int, help='last epiweek override')
  parser.add_argument('--epiweek', '-w', default=None, type=int, help='epiweek override')
  parser.add_argument('--test', '-t', default=False, action='store_true', help='dry run only')
  args = parser.parse_args()

  # sanity check: every provided bound must be a valid epiweek
  first, last, week = args.first, args.last, args.epiweek
  for bound in (first, last, week):
    if bound is not None:
      flu.check_epiweek(bound)
  if first is not None and last is not None and first > last:
    raise Exception('epiweeks in the wrong order')
  # --epiweek overrides both bounds
  if week is not None:
    first = last = week

  # extract the page hits for all states on the specified weeks
  extract(first, last, args.test)
if __name__ == '__main__':
main()
| [
"dfarrow0@gmail.com"
] | dfarrow0@gmail.com |
def triangle_area(base, height):
    """Return the area of a triangle given its base and height."""
    bounding_rectangle = base * height
    return bounding_rectangle / 2
def rectangle_area(width, height):
    """Return the area of a width-by-height rectangle."""
    area = width * height
    return area
| [
"wnsduq2000@naver.com"
] | wnsduq2000@naver.com |
from random import randint

# Idiomatic fix: the original kept a manual counter (x = 1; while x <= 6;
# x += 1); a for-loop over range expresses "repeat six times" directly.
# Draw and print six random numbers between 1 and 60.
for _ in range(6):
    print(randint(1, 60))
"senseyluiz@gmail.com"
] | senseyluiz@gmail.com |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from flask import Flask

app = Flask(__name__)


@app.route('/')
def index():
    """Root route: a simple liveness message."""
    return 'It works!'


@app.route('/hello')
def hello():
    """Static greeting."""
    return 'hello, world!'


@app.route('/user/<username>')
def user_profile(username):
    """Greet the user named in the URL path."""
    return 'hello, {}'.format(username)


@app.route('/post/<int:post_id>')
def post(post_id):
    """Echo the integer post id captured from the URL."""
    return 'post id is {}'.format(post_id)


if __name__ == '__main__':
    # Debug server for local development only.
    app.run(debug=True)
| [
"elahyman@gmail.com"
] | elahyman@gmail.com |
0b7588a3b45b10353a4db110ad65bc568a6dbc53 | 81ed4db4dc598698371608c8173f5b2418432637 | /tests/functional_tests.py | 5ae764430e68cafcff7f486ed479a878416d84c2 | [] | no_license | feuloren/fexchange | d4b6697d7869c39b7cdc9627a4f8700237f8c824 | 011da9e6ead81b95b7110a920e8d88aa11f40721 | refs/heads/master | 2021-01-01T17:32:38.300210 | 2014-01-18T15:20:03 | 2014-01-18T15:20:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,155 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from pytest import fail
from selenium import webdriver
from selenium.webdriver.common.by import By
# Doc pour le webdriver :
#http://selenium.googlecode.com/git/docs/api/py/webdriver_remote/selenium.webdriver.remote.webdriver.html
# Et pour les webelement obtenus par un find_element :
#http://selenium.googlecode.com/git/docs/api/py/webdriver_remote/selenium.webdriver.remote.webelement.html#module-selenium.webdriver.remote.webelement
class TestComptesUtilisateur:
def setup(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def teardown(self):
self.browser.quit()
def test_connexion_cas(self):
self.browser.get('http://localhost:8888')
assert 'Adopte un meuble' in self.browser.title
# On veut s'authentifier avec le CAS
# d'abord on va sur la page de connexion
link = self.browser.find_element(By.LINK_TEXT, 'Connexion')
link.click()
assert self.browser.current_url == 'http://localhost:8888/auth'
assert 'Connexion' in self.browser.title
fail('Finish the test !')
| [
"florent@fthevenet.fr"
] | florent@fthevenet.fr |
b4571590ec6a3e3ec47fcc2114275054d35df44f | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/surface/container/builds/describe.py | c3386e7ed0435df473c1cbd30730d4657cc15fba | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 2,193 | py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe build command."""
from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.calliope import base
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class Describe(base.DescribeCommand):
  """Get information about a particular build."""

  @staticmethod
  def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
    parser.add_argument(
        'build',
        completion_resource='cloudbuild.projects.builds',
        list_command_path='container builds list --uri',
        help=('The build to describe. The ID of the build is printed at the '
              'end of the build submission process, or in the ID column when '
              'listing builds.'),
    )

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """
    client = cloudbuild_util.GetClientInstance()
    # Resolve the user-supplied build ID against the active project
    # (core/project property) into a full resource reference.
    build_ref = resources.REGISTRY.Parse(
        args.build,
        params={'projectId': properties.VALUES.core.project.GetOrFail},
        collection='cloudbuild.projects.builds')
    # Fetch and return the Build message; the framework prints it.
    return client.projects_builds.Get(
        client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(
            projectId=build_ref.projectId, id=build_ref.id))
| [
"vinvivo@users.noreply.github.com"
] | vinvivo@users.noreply.github.com |
91a257e297a4cd745426be39c93b98f9223448d3 | a41ee6d7d2e120ad55bc211f8f09b26c7a36541e | /Full python (boring to interesting)/6_project_stopWatch.py | 0c6d9319b896951b58dfeb06bd16c94307735b3d | [] | no_license | ujjwalsb/python | 0d213a4e0cf26e9ef266ce3656853cd8af487c10 | f66cb7b2017992f99496d878bf89c634aa797cf2 | refs/heads/master | 2023-05-27T10:44:54.103400 | 2021-06-14T17:41:50 | 2021-06-14T17:41:50 | 110,427,253 | 5 | 2 | null | 2021-06-14T17:41:51 | 2017-11-12T11:35:43 | Python | UTF-8 | Python | false | false | 759 | py | # /usr/bin/python3
# A simple stopwatch program.
import time
# Display the program's instructions.
print('Press ENTER to begin. Afterwards, press ENTER to "Click" the Stopwatch.\
Press Ctrl+C to quit.')
input() # press Enter to begin.
print('Started.')
startTime = time.time() # Get the first lap's start time.
lastTime = startTime
lapNum = 1
# Start tracking the lap times.
try:
while True:
input()
lapTime = round(time.time() - lastTime, 2)
totalTime = round(time.time() - startTime, 2)
print('Lap #%s: %s (%s)' % (lapNum, totalTime, lapTime), end = '')
lapNum+=1
lastTime = time.time() # Reset the last lap time.
except KeyboardInterrupt:
# Handle the Ctrl+C exception to keep its error message from displaying.
print('\nDone.') | [
"ujjwalsingh15@gmail.com"
] | ujjwalsingh15@gmail.com |
78d28ede545ee5391bd12639beca0fcaf2fe7ff7 | ed8313e4a74d63591334477de28bd75114329c1a | /backend/api/repos.py | 9713273aa96885c8b238d91d2e4bbbc266cc9713 | [] | no_license | allenkg/test_students | 0ab2a9fad2203c409c3719fa0a4314efcb96fb5f | 1ebe034c86ca6f4395ad88ae7b995b82ea1f8ad6 | refs/heads/master | 2020-03-27T21:06:16.708216 | 2018-09-17T20:56:56 | 2018-09-17T20:56:56 | 147,115,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,545 | py | from api.common.exceptions import EntityDoesNotExist
from api.models import Student, Course
class StudentRepo(object):
    """Data-access layer for Student records (soft-delete aware)."""

    def get_student_by_id(self, id):
        """Return the student with primary key ``id``.

        Raises:
            EntityDoesNotExist: if no matching row exists.
        """
        try:
            return Student.objects.get(pk=id)
        except Student.DoesNotExist:
            raise EntityDoesNotExist

    def create_student(self, email, first_name, last_name, phone_number, id_number):
        """Create, persist and return a new Student."""
        student = Student(
            first_name=first_name,
            last_name=last_name,
            email=email,
            phone_number=phone_number,
            id_number=id_number,
        )
        student.save()
        return student

    def save_student(self, student):
        """Persist pending changes on ``student``."""
        student.save()

    def is_email_already_used(self, email):
        """Return True when any student row (even soft-deleted) uses ``email``."""
        return Student.objects.filter(email=email).exists()

    def get_all_students(self):
        """Return a queryset of all non-deleted students."""
        return Student.objects.filter(is_deleted=False)

    def remove_from_course(self, student_id):
        """Detach the student from their course and return the student."""
        student = Student.objects.get(id=student_id)
        student.courses = None
        student.save()
        return student

    def delete_student(self, id):
        """Soft-delete a student by flagging ``is_deleted``.

        Raises:
            EntityDoesNotExist: if no matching row exists.
        """
        try:
            student = Student.objects.get(pk=id)
        except Student.DoesNotExist:
            raise EntityDoesNotExist
        student.is_deleted = True
        # BUG FIX: the flag was set but never persisted, so "deleted"
        # students reappeared on the next query.
        student.save()
        return student

    def add_course(self, course_id):
        """Fetch the Course used when (re)assigning a student."""
        return Course.objects.get(id=course_id)

    def search_student(self, search_query):
        """Case-insensitive first-name search.

        ``search_query`` is indexed with ``[0]`` -- presumably a
        single-element list from the query string; confirm at call sites.
        """
        return Student.objects.filter(first_name__icontains=search_query[0])

    def update_student(self, student_id, email, first_name, last_name, course, phone_number, id_number):
        """Update an existing student's profile and optional course link."""
        student = self.get_student_by_id(student_id)
        # NOTE(review): assigning ``student.student_id`` mirrors the original
        # behavior; confirm the model actually defines this field.
        student.student_id = student_id
        student.email = email if email else student.email
        student.first_name = first_name
        student.last_name = last_name
        if course:
            student.courses = self.add_course(course['id'])
        student.phone_number = phone_number
        student.id_number = id_number
        self.save_student(student)
        return student
class CourseRepo(object):
    """Data-access layer for Course records (soft-delete aware)."""

    def get_course_by_id(self, id):
        """Return the course with primary key ``id``.

        Raises:
            EntityDoesNotExist: if no matching row exists.
        """
        try:
            return Course.objects.get(pk=id)
        # BUG FIX: previously caught Student.DoesNotExist, which never
        # matches the Course.DoesNotExist raised by this query, so the
        # intended EntityDoesNotExist was never raised.
        except Course.DoesNotExist:
            raise EntityDoesNotExist

    def create_course(self, title, description, img):
        """Create, persist and return a new Course."""
        course = Course(title=title, description=description, img=img)
        course.save()
        return course

    def save_course(self, sourse):
        """Persist pending changes on the course.

        NOTE(review): parameter name ``sourse`` (typo for "course") is kept
        so keyword call sites remain compatible.
        """
        sourse.save()

    def get_all_courses(self, offset=None, page_number=None):
        """Return non-deleted courses plus paging metadata.

        ``offset`` (page size) and ``page_number`` are indexed with ``[0]``
        -- presumably single-element lists from the query string; confirm
        at call sites.
        """
        courses = Course.objects.filter(is_deleted=False)
        total = len(courses)
        if offset:
            page_size = int(offset[0])
            page = int(page_number[0])
            if page <= 1:
                start = 0
            else:
                # BUG FIX: the old slice was courses[offset:offset * page],
                # which returned several pages' worth of rows for page >= 3.
                # Slice exactly one page: [(page-1)*size : page*size].
                start = page_size * (page - 1)
            return {
                'courses': courses[start:start + page_size],
                'allPages': total,
                'offset': page_size,
            }
        return {
            'courses': courses,
            'allPages': total,
            'offset': offset,
        }

    def get_all_course_students(self, course_id):
        """Return non-deleted students enrolled in the given course.

        ``course_id`` is indexed with ``[0]`` -- presumably a single-element
        list from the query string; confirm at call sites.
        """
        students = Student.objects.filter(courses_id=course_id[0], is_deleted=False)
        return {
            'courses': students,
            'allPages': len(students),
            'offset': '',
        }

    def delete_course(self, id):
        """Soft-delete a course by flagging ``is_deleted``.

        Raises:
            EntityDoesNotExist: if no matching row exists.
        """
        try:
            course = Course.objects.get(pk=id)
        # BUG FIX: previously caught Student.DoesNotExist (wrong model).
        except Course.DoesNotExist:
            raise EntityDoesNotExist
        course.is_deleted = True
        # BUG FIX: the flag was set but never persisted without this save().
        course.save()
        return course

    def update_course(self, id, title, description):
        """Update title and description of an existing course."""
        course = self.get_course_by_id(id)
        course.title = title
        course.description = description
        course.save()
        return course

    def search_course(self, search_query):
        """Case-insensitive title search returning the paging-shaped payload."""
        courses = Course.objects.filter(title__icontains=search_query[0])
        return {
            'courses': courses,
            'allPages': len(courses),
            'offset': '',
        }
| [
"andyahont@gmail.com"
] | andyahont@gmail.com |
"""
Django settings for jobFinder project.
Generated by 'django-admin startproject' using Django 3.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os

# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'django-insecure-%rlttnz+=pe$=n#p<a@0yn%9bx!c!ajqz(+co8-k(@w@*zj3'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'viewer',  # project app
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'jobFinder.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'jobFinder.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Extra locations scanned by collectstatic / the static finder.
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]

# Filesystem root for user-uploaded media files.
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
"janithmalshan@gmail.com"
] | janithmalshan@gmail.com |
8f608eb54cc99e4c496150d0edecc71a52d4e030 | 3cda2dc11e1b7b96641f61a77b3afde4b93ac43f | /nni/experiment/config/utils/__init__.py | c4b8b586d0953435188171ce60154e6e190380ee | [
"MIT"
] | permissive | Eurus-Holmes/nni | 6da51c352e721f0241c7fd26fa70a8d7c99ef537 | b84d25bec15ece54bf1703b1acb15d9f8919f656 | refs/heads/master | 2023-08-23T10:45:54.879054 | 2023-08-07T02:39:54 | 2023-08-07T02:39:54 | 163,079,164 | 3 | 2 | MIT | 2023-08-07T12:35:54 | 2018-12-25T12:04:16 | Python | UTF-8 | Python | false | false | 237 | py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
"""
Utility functions for experiment config classes.
Check "public.py" to see which functions you can utilize.
"""
from .public import *
from .internal import *
| [
"noreply@github.com"
] | noreply@github.com |
afbcbe77345db932b12c03726ae7f17b1697b727 | f95b77be3eee8bb6bdb61bd4128fd8321bc8889c | /yl_lab1/lab1.py | 4fa2df853ada57f5c27753f9854f44664c44c34e | [] | no_license | or20-meet/meetyl1201819 | c02df7c9ae18d310d3621b647e803dde60e26d17 | e1ee6aff0815b9671d8ac70967d740bf8da6ea5d | refs/heads/master | 2020-04-02T04:28:49.593304 | 2019-02-23T16:29:35 | 2019-02-23T16:29:35 | 154,018,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | import turtle
turtle.goto(0,100)
turtle.goto(100,100)
turtle.goto(100,0)
turtle.goto(0,0)
turtle.mainloop() | [
"or20@meet.mit.edu"
] | or20@meet.mit.edu |
937175e4b5291adf7936b6fa829b3bbd28c7c340 | 3af2998fe7bc3c48fbe6eae476f7e0ec5bfc0ca6 | /control_flow/while_loop.py | 503c2959186fb69a16e04e59916b7b694844032c | [] | no_license | east825/python-inference-playground | 22acb8f2c71eb07e13293a9fec1d67a6f5aa25cf | f60387604a1c535ad30b7f3f44acf08cbd7d88c7 | refs/heads/master | 2020-06-01T01:29:00.399986 | 2014-04-15T12:28:18 | 2014-04-15T12:28:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 310 | py | while False:
if True:
a1 = 42
break
else:
a1 = 'spam'
print(a1)
while False:
if True:
a2 = 42
else:
a2 = 'spam'
print(a2)
a3 = 42
while False:
if True:
a3 = 'spam'
print(a3)
while False:
if True:
a4 = 42
else:
a4 = 'spam'
print(a4) | [
"mikhail.golubev@jetbrains.com"
] | mikhail.golubev@jetbrains.com |
f3b10390cab97640945f5d3aca56278e1e40272a | c1dd4501e134e3cfef5b7dc82d0e022e3ec7e9b6 | /apps/authentication/models.py | c031d2d7466c3a46d4046b2478ba78cb8946cc6d | [] | no_license | artemmj/set_up_jwt_django | a189cd1d59eac5fe9d02772284f794480f7525f7 | 8ba80f83b8516e5a2226e005ec22a821997c319f | refs/heads/master | 2023-04-25T01:22:41.274238 | 2021-04-28T11:47:50 | 2021-04-28T11:47:50 | 362,447,798 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,489 | py | import jwt
from datetime import datetime, timedelta
from django.conf import settings
from django.contrib.auth.models import (
AbstractBaseUser, BaseUserManager, PermissionsMixin,
)
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class UserManager(BaseUserManager):
"""
Django требует, чтобы кастомные пользователи определяли свой собственный
класс Manager. Много кода дублирует Django-функционал.
"""
def create_user(self, username, email, phone=None, password=None):
""" Создает и возвращает пользователя с имэйлом, паролем и именем. """
if username is None:
raise TypeError('Users must have a username.')
if email is None:
raise TypeError('Users must have an email address.')
# if phone is None:
# raise TypeError('Users must have an phone number')
user = self.model(username=username, email=self.normalize_email(email))
user.set_password(password)
user.save()
return user
def create_superuser(self, username, email, password):
""" Создает и возвращет пользователя с привилегиями суперадмина. """
if password is None:
raise TypeError('Superusers must have a password.')
user = self.create_user(username, email, password)
user.is_superuser = True
user.is_staff = True
user.save()
return user
class User(AbstractBaseUser, PermissionsMixin):
username = models.CharField(db_index=True, max_length=255, unique=True)
email = models.EmailField(db_index=True, unique=True)
phone = PhoneNumberField(null=True, blank=True)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
# Свойство USERNAME_FIELD - поле для входа в систему, логин.
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
# Установать в рабочий UserManager() выше.
objects = UserManager()
def __str__(self):
return self.email
@property
def token(self):
"""
Позволяет получить токен пользователя путем вызова user.token, вместо
user._generate_jwt_token(). Декоратор @property выше делает это
возможным. token называется "динамическим свойством".
"""
return self._generate_jwt_token()
def get_full_name(self):
return self.username
def get_short_name(self):
return self.username
def _generate_jwt_token(self):
"""
Генерирует веб-токен JSON, в котором хранится идентификатор этого
пользователя, срок действия токена составляет 1 день от создания
"""
dt = datetime.now() + timedelta(days=1)
token = jwt.encode({
'id': self.pk,
'exp': int(dt.strftime('%s'))
}, settings.SECRET_KEY, algorithm='HS256')
return token.decode('utf-8')
| [
"webep4@gmail.com"
] | webep4@gmail.com |
778e3ae044f6c1eabf5462f9e16d75af88ce8eb1 | 10c9fee15669f72e79e50ba59569c48ee7c96997 | /back-end/etudiants.py | 723072ecbe292a863abc19d607b012075cc5e169 | [] | no_license | MouadAe/iHome | 4e915092548b4e4a66f22ce330b5171b224301db | 2067a94a26ce9b221d31584535e3dc405e7abebf | refs/heads/master | 2023-07-16T00:24:08.791428 | 2021-08-08T23:11:01 | 2021-08-08T23:11:01 | 394,084,880 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,254 | py | from views import app
from flask import Flask , jsonify, request, redirect
from flask_pymongo import PyMongo
from werkzeug.security import generate_password_hash, check_password_hash
from flask_jwt_extended import create_access_token
from flask_jwt_extended import get_jwt_identity
from flask_jwt_extended import jwt_required
from flask_jwt_extended import JWTManager
from dotenv import load_dotenv
load_dotenv()
import os
jwt = JWTManager(app)
app.config["MONGO_URI"] = "mongodb+srv://soufiane:ihomedb@cluster0.lxgog.mongodb.net/ihome?retryWrites=true&w=majority"
mongo = PyMongo(app)
@app.route('/Login', methods=['POST'])
def login():
data=request.get_json()
user=mongo.db.users.find_one({"email": data['email']})
if user :
if check_password_hash(user['password'],data['password']) :
access_token = create_access_token({ "id": str(user['_id']), "email": user['email'],"status":user['status']})
response= jsonify(access_token)
response.status_code=200
else:
response= jsonify({"status": "PASSWORD_NOT_MTACH"})
response.status_code=403
else:
response= jsonify({"status": "MUST_SIGNUP"})
response.status_code=403
return response | [
"mouadaouane49@gmail.com"
] | mouadaouane49@gmail.com |
9e518f73a4a29b04a4ec59db895584adc263ae23 | e08e994abad15444b3a254c660c724fbdd1f0d89 | /no_shebang.py | d193b243d75d4f58c6ffc829f0ce37f07805c17c | [] | no_license | jjalal1234/myCode | 8e02b79d2a4788ea201d2e313380614afde3a08d | 321a9cb82189622808051331b5415465fc2d3fbc | refs/heads/master | 2020-05-22T18:10:14.520163 | 2019-06-27T20:12:14 | 2019-06-27T20:12:14 | 186,467,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | print("ello")
print("Did you say, hello?")
print("No, I sad ello, but that\'s close enough.")
| [
"jjalal1234@gmail.com"
] | jjalal1234@gmail.com |
891869c00f24639fa8d33f4d0a3dea0f62cc2f18 | 5b1eb22194cb2f4c9df63765f78a6998a6ad3de2 | /src/helpsys.py | ad173e7ce6e17082de7d532ab17840bb10f89ca2 | [] | no_license | Jawmo/akriosmud | 85c2ecd520fd15ba86d0210b018055146b9e2192 | aac434919586f5590f089e8e87e0f2e946a80aa9 | refs/heads/master | 2020-10-01T23:26:47.290481 | 2019-12-09T01:11:05 | 2019-12-09T01:11:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,804 | py | #! usr/bin/env python
# Project: Akrios
# Filename: helpsys.py
#
# File Description: Module to handle the help system.
#
# By: Jubelo
from collections import namedtuple
import glob
import logging
import json
import os
import olc
import world
log = logging.getLogger(__name__)
WRITE_NEW_FILE_VERSION = False
# Define some named tuples for various Help file values
Section = namedtuple("Section", "name")
sections = {"player": Section("player"),
"administrative": Section("administrative"),
"builder": Section("builder"),
"deity": Section("deity")}
class Help(olc.Editable):
CLASS_NAME = "__Help__"
FILE_VERSION = 1
def __init__(self, path):
super().__init__()
self.path = path
self.json_version = Help.FILE_VERSION
self.json_class_name = Help.CLASS_NAME
self.builder = None
self.creator = ""
self.viewable = ""
self.keywords = []
self.topics = ""
self.section = ""
self.description = ""
self.commands = {"viewable": ("string", ["true", "false"]),
"creator": ("string", None),
"keywords": ("list", None),
"topics": ("string", None),
"section": ("string", sections),
"description": ("description", None)}
if os.path.exists(path):
self.load()
def to_json(self):
if self.json_version == 1:
jsonable = {"json_version": self.json_version,
"json_class_name": self.json_class_name,
"creator": self.creator,
"viewable": self.viewable,
"keywords": self.keywords,
"topics": self.topics,
"section": self.section,
"description": self.description}
return json.dumps(jsonable, sort_keys=True, indent=4)
def load(self):
log.debug(f"Loading help file: {self.path}")
if self.path.endswith("json"):
with open(self.path, "r") as thefile:
help_file_dict = json.loads(thefile.read())
for eachkey, eachvalue in help_file_dict.items():
setattr(self, eachkey, eachvalue)
def save(self):
with open(f"{self.path}", "w") as thefile:
thefile.write(self.to_json())
def display(self):
return (f"{{BCreator{{x: {self.creator}\n"
f"{{BViewable{{x: {self.viewable}\n"
f"{{BKeywords{{x: {', '.join(self.keywords)}\n"
f"{{BTopics{{x: {self.topics}\n"
f"{{BSection{{x: {self.section}\n"
f" {{y{', '.join(sections)}\n"
f"{{BDescription{{x:\n\r"
f"{self.description[:190]}|...{{x\n\r")
helpfiles = {}
def init():
log.info("Initializing all help files.")
allhelps = glob.glob(os.path.join(world.helpDir, "*.json"))
for singlehelp in allhelps:
thehelp = Help(singlehelp)
for keyword in thehelp.keywords:
helpfiles[keyword] = thehelp
if WRITE_NEW_FILE_VERSION:
thehelp.save()
def reload():
helpfiles = {}
init()
def get_help(key, server=False):
key = key.lower()
if key:
if key in helpfiles:
if helpfiles[key].viewable.lower() == "true" or server:
return helpfiles[key].description
else:
log.warning(f"MISSING HELP FILE: {key}")
return "We do not appear to have a help file for that topic. "\
"We have however logged the attempt and will look into creating "\
"a help file for that topic as soon as possible.\n\r"
| [
"phippsb@gmail.com"
] | phippsb@gmail.com |
14d21ba34e1b8337cb0439ea712b203c5317703c | 2a8abd5d6acdc260aff3639bce35ca1e688869e9 | /telestream_cloud_qc_sdk/telestream_cloud_qc/models/audio_loudness_itest.py | 387597f5a62e6090eec6343a0872aa4c534e52f5 | [
"MIT"
] | permissive | Telestream/telestream-cloud-python-sdk | 57dd2f0422c83531e213f48d87bc0c71f58b5872 | ce0ad503299661a0f622661359367173c06889fc | refs/heads/master | 2021-01-18T02:17:44.258254 | 2020-04-09T11:36:07 | 2020-04-09T11:36:07 | 49,494,916 | 0 | 0 | MIT | 2018-01-22T10:07:49 | 2016-01-12T11:10:56 | Python | UTF-8 | Python | false | false | 7,273 | py | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: cloudsupport@telestream.net
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from telestream_cloud_qc.configuration import Configuration
class AudioLoudnessItest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'loudness_level': 'float',
'loudness_tolerance': 'float',
'mode': 'LoudnessMode',
'channels': 'Channels',
'reject_on_error': 'bool',
'do_correction': 'bool'
}
attribute_map = {
'loudness_level': 'loudness_level',
'loudness_tolerance': 'loudness_tolerance',
'mode': 'mode',
'channels': 'channels',
'reject_on_error': 'reject_on_error',
'do_correction': 'do_correction'
}
def __init__(self, loudness_level=None, loudness_tolerance=None, mode=None, channels=None, reject_on_error=None, do_correction=None, local_vars_configuration=None): # noqa: E501
"""AudioLoudnessItest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._loudness_level = None
self._loudness_tolerance = None
self._mode = None
self._channels = None
self._reject_on_error = None
self._do_correction = None
self.discriminator = None
if loudness_level is not None:
self.loudness_level = loudness_level
if loudness_tolerance is not None:
self.loudness_tolerance = loudness_tolerance
if mode is not None:
self.mode = mode
if channels is not None:
self.channels = channels
if reject_on_error is not None:
self.reject_on_error = reject_on_error
if do_correction is not None:
self.do_correction = do_correction
@property
def loudness_level(self):
"""Gets the loudness_level of this AudioLoudnessItest. # noqa: E501
:return: The loudness_level of this AudioLoudnessItest. # noqa: E501
:rtype: float
"""
return self._loudness_level
@loudness_level.setter
def loudness_level(self, loudness_level):
"""Sets the loudness_level of this AudioLoudnessItest.
:param loudness_level: The loudness_level of this AudioLoudnessItest. # noqa: E501
:type: float
"""
self._loudness_level = loudness_level
@property
def loudness_tolerance(self):
"""Gets the loudness_tolerance of this AudioLoudnessItest. # noqa: E501
:return: The loudness_tolerance of this AudioLoudnessItest. # noqa: E501
:rtype: float
"""
return self._loudness_tolerance
@loudness_tolerance.setter
def loudness_tolerance(self, loudness_tolerance):
"""Sets the loudness_tolerance of this AudioLoudnessItest.
:param loudness_tolerance: The loudness_tolerance of this AudioLoudnessItest. # noqa: E501
:type: float
"""
self._loudness_tolerance = loudness_tolerance
@property
def mode(self):
"""Gets the mode of this AudioLoudnessItest. # noqa: E501
:return: The mode of this AudioLoudnessItest. # noqa: E501
:rtype: LoudnessMode
"""
return self._mode
@mode.setter
def mode(self, mode):
"""Sets the mode of this AudioLoudnessItest.
:param mode: The mode of this AudioLoudnessItest. # noqa: E501
:type: LoudnessMode
"""
self._mode = mode
@property
def channels(self):
"""Gets the channels of this AudioLoudnessItest. # noqa: E501
:return: The channels of this AudioLoudnessItest. # noqa: E501
:rtype: Channels
"""
return self._channels
@channels.setter
def channels(self, channels):
"""Sets the channels of this AudioLoudnessItest.
:param channels: The channels of this AudioLoudnessItest. # noqa: E501
:type: Channels
"""
self._channels = channels
@property
def reject_on_error(self):
"""Gets the reject_on_error of this AudioLoudnessItest. # noqa: E501
:return: The reject_on_error of this AudioLoudnessItest. # noqa: E501
:rtype: bool
"""
return self._reject_on_error
@reject_on_error.setter
def reject_on_error(self, reject_on_error):
"""Sets the reject_on_error of this AudioLoudnessItest.
:param reject_on_error: The reject_on_error of this AudioLoudnessItest. # noqa: E501
:type: bool
"""
self._reject_on_error = reject_on_error
@property
def do_correction(self):
"""Gets the do_correction of this AudioLoudnessItest. # noqa: E501
:return: The do_correction of this AudioLoudnessItest. # noqa: E501
:rtype: bool
"""
return self._do_correction
@do_correction.setter
def do_correction(self, do_correction):
"""Sets the do_correction of this AudioLoudnessItest.
:param do_correction: The do_correction of this AudioLoudnessItest. # noqa: E501
:type: bool
"""
self._do_correction = do_correction
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AudioLoudnessItest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, AudioLoudnessItest):
return True
return self.to_dict() != other.to_dict()
| [
"cloudsupport@telestream.net"
] | cloudsupport@telestream.net |
ee4f61f74210096066bc2bcba7fd5cea88b316c6 | ddbad6b2bd4ce606dfc97e827b7eff0005e36b3a | /freeradius/app_test/models_io.py | 5f748bd7a4b6f3b966010a631239d26fc9b26ae5 | [] | no_license | sk-rama/freeradius | ac325ae5716ea662281f593b2f653701410794ad | b97ff16cb4d6fae043d9b2ca4df12b1c2f9dfd23 | refs/heads/master | 2023-03-03T05:41:34.275612 | 2021-02-16T10:59:07 | 2021-02-16T10:59:07 | 331,944,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | from pydantic import BaseModel, EmailStr, validator
from typing import Optional, List
from datetime import datetime
import re
class UserOut(BaseModel):
id: int
name: str
company: Optional[str] = None
email: Optional[EmailStr] = None
ts: datetime = None
@validator('ts', pre=True, always=True)
def set_ts_now(cls, v):
return v or datetime.now()
class TelNumberOut(BaseModel):
id: Optional[int] = None
username: Optional[str] = None
value: Optional[str] = None
class Config:
orm_mode = True
class TelNumberIn(BaseModel):
username: Optional[str] = None
@validator('username', pre=True, always=True)
def is_tel_number(cls, v):
if v[0:3] != '420':
raise ValueError('must start with string 420')
if len(v) != 12:
raise ValueError('must contain 12 numbers')
return v
class TelNumbersIn(BaseModel):
tel_numbers: List[str] = None
@validator('tel_numbers', each_item=True, always=True)
def is_tel_number(cls, v):
if v[0:3] != '420':
raise ValueError('must start with string 420')
if len(v) != 12:
raise ValueError('must contain 12 numbers')
if len(v) != len(re.match('[0-9]*', v).group(0)):
raise ValueError('Tel. Number must contain only numbers')
return v
| [
"rrastik@seznam.cz"
] | rrastik@seznam.cz |
6e65fb505daa2b5a7ee67d8524269f3177d69153 | b9a4efbcf48e52a1333f6a548338e2f62aed30e3 | /forms/urls/house_map_construction.py | 39240ea9ad410035e9f9e17f93e9ce87c87d90b6 | [] | no_license | Rabin5/formcollection | 0747639d9a2ff291457aacce874eb5a6428dea73 | 38c0bf763ae0a15c301c020d76ff0596c561da14 | refs/heads/main | 2023-08-10T18:48:26.736876 | 2021-09-26T06:19:09 | 2021-09-26T06:19:09 | 410,467,808 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.urls import path
from forms.views.house_map_construction import HouseMapConstructionCreateView, HouseMapConstructionUpdateView
app_name = 'house_map'
urlpatterns = [
path('create/', HouseMapConstructionCreateView.as_view(), name='create'),
path('<int:pk>/update', HouseMapConstructionUpdateView.as_view(), name='update'),
]
| [
"rai.rabin@infodevelopers.com.np"
] | rai.rabin@infodevelopers.com.np |
c7355a7926645add681b504489ff0e5c7ae88407 | d700c1974e438338b2ba6fd07d2484ab5134d270 | /algorithms/sorting/test.py | 76646bc29a8684891e31197059600507bf1cf574 | [] | no_license | AlekseySh/computer_science | eff26a2fccbbfc97f95a41aacda2efe9c7fa5596 | 8c470b200717cf3798a86474c2c192fee80e1f44 | refs/heads/master | 2022-09-17T12:09:13.426657 | 2022-08-23T21:29:08 | 2022-08-23T21:29:08 | 163,635,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | from random import randint
def test(sort_func,
n_test=1000,
size_bounds=(0, 10),
val_bounds=(0, 10),
inplace_sort=True
):
for _ in range(n_test):
x = [randint(*val_bounds) for _ in range(randint(*size_bounds))]
if inplace_sort:
x_for_default = x.copy()
x_for_custom = x.copy()
x_for_default.sort() # default inplace sort
sort_func(x_for_custom) # custom inplace sort
assert x_for_custom == x_for_default, \
f'for input: {x}, sorted: {x_for_custom}'
else:
assert sorted(x) == sort_func(x), \
f'for input: {x}, sorted: {x_for_custom}'
| [
"lexibender@ya.ru"
] | lexibender@ya.ru |
603148152e47b6b5f499dbd1eb89acda902ebc7c | 4bb891a60497c0effb6e20468cb8db25bbbeefc9 | /painters-identification/scripts/capstone-models-final-model-building.py | 1f2e5cb880f131ec67f821ac9a720b0a3d1b3250 | [
"MIT"
] | permissive | mhdella/deep-learning-1 | f9a8da9cab73ca407a33cf3c17ff5fabaf9e3993 | 604054d8f728b56bb9dedb1743dbf9e83b5664cf | refs/heads/master | 2020-06-21T13:33:44.306925 | 2019-04-16T22:11:24 | 2019-04-16T22:11:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,172 | py |
# coding: utf-8
# In[ ]:
# <br>
# Painters Identification using ConvNets
### Marco Tavora
# <br>
## Index
# - [Building Convolutional Neural Networks](#convnets)
# - [Small ConvNets](#smallconvnets)
# - [Imports for Convnets](#importconvnets)
# - [Preprocessing](#keraspreprocessing)
# - [Training the model](#traincnn)
# - [Plotting the results](#plotting)
# - [Transfer learning: Using InceptionV3](#VGG16)
# - [Comments](#comments)
# - [References](#ref)
# <br>
# <br>
print('Created using Python', platform.python_version())
## Introduction
# The challenge of recognizing artists given their paintings has been, for a long time, far beyond the capability of algorithms. Recent advances in deep learning, specifically the development of convolutional neural networks, have made that task possible. One of the advantages of these methods is that, in contrast to several methods employed by art specialists, they are not invasive and do not interfere with the painting.
## Overview
# I used Convolutional Neural Networks (ConvNets) to identify the artist of a given painting. The dataset contains a minimum of 400 paintings per artist <br> from a set of 37 famous artists.
# <br><br>
# I trained a small ConvNet built from scratch, and also used transfer learning, fine-tuning the top layers of a deep pre-trained networks (VGG16).
# ## Problems with small datasets
# The number of training examples in our dataset is small (for image recognition standards). Therefore, making predictions with high accuracy avoiding overfitting becomes a difficult task. To build classification systems with the level of capability of current state-of-the-art models would need millions of training examples. Example of such models are the ImageNet models. Examples of these models include:
# - VGG16
# - VGG19
# - ResNet50
# - Inception V3
# - Xception
## Preprocessing
# The `Keras` class `keras.preprocessing.image.ImageDataGenerator` generates batches of image data with real-time data augmentation and defines the configuration for both image data preparation and image data augmentation. Data augmentation is particularly useful in cases like the present one, where the number of images in the training set is not large, and overfitting can become an issue.
# To create an augmented image generator we can follow these steps:
# - We must first create an instance i.e. an augmented image generator (using the command below) where several arguments can be chosen. These arguments will determine the alterations to be performed on the images during training:
# datagen = ImageDataGenerator(arguments)
# - To use `datagen` to create new images we call the function `fit_generator( )` with the desired arguments.
# I will quickly explain some possible arguments of `ImageDataGenerator`:
# - `rotation range` defines the amplitude that the images will be rotated randomly during training. Rotations aren't always useful. For example, in the MNIST dataset all images have normalized orientation, so random rotations during training are not needed. In tour present case it is not clear how useful rotations are so I will choose an small argument (instead of just setting it to zero).
# - `rotation_range`, `width_shift_range`, `height_shift_range` and `shear_range`: the ranges of random shifts and random shears should be the same in our case, since the images were resized to have the same dimensions.
# - I set `fill mode` to be `nearest` which means that pixels that are missing will be filled by the nearest ones.
# - `horizontal_flip`: horizontal (and vertical) flips can be useful here since in many examples in our dataset there is no clear definition of orientation (again the MNIST dataset is an example where flipping is not useful)
# - We can also standardize pixel values using the `featurewise_center` and `feature_std_normalization` arguments.
# ***
# ## Transfer Learning
# One way to circunvent this issue is to use 'Transfer Learning', where we use a pre-trained model, modify its final layers and apply to our dataset. When the dataset is too small, these pre-trained models act as feature generators only (see discussion below). As will be illustrated later on, when the dataset in question has some reasonable size, one can drop some layers from the original model, stack a model on top of the network and perform some parameters fine-tuning.
# Before following this approach, I will, in the next section, build a small ConvNet "from scratch".
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.preprocessing import image
from keras.layers import Dropout, Flatten, Dense
from keras import applications
from keras.utils.np_utils import to_categorical
from keras import applications
from keras.applications.imagenet_utils import preprocess_input
from imagenet_utils import decode_predictions
import math, cv2
# Root directories of the training and test images, one sub-folder per artist.
folder_train = './train_toy_3/'
folder_test = './test_toy_3/'
# Augmented-image generator for training: each argument defines a random
# on-the-fly perturbation, which helps compensate for the small dataset.
datagen = ImageDataGenerator(
    featurewise_center=True,              # zero-center pixels over the dataset (requires fit())
    featurewise_std_normalization=True,   # divide by dataset std (requires fit())
    rotation_range=0.15,                  # small random rotations, in degrees
    width_shift_range=0.2,                # random horizontal shifts (fraction of width)
    height_shift_range=0.2,               # random vertical shifts (fraction of height)
    rescale = 1./255,                     # scale pixel values into [0, 1]
    shear_range=0.2,                      # random shear transforms
    zoom_range=0.2,                       # random zooms
    horizontal_flip=True,                 # flips acceptable: painting style has no fixed orientation
    fill_mode='nearest')                  # fill pixels exposed by transforms with nearest neighbours
from keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.callbacks import EarlyStopping, Callback
K.image_data_format() # this means that "backend": "tensorflow". Channels are RGB
from keras import applications
from keras.utils.np_utils import to_categorical
import math, cv2
## Defining the new size of the image
# - The images from Wikiart.org had a extremely large size, I wrote a simple function `preprocess( )` (see the notebook about data analysis in this repo) to resize the images. In the next cell I resize them again and play with the size to see how it impacts accuracy.
# - The reason why cropping the image is partly justified is that I believe, the style of the artist is present everywhere in the painting, so cropping shouldn't cause major problems.
# Target size for the resized input images; cropping/downsizing is assumed
# harmless because an artist's style is present everywhere in the painting.
img_width, img_height = 120,120

# Channel ordering differs between backends: Theano puts channels first,
# TensorFlow puts them last.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
    print('Theano Backend')
else:
    input_shape = (img_width, img_height, 3)
    print('TensorFlow Backend')
input_shape  # cell output: display the chosen shape
# Total number of training images: every artist has its own sub-folder under
# `folder_train`, so the total is the sum of the file counts per sub-folder.
# (Replaces an index-based `for p in range(len(...))` loop with a sum, and
# `'/'` string concatenation with os.path.join.)
_train_root = os.path.abspath(folder_train)
nb_train_samples = sum(
    len(os.listdir(os.path.join(_train_root, artist_dir)))
    for artist_dir in os.listdir(_train_root)
)
nb_train_samples  # cell output
# Total number of test images, computed the same way as nb_train_samples:
# one sub-folder per artist under `folder_test`.
_test_root = os.path.abspath(folder_test)
nb_test_samples = sum(
    len(os.listdir(os.path.join(_test_root, artist_dir)))
    for artist_dir in os.listdir(_test_root)
)
nb_test_samples  # cell output
## Batches and Epochs:
# - Batch: a set of $N$ samples. The samples in a batch are processed independently, in parallel. If training, a batch results in only one update to the model (extracted from the docs).
# - Epoch: an arbitrary cutoff, generally defined as "one pass over the entire dataset", used to separate training into distinct phases, which is useful for logging and periodic evaluation. When using `evaluation_data` or `evaluation_split` with the `fit` method of Keras models, evaluation will be run at the end of every epoch (extracted from the docs).
# - Larger batch sizes:faster progress in training, but don't always converge as fast.
# - Smaller batch sizes: train slower, but can converge faster. It's definitely problem dependent.
# Paths and training hyper-parameters.
train_data_dir = os.path.abspath(folder_train)      # training set, already split into per-class folders
validation_data_dir = os.path.abspath(folder_test)  # test set, already split into per-class folders
# (removed the no-op self-assignment `nb_train_samples = nb_train_samples`)
nb_validation_samples = nb_test_samples
epochs = 100
batch_size = 16  # smaller batches train slower per epoch but can converge in fewer epochs
# One class per artist sub-folder.
num_classes = len(os.listdir(train_data_dir))
print('The painters are', os.listdir(train_data_dir))
### Class for early stopping
# Model stops training when 10 epochs do not show gain in accuracy.
# rdcolema
class EarlyStoppingByLossVal(Callback):
    """Stop training as soon as a monitored metric drops below a target value.

    Unlike Keras' built-in ``EarlyStopping`` (which waits for the metric to
    stop improving), this callback halts training the first time
    ``logs[monitor]`` falls under ``value``.
    """
    def __init__(self, monitor='val_loss', value=0.45, verbose=0):
        # Bug fix: the original called super(Callback, self).__init__(), which
        # skips Callback.__init__ in the MRO; super() initialises it properly.
        super().__init__()
        self.monitor = monitor  # name of the metric to look up in `logs`
        self.value = value      # threshold: stop once metric < value
        self.verbose = verbose  # 1 -> print a message when stopping

    def on_epoch_end(self, epoch, logs=None):
        # Bug fix: `logs=None` instead of a mutable `{}` default.
        logs = logs or {}
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
            # Bug fix: without this return, `None < self.value` below would
            # raise TypeError on Python 3.
            return
        if current < self.value:
            if self.verbose > 0:
                print("Epoch %05d: early stopping THR" % epoch)
            self.model.stop_training = True
# Standard Keras early stopping: halt after 10 epochs without val_loss improvement.
early_stopping = EarlyStopping(monitor='val_loss', patience=10, mode='auto')  # NOTE(review): not used in this chunk — confirm it is passed to fit()
# File where the weights of the small top model will be saved.
top_model_weights_path = 'bottleneck_fc_model.h5'
### Creating InceptionV3 model
# We now create the InceptionV3 model without the final fully-connected layers (setting `include_top=False`) and loading the ImageNet weights (by setting `weights ='imagenet`)
from keras.applications.inception_v3 import InceptionV3
model = applications.InceptionV3(include_top=False, weights='imagenet')
applications.InceptionV3(include_top=False, weights='imagenet').summary()
type(applications.InceptionV3(include_top=False, weights='imagenet').summary())
### Training and running images on InceptionV3
# We first create the generator. The generator is an iterator that generates batches of images when requested using e.g. `flow( )`.
datagen = ImageDataGenerator(rescale=1. / 255)
generator = datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
nb_train_samples = len(generator.filenames)
num_classes = len(generator.class_indices)
predict_size_train = int(math.ceil(nb_train_samples / batch_size))
print('Number of training samples:',nb_train_samples)
print('Number of classes:',num_classes)
### Bottleneck features
# The extracted features, which are the last activation maps before the fully-connected layers in the pre-trained model, are called "bottleneck features". The function `predict_generator( )` generates predictions for the input samples from a data generator.
bottleneck_features_train = model.predict_generator(generator, predict_size_train) # these are numpy arrays
bottleneck_features_train[0].shape
bottleneck_features_train.shape
# In the next cell, we save the bottleneck features so they can be reloaded for training:
np.save('bottleneck_features_train.npy', bottleneck_features_train)
# Using `predict( )` we see that, indeed, `ResNet50` is able to identify some objects in the painting. The function `decode_predictions` decodes the results into a list of tuples of the form (class, description, probability). We see below that the model identifies the house in the image as a castle or mosque and shows correctly a non-zero probability of finding a seashore in the painting. In this case, `ResNet50` acts as a feature generator.
# Repeating the steps for the validation data:
generator = datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
nb_validation_samples = len(generator.filenames)
predict_size_validation = int(math.ceil(nb_validation_samples / batch_size))
print('Number of testing samples:',nb_validation_samples)
bottleneck_features_validation = model.predict_generator(
generator, predict_size_validation)
np.save('bottleneck_features_validation.npy', bottleneck_features_validation)
### Training the fully-connected network (the top-model)
# We now load the features just obtained, get the class labels for the training set and convert the latter into categorical vectors:
datagen_top = ImageDataGenerator(rescale=1./255)
generator_top = datagen_top.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='categorical',
shuffle=False)
nb_train_samples = len(generator_top.filenames)
num_classes = len(generator_top.class_indices)
# Loading the features:
train_data = np.load('bottleneck_features_train.npy')
# Converting training data into vectors of categories:
train_labels = generator_top.classes
print('Classes before dummification:',train_labels)
train_labels = to_categorical(train_labels, num_classes=num_classes)
print('Classes after dummification:\n\n',train_labels)
# Again repeating the process with the validation data:
generator_top = datagen_top.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode=None,
shuffle=False)
nb_validation_samples = len(generator_top.filenames)
validation_data = np.load('bottleneck_features_validation.npy')
validation_labels = generator_top.classes
validation_labels = to_categorical(validation_labels, num_classes=num_classes)
### Building the small FL model using bottleneck features as input
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
# model.add(Dense(1024, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu')) # Not valid for minimum = 500
model.add(Dropout(0.5))
# model.add(Dense(4, activation='relu')) # Not valid for minimum = 500
# model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='sigmoid'))
model.compile(optimizer='Adam',
loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels,
epochs=epochs,
batch_size=batch_size,
validation_data=(validation_data, validation_labels))
model.save_weights(top_model_weights_path)
(eval_loss, eval_accuracy) = model.evaluate(
validation_data, validation_labels,
batch_size=batch_size, verbose=1)
print("[INFO] accuracy: {:.2f}%".format(eval_accuracy * 100))
print("[INFO] Loss: {}".format(eval_loss))
train_data.shape[1:]
# model.evaluate(
# validation_data, validation_labels, batch_size=batch_size, verbose=1)
# model.predict_classes(validation_data)
# model.metrics_names
#top_k_categorical_accuracy(y_true, y_pred, k=5)
### Plotting the accuracy history
plt.figure(1)
# summarize history for accuracy
plt.subplot(211)
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
#pylab.ylim([0.4,0.68])
plt.legend(['train', 'test'], loc='upper left')
### Plotting the loss history
import pylab
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
pylab.xlim([0,60])
# pylab.ylim([0,1000])
plt.show()
import matplotlib.pyplot as plt
import pylab
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
fig = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Classification Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
pylab.xlim([0,60])
plt.legend(['Test', 'Validation'], loc='upper right')
fig.savefig('loss.png')
plt.show();
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
fig = plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.plot(figsize=(15,15))
plt.title('Classification Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
pylab.xlim([0,100])
plt.legend(['Test', 'Validation', 'Success Metric'], loc='lower right')
fig.savefig('acc.png')
plt.show();
### Predictions
# List the artist's training images (bug fix: the path literal was missing its closing quote).
os.listdir(os.path.abspath('train_toy_3/Pierre-Auguste_Renoir'))
image_path = os.path.abspath('test_toy_3/Pierre-Auguste_Renoir/91485.jpg')
orig = cv2.imread(image_path)
image = load_img(image_path, target_size=(120,120))
image
image = img_to_array(image)
image
image = image / 255.
image = np.expand_dims(image, axis=0)
image
# build the VGG16 network
#model = applications.VGG16(include_top=False, weights='imagenet')
model = applications.InceptionV3(include_top=False, weights='imagenet')
# get the bottleneck prediction from the pre-trained VGG16 model
bottleneck_prediction = model.predict(image)
# build top model
model = Sequential()
model.add(Flatten(input_shape=train_data.shape[1:]))
# model.add(Dense(1024, activation='relu'))
# model.add(Dropout(0.5))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(32, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(16, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(8, activation='relu')) # Not valid for minimum = 500
model.add(Dropout(0.5))
# model.add(Dense(4, activation='relu')) # Not valid for minimum = 500
# model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='sigmoid'))
model.load_weights(top_model_weights_path)
# use the bottleneck prediction on the top model to get the final classification
class_predicted = model.predict_classes(bottleneck_prediction)
inID = class_predicted[0]
class_dictionary = generator_top.class_indices
inv_map = {v: k for k, v in class_dictionary.items()}
label = inv_map[inID]
# get the prediction label
print("Image ID: {}, Label: {}".format(inID, label))
# display the predictions with the image
cv2.putText(orig, "Predicted: {}".format(label), (10, 30), cv2.FONT_HERSHEY_PLAIN, 1.5, (43, 99, 255), 2)
cv2.imshow("Classification", orig)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | noreply@github.com |
273ec318437049163304e122bcd887729e5f96f8 | 5832aced2d94c06adac0c9de719beee5c0d7d1a8 | /pancancer_evaluation/utilities/data_utilities.py | 92f8ddd0ff4af1bd7a40274d27ea38f5abde0abc | [
"BSD-3-Clause"
] | permissive | greenelab/wenda_gpu_paper | 4cde1cc239448832df6ac9c6e9b6ae2ff8a3c869 | 60ae443d7ce72f572d16c76b2127d4b7b661d14f | refs/heads/main | 2023-03-01T01:48:06.174592 | 2022-04-19T21:28:21 | 2022-04-19T21:28:21 | 457,892,363 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,291 | py | # Functions for reading and processing input data.
# Originally written by Jake Crawford.
import os
import sys
from pathlib import Path
import numpy as np
import pandas as pd
import pickle as pkl
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
import pancancer_evaluation.config as cfg
def load_expression_data(scale_input=False, verbose=False, debug=False):
    """Load and preprocess saved TCGA gene expression data.

    Arguments
    ---------
    scale_input (bool): whether or not to min-max scale the expression data
    verbose (bool): whether or not to print progress to stderr
    debug (bool): load the small debugging subset instead of the full data

    Returns
    -------
    rnaseq_df: samples x genes expression dataframe
    """
    if debug:
        if verbose:
            print('Loading subset of gene expression data for debugging...',
                  file=sys.stderr)
        source = cfg.test_expression
    else:
        if verbose:
            print('Loading gene expression data...', file=sys.stderr)
        source = cfg.rnaseq_data
    rnaseq_df = pd.read_csv(source, index_col=0, sep='\t')

    if scale_input:
        # Scale the RNAseq matrix the same way it was scaled for the
        # compression algorithms.
        fitted_scaler = MinMaxScaler().fit(rnaseq_df)
        rnaseq_df = pd.DataFrame(
            fitted_scaler.transform(rnaseq_df),
            index=rnaseq_df.index,
            columns=rnaseq_df.columns,
        )
    return rnaseq_df
def load_pancancer_data(verbose=False, test=False, subset_columns=None):
    """Load pan-cancer relevant data from previous Greene Lab repos.

    Data being loaded includes:
    * sample_freeze_df: list of samples from TCGA "data freeze" in 2017
    * mutation_df: deleterious mutation count information for freeze samples
    * copy_loss_df: copy number loss information for freeze samples
    * copy_gain_df: copy number gain information for freeze samples
    * mut_burden_df: log10(total deleterious mutations) for freeze samples

    Loading this data from the pancancer repo is very slow, so the result
    is cached in a local pickle file and reloaded from there when present.

    Arguments
    ---------
    verbose (bool): whether or not to print verbose output
    test (bool): use the test cache location instead of the full one
    subset_columns: optional gene columns used to subset the matrices when
        the cache has to be rebuilt from the repo

    Returns
    -------
    pancan_data: tuple of the five dataframes described above
    """
    cache_path = cfg.test_pancan_data if test else cfg.pancan_data

    if os.path.exists(cache_path):
        if verbose:
            print('Loading pan-cancer data from cached pickle file...',
                  file=sys.stderr)
        with open(cache_path, 'rb') as f:
            return pkl.load(f)

    if verbose:
        print('Loading pan-cancer data from repo (warning: slow)...',
              file=sys.stderr)
    pancan_data = load_pancancer_data_from_repo(subset_columns)
    with open(cache_path, 'wb') as f:
        pkl.dump(pancan_data, f)
    return pancan_data
def load_top_50():
    """Load the 50 most-mutated genes in TCGA from the BioBombe repo.

    These were precomputed for the equivalent experiments in the BioBombe
    paper, so there is no need to recompute them here.
    """
    base_url = "https://github.com/greenelab/BioBombe/raw"
    commit = "aedc9dfd0503edfc5f25611f5eb112675b99edc9"
    url = "{}/{}/9.tcga-classify/data/top50_mutated_genes.tsv".format(
        base_url, commit)
    return pd.read_csv(url, sep='\t')
def load_vogelstein():
    """Load cancer-relevant genes from Vogelstein and Kinzler,
    Nature Medicine 2004 (https://doi.org/10.1038/nm1087).

    The genes and their oncogene/TSG status were precomputed in the
    pancancer repo, so they are simply downloaded from there and the
    columns renamed to this package's convention.
    """
    base_url = "https://github.com/greenelab/pancancer/raw"
    commit = "2a0683b68017fb226f4053e63415e4356191734f"
    url = "{}/{}/data/vogelstein_cancergenes.tsv".format(base_url, commit)
    column_names = {'Gene Symbol': 'gene',
                    'Classification*': 'classification'}
    return pd.read_csv(url, sep='\t').rename(columns=column_names)
def get_classification(gene, genes_df=None):
    """Return the oncogene/TSG classification for the given gene.

    Looks in *genes_df* first (if provided), then the Vogelstein gene
    list, then the BioBombe top-50 list; falls back to 'neither'.

    Bug fix: ``gene in genes_df.gene`` tests membership in the Series
    *index* (integer positions), not the gene symbols, so every lookup
    silently fell through to the slower network-backed loaders.
    Membership is now tested against the Series values.
    """
    classification = 'neither'
    if (genes_df is not None) and (gene in genes_df.gene.values):
        classification = genes_df[genes_df.gene == gene].classification.iloc[0]
    else:
        genes_df = load_vogelstein()
        if gene in genes_df.gene.values:
            classification = genes_df[genes_df.gene == gene].classification.iloc[0]
        else:
            genes_df = load_top_50()
            if gene in genes_df.gene.values:
                classification = genes_df[genes_df.gene == gene].classification.iloc[0]
    return classification
def load_pancancer_data_from_repo(subset_columns=None):
    """Download the feature-matrix inputs from the pancancer repo."""
    base_url = "https://github.com/greenelab/pancancer/raw"
    commit = "2a0683b68017fb226f4053e63415e4356191734f"

    def _fetch(name):
        # Every input is a TSV with the sample id in the first column.
        url = "{}/{}/data/{}".format(base_url, commit, name)
        return pd.read_csv(url, index_col=0, sep='\t')

    sample_freeze_df = _fetch("sample_freeze.tsv")
    mutation_df = _fetch("pancan_mutation_freeze.tsv.gz")
    copy_loss_df = _fetch("copy_number_loss_status.tsv.gz")
    copy_gain_df = _fetch("copy_number_gain_status.tsv.gz")
    mut_burden_df = _fetch("mutation_burden_freeze.tsv")

    if subset_columns is not None:
        # sample_freeze_df and mut_burden_df are intentionally left alone:
        # they have no gene-valued columns to subset.
        mutation_df = mutation_df.reindex(subset_columns, axis='columns')
        copy_loss_df = copy_loss_df.reindex(subset_columns, axis='columns')
        copy_gain_df = copy_gain_df.reindex(subset_columns, axis='columns')

    return (
        sample_freeze_df,
        mutation_df,
        copy_loss_df,
        copy_gain_df,
        mut_burden_df
    )
def load_sample_info(verbose=False):
    """Load the TCGA sample metadata table, indexed by sample id.

    Used by the splitters below to map samples to cancer types.
    """
    if verbose:
        print('Loading sample info...', file=sys.stderr)
    return pd.read_csv(cfg.sample_info, sep='\t', index_col='sample_id')
def split_stratified(rnaseq_df, sample_info_df, num_folds=4, fold_no=1,
                     seed=cfg.default_seed):
    """Split expression data into train and test sets.

    The train and test sets will both contain data from all cancer types,
    in roughly equal proportions.

    Arguments
    ---------
    rnaseq_df (pd.DataFrame): samples x genes expression dataframe
    sample_info_df (pd.DataFrame): maps samples to cancer types
    num_folds (int): number of cross-validation folds
    fold_no (int): cross-validation fold to hold out
    seed (int): seed for deterministic splits

    Returns
    -------
    rnaseq_train_df (pd.DataFrame): samples x genes train data
    rnaseq_test_df (pd.DataFrame): samples x genes test data

    NOTE(review): a third element, the augmented sample_info_df, is also
    returned; and if fold_no is not in range(num_folds), train_df is never
    bound and the final return presumably raises NameError -- confirm
    callers always pass a valid fold index.
    """
    # subset sample info to samples in pre-filtered expression data
    sample_info_df = sample_info_df.reindex(rnaseq_df.index)

    # generate id for stratification
    # this is a concatenation of cancer type and sample/tumor type, since we want
    # to stratify by both
    sample_info_df = sample_info_df.assign(
        id_for_stratification = sample_info_df.cancer_type.str.cat(
                                                sample_info_df.sample_type)
    )
    # recode stratification id if they are singletons or near-singletons,
    # since these won't work with StratifiedKFold
    # (stratified splitting needs at least num_folds members per stratum,
    # so rarer ids are lumped together into a catch-all 'other' stratum)
    stratify_counts = sample_info_df.id_for_stratification.value_counts().to_dict()
    sample_info_df = sample_info_df.assign(
        stratify_samples_count = sample_info_df.id_for_stratification
    )
    # map each stratification id to its frequency, then recode rare ones
    sample_info_df.stratify_samples_count = sample_info_df.stratify_samples_count.replace(
        stratify_counts)
    sample_info_df.loc[
        sample_info_df.stratify_samples_count < num_folds, 'id_for_stratification'
    ] = 'other'

    # now do stratified CV splitting and return the desired fold
    kf = StratifiedKFold(n_splits=num_folds, shuffle=True, random_state=seed)
    for fold, (train_ixs, test_ixs) in enumerate(
            kf.split(rnaseq_df, sample_info_df.id_for_stratification)):
        if fold == fold_no:
            train_df = rnaseq_df.iloc[train_ixs]
            test_df = rnaseq_df.iloc[test_ixs]
    return train_df, test_df, sample_info_df
def split_by_cancer_type(rnaseq_df,
                         sample_info_df,
                         holdout_cancer_type,
                         use_pancancer=False,
                         use_pancancer_only=False,
                         num_folds=4,
                         fold_no=1,
                         seed=cfg.default_seed):
    """Split expression data into train and test sets.

    The test set will contain data from a single cancer type. The train set
    will contain either the remaining data from that cancer type, or the
    remaining data from that cancer type plus data from all other cancer
    types in the dataset.

    Arguments
    ---------
    rnaseq_df (pd.DataFrame): samples x genes expression dataframe
    sample_info_df (pd.DataFrame): maps samples to cancer types
    holdout_cancer_type (str): cancer type to hold out
    use_pancancer (bool): whether or not to include pan-cancer data in train set
    use_pancancer_only (bool): if True, use only pan-cancer data as train set
        (i.e. without data from the held-out cancer type)
    num_folds (int): number of cross-validation folds
    fold_no (int): cross-validation fold to hold out
    seed (int): seed for deterministic splits

    Returns
    -------
    rnaseq_train_df (pd.DataFrame): samples x genes train data
    rnaseq_test_df (pd.DataFrame): samples x genes test data
    """
    # CV-split the held-out cancer type; its test fold is always the test
    # set, regardless of how the training set is assembled below.
    holdout_ids = (
        sample_info_df.loc[sample_info_df.cancer_type == holdout_cancer_type]
        .index
    )
    holdout_df = rnaseq_df.loc[rnaseq_df.index.intersection(holdout_ids), :]
    holdout_train_df, rnaseq_test_df = split_single_cancer_type(
        holdout_df, num_folds, fold_no, seed)

    if not (use_pancancer or use_pancancer_only):
        # default: train only on the held-out type's remaining folds
        return holdout_train_df, rnaseq_test_df

    # Collect samples from every *other* cancer type.
    other_ids = (
        sample_info_df.loc[~(sample_info_df.cancer_type == holdout_cancer_type)]
        .index
    )
    pancancer_df = rnaseq_df.loc[rnaseq_df.index.intersection(other_ids), :]
    if use_pancancer:
        # pan-cancer data plus the held-out type's training folds
        rnaseq_train_df = pd.concat((pancancer_df, holdout_train_df))
    else:
        # use_pancancer_only: train exclusively on the other cancer types
        rnaseq_train_df = pancancer_df
    return rnaseq_train_df, rnaseq_test_df
def split_single_cancer_type(cancer_type_df, num_folds, fold_no, seed):
    """Split data for a single cancer type into train and test sets.

    Returns the (train, test) pair corresponding to CV fold *fold_no* of a
    seeded, shuffled KFold split of *cancer_type_df*.
    """
    splitter = KFold(n_splits=num_folds, shuffle=True, random_state=seed)
    all_folds = enumerate(splitter.split(cancer_type_df))
    for fold_ix, (train_ixs, test_ixs) in all_folds:
        if fold_ix != fold_no:
            continue
        train_df = cancer_type_df.iloc[train_ixs]
        test_df = cancer_type_df.iloc[test_ixs]
    return train_df, test_df
def summarize_results(results, gene, holdout_cancer_type, signal, z_dim,
                      seed, algorithm, data_type):
    """Flatten a `get_threshold_metrics` result into output records.

    Arguments
    ---------
    results: a results object output from `get_threshold_metrics`
    gene: the gene being predicted
    holdout_cancer_type: the cancer type being used as holdout data
    signal: the signal of interest
    z_dim: the internal bottleneck dimension of the compression model
    seed: the seed used to compress the data
    algorithm: the algorithm used to compress the data
    data_type: the type of data (either training, testing, or cv)

    Returns
    -------
    A (metrics_list, roc_df, pr_df) triple: the AUROC/AUPR values plus the
    identifying metadata, and the ROC / precision-recall curve dataframes
    annotated with the same metadata columns.
    """
    metadata = [
        gene,
        holdout_cancer_type,
        signal,
        z_dim,
        seed,
        algorithm,
        data_type,
    ]
    metrics_out = [results["auroc"], results["aupr"]] + metadata

    # Tag both curve dataframes with the experiment's identifying columns
    # (assign returns annotated copies; the inputs are left untouched).
    annotations = dict(
        predictor=gene,
        signal=signal,
        z_dim=z_dim,
        seed=seed,
        algorithm=algorithm,
        data_type=data_type,
    )
    roc_df = results["roc_df"].assign(**annotations)
    pr_df = results["pr_df"].assign(**annotations)
    return metrics_out, roc_df, pr_df
| [
"ariel.hippen@gmail.com"
] | ariel.hippen@gmail.com |
94ca286760368bc7efe7db01e11c244b8fdcad94 | eb70b1610d167e250df8f7cdee6413c896808253 | /computing_and_control/ipcv/histogram_opencv.py | cac9f58d278b4ee3b6b127cba53cfd09d4cff834 | [
"MIT"
] | permissive | aap5869/RIT | cc3c81880546eacd45b01d40bfb74e0e8d4b2e09 | d8a408e59a94b0edde56a207592fd7b803172119 | refs/heads/master | 2021-01-10T14:45:17.071357 | 2016-04-20T15:20:06 | 2016-04-20T15:20:06 | 55,375,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,919 | py | import cv2
import numpy
def histogram(image, bitDepth=8):
    """
    title::
        histogram_opencv

    description::
        Generate the histogram, probability density function (PDF), and
        cumulative density function (CDF) of an image using
        cv2.calcHist.  All three results are returned as plain Python
        lists.

    attributes::
        image
            (numpy ndarray) An image file as read by cv2.imread.  May be
            grayscale or full color; color channel order is BGR.
        bitDepth
            (int [optional]) Bits per color channel.  Defaults to 8.

    returns::
        h, pdf, cdf
            For a color image, each is a list of three per-channel lists
            in BGR order.  For a grayscale image, each is a flat list of
            2**bitDepth elements.

    author::
        Alex Perkins

    copyright::
        Copyright (C) 2016, Rochester Institute of Technology

    version::
        1.0.1
    """
    # Number of possible pixel values per channel.
    maxCount = 2**bitDepth

    if len(image.shape) == 3:
        # Color image: one histogram per BGR channel.
        rows, cols, planes = image.shape
        numPixels = rows * cols
        h = numpy.array([[0]*maxCount, [0]*maxCount, [0]*maxCount])
        for plane in range(planes):
            h[plane] = cv2.calcHist([image], [plane], None, [maxCount],
                                    [0, maxCount]).reshape(-1)
        pdf = h / numPixels
        # Accumulate along each channel's bins.
        cdf = numpy.cumsum(pdf, axis=1)
    else:
        # Grayscale image.
        rows, cols = image.shape
        numPixels = rows * cols
        # Bug fix: calcHist returns a (maxCount, 1) column vector.  The
        # original converted it as-is, so h and pdf came back as nested
        # one-element lists while cdf was flat, contradicting the
        # documented contract.  reshape(-1) makes all three flat.
        h = cv2.calcHist([image], [0], None, [maxCount],
                         [0, maxCount]).reshape(-1)
        pdf = h / numPixels
        cdf = numpy.cumsum(pdf)

    return h.tolist(), pdf.tolist(), cdf.tolist()
if __name__ == '__main__':

    import cv2
    import ipcv
    import time

    # Pick a test image; the second assignment wins, so the color image is
    # used.  Reorder (or comment one out) to exercise the other branch.
    # A greyscale test image
    filename = 'crowd.jpg'
    # A 3-channel color test image
    filename = 'lenna.tif'

    im = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
    print('Data type = {0}'.format(type(im)))
    print('Image shape = {0}'.format(im.shape))
    print('Image size = {0}'.format(im.size))

    # Map the image's numpy dtype to a per-channel bit depth.
    dataType = str(im.dtype)
    imType = {'uint8':8, 'uint16':16, 'uint32':32}

    startTime = time.time()
    h, pdf, cdf = ipcv.histogram(im, bitDepth=imType[dataType])
    print('Elasped time = {0} [s]'.format(time.time() - startTime))

    # The follow will produce a figure containing color-coded plots of the
    # computed histogram, probability function (PDF), and cumulative density
    # function (CDF)
    import matplotlib.pyplot
    import matplotlib.backends.backend_agg

    maxCount = 2**imType[dataType]
    bins = list(range(maxCount))
    figure = matplotlib.pyplot.figure('Histogram')
    canvas = matplotlib.backends.backend_agg.FigureCanvas(figure)
    histAxes = figure.add_subplot(3, 1, 1)
    pdfAxes = figure.add_subplot(3, 1, 2)
    cdfAxes = figure.add_subplot(3, 1, 3)
    if len(im.shape) == 3:
        # Color image: plot each BGR channel in its matching color.
        histAxes.set_ylabel('Number of Pixels')
        histAxes.set_xlim([0, maxCount - 1])
        histAxes.plot(bins, h[0], 'b', \
                      bins, h[1], 'g', \
                      bins, h[2], 'r')
        pdfAxes.set_ylabel('PDF')
        pdfAxes.set_xlim([0, maxCount - 1])
        pdfAxes.plot(bins, pdf[0], 'b', \
                     bins, pdf[1], 'g', \
                     bins, pdf[2], 'r')
        cdfAxes.set_xlabel('Digital Count')
        cdfAxes.set_ylabel('CDF')
        cdfAxes.set_xlim([0, maxCount - 1])
        cdfAxes.plot(bins, cdf[0], 'b', \
                     bins, cdf[1], 'g', \
                     bins, cdf[2], 'r')
    else:
        # Greyscale image: a single black curve per panel.
        histAxes.set_ylabel('Number of Pixels')
        histAxes.set_xlim([0, maxCount - 1])
        histAxes.plot(bins, h, 'k')
        pdfAxes.set_ylabel('PDF')
        pdfAxes.set_xlim([0, maxCount - 1])
        pdfAxes.plot(bins, pdf, 'k')
        cdfAxes.set_xlabel('Digital Count')
        cdfAxes.set_ylabel('CDF')
        cdfAxes.set_xlim([0, maxCount - 1])
        cdfAxes.plot(bins, cdf, 'k')
    matplotlib.pyplot.show()
| [
"aap5869@g.rit.edu"
] | aap5869@g.rit.edu |
bf6a35193c25eeb08ef1d1610147904daa40253f | 6be2d138b50c0968c7997688f941bfb0e0230ae6 | /management_audit/models.py | bc896ea922f40d0da6dcdc9098f5418e7414d784 | [] | permissive | hiisi13/django-management-audit | 0cbd8403a105509b5a72bc2a1b20f6bb1a00c6bb | 3d8a289f8388f1ba89eca8903c0e1af88d62e6b2 | refs/heads/master | 2023-01-12T08:48:04.731561 | 2014-06-16T06:05:19 | 2014-06-16T06:05:28 | 20,496,147 | 0 | 0 | BSD-3-Clause | 2022-12-26T19:45:31 | 2014-06-04T19:06:16 | Python | UTF-8 | Python | false | false | 272 | py | # -*- coding: utf-8 -*-
from django.db import models
class Audit(models.Model):
    """One row per recorded management-command run."""
    # Name of the management command that was executed.
    command_name = models.CharField(max_length=80)
    # Wall-clock start and end of the run (duration is the difference).
    date_started = models.DateTimeField()
    date_ended = models.DateTimeField()

    class Meta:
        # Explicitly pin the owning app for this model.
        app_label = 'management_audit'
| [
"dmitry-kozhedubov@yandex.ru"
] | dmitry-kozhedubov@yandex.ru |
d90fcac0e12cd0f321dbfa11976d0074cb2a681c | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_75/200.py | fffe0758681ce42de20ca4fef4e35391db502cce | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py |
def checkCase(data):
elements=[]
nonbase=[]
opposed=[]
for i in xrange(0, int(data[0])):
nonbase.append((data[i+1][0],data[i+1][1],data[i+1][2]))
data=data[int(data[0])+1:]
for i in xrange(0, int(data[0])):
opposed.append((data[i+1][0],data[i+1][1]))
data=data[-1]
for cmd in data:
try:
if len(elements) > 0:
for n in nonbase:
if (n[0] == elements[-1] and cmd == n[1]) or (n[1] == elements[-1] and cmd == n[0]):
elements[-1]=n[2]
1/0
for o in opposed:
if (o[0] in elements and cmd == o[1]) or (o[1] in elements and cmd == o[0]):
elements=[]
1/0
elements.append(cmd)
except:
pass
return str(elements).replace("'","")
data=open("B-large.in","r").read()
data=data.splitlines()[1:]
out=open("out.txt","w")
for c in xrange(0, len(data)):
tmp=data[c].split(" ")
out.write("Case #%i: %s\n"%(c+1,checkCase(tmp)))
out.close() | [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
30fb72f40582c23a9f9dd19a02d75877810dce08 | 44b9fa8d1113299d327d087da73febf26bef61e7 | /WINDOW_openMDAO/AEP/FastAEP/farm_energy/wake_model_mean_new/wake_overlap.py | ff871ca75c2e2612402c55bb235094bbeda88a94 | [] | no_license | sebasanper/WINDOW_openMDAO | 828e6d38546e706d23e4920b1c6e857c6be10825 | 3779fa8380874bc2cd7380df90339b37806a6a60 | refs/heads/master | 2023-04-12T22:09:42.951295 | 2023-04-05T08:54:15 | 2023-04-05T08:54:15 | 107,442,976 | 3 | 9 | null | 2019-01-20T16:32:42 | 2017-10-18T17:47:04 | HTML | UTF-8 | Python | false | false | 789 | py | from math import sqrt
def root_sum_square(array_deficits):
# This is one model, root sum square of individual wind speed deficits.
total_deficit = sqrt(sum([deficit ** 2.0 for deficit in array_deficits]))
return total_deficit
def multiplied(array_deficits):
total_deficit = 1.0
for element in array_deficits:
total_deficit *= element
return total_deficit
def summed(array_deficits):
total_deficit = sum(array_deficits)
if total_deficit > 1.0:
total_deficit = 1.0
return total_deficit
def maximum(array_deficits):
return max(array_deficits)
if __name__ == '__main__':
deficits = [0.3, 0.4]
# print root_sum_square(deficits)
# print multiplied(deficits)
# print summed(deficits)
# print maximum(deficits)
| [
"s.sanchezperezmoreno@tudelft.nl"
] | s.sanchezperezmoreno@tudelft.nl |
86cfe8a6f28681768008e205860dc50ea646a073 | 76af5f63e173850a461dd104d696a3ad86958b6d | /ObjectDetectionDeps/Generate_Labelmap.py | 543168988a48df229884cc695dd2deda73776def | [] | no_license | Danny-Dasilva/Tensorflow_Object_Detection | 599b76d86918b1425a8d9e35d6dc5644224e6692 | b0386dfac730b516594d511849560ff59a2bf979 | refs/heads/master | 2022-03-30T11:34:08.595899 | 2020-02-08T16:35:20 | 2020-02-08T16:35:20 | 197,986,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 756 | py |
import os
path = os.environ['IMAGEPATH'] + '/Train_labels.csv'
import csv
col = []
with open(path, newline='') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
#print(set(row['class']))
col.append(row['class'])
classes = list(set(col))
print(classes)
count = 0
pbtxt_label = open("labelmap.pbtxt","w")
for label in classes:
count += 1
pbtxt_label.write("item {\n")
pbtxt_label.write(" id: %s\n" % (count))
pbtxt_label.write(" name: '%s'\n" % (label))
pbtxt_label.write("}\n")
pbtxt_label.write("\n")
count = 0
txt_label = open("labels.txt","w")
for label in classes:
txt_label.write("%s %s\n" % (count, label))
count += 1
pbtxt_label.close()
txt_label.close() | [
"yahchayildasilva@gmail.com"
] | yahchayildasilva@gmail.com |
34d6c85cbf533f303b059ed7e10e34a4332e91a5 | 9d440715f9263086f8b69d5d8cce8542ba43786e | /two_sum.py | 4eed31eb02c287a4ca0d0d8da44f5fe3931e0907 | [] | no_license | mickey1233/two_sum2 | 41bc17220fc3eb0aca284d1bf8f5e191ae840a7a | c779d30e65f0d770e325125228063585ccb2b5f1 | refs/heads/main | 2023-08-14T13:09:52.904208 | 2021-09-15T12:58:23 | 2021-09-15T12:58:23 | 406,764,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | def twosum(numbers, target):
map = {}
for i in range(len(numbers)):
if target-numbers[i] not in map:
map[numbers[i]] = i
else:
print(list(map[target-numbers[i]]+1, i+1))
def main():
    """Run the three sample cases through twosum."""
    sample_cases = (
        ([2, 7, 11, 15], 9),
        ([2, 3, 4], 6),
        ([-1, 0], -1),
    )
    for nums, target in sample_cases:
        twosum(nums, target)


if __name__ == "__main__":
    main()
| [
"noreply@github.com"
] | noreply@github.com |
38134a59f724c411568dd11a3bfac0f88a1b52b7 | e9c86da8186bcdd507bf26633fef1601d519631d | /config.py | b4622fbfb01bc6b411aed34b2fc9039681bcc1ea | [
"Apache-2.0"
] | permissive | LGM-Curtis/Bilibili-picture_area | 9f82d4d931a7a4de5f3d9eccfd42142078c8b99c | 8cbad6ba28036045526f5fd516f1f19d4306f144 | refs/heads/master | 2020-09-05T03:01:16.515166 | 2019-11-06T10:28:24 | 2019-11-06T10:28:24 | 219,963,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | """"
Cosplay分区
最热 'https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=cos&type=hot&page_num=0&page_size=20'
最新 'https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=cos&type=new&page_num=0&page_size=20'
私服分区
最热 'https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=sifu&type=hot&page_num=0&page_size=20'
最新 'https://api.vc.bilibili.com/link_draw/v2/Photo/list?category=sifu&type=new&page_num=0&page_size=20'
""" | [
"45490062+LGM-Curtis@users.noreply.github.com"
] | 45490062+LGM-Curtis@users.noreply.github.com |
cc97afe2fdf21ecab13afb0ddcfafe6ae2c2144f | 9002c1b9f6ca9c70f5cc25899d5fe9b193de3a52 | /project/models.py | c752a0d6a7e28ef6f5f068a57f14b6b33e770ac7 | [] | no_license | SandipanGhosh/FlaskTaskManager-02 | b9f0254f72ef4df89f25bac87caac34ece9d8be1 | c6a6d9b7f0754d80c773f1c9b0e98ba200a2b871 | refs/heads/master | 2021-05-09T09:09:27.427759 | 2018-02-02T21:07:36 | 2018-02-02T21:07:36 | 119,424,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,258 | py | # project/models.py
from views import db
import datetime
class Task(db.Model):
    """A to-do item belonging to a User (via user_id)."""
    __tablename__ = "tasks"
    task_id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, nullable=False)
    due_date = db.Column(db.Date, nullable=False)
    priority = db.Column(db.Integer, nullable=False)
    # Pass the callable, not its result: `utcnow()` would be evaluated once
    # at import time, freezing a single date for every row ever inserted.
    posted_date = db.Column(db.Date, default=datetime.datetime.utcnow)
    status = db.Column(db.Integer)
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    def __init__(self, name, due_date, priority, posted_date, status, user_id):
        self.name = name
        self.due_date = due_date
        self.priority = priority
        self.posted_date = posted_date
        self.status = status
        self.user_id = user_id
    def __repr__(self):
        return '<name {0}>'.format(self.name)
class User(db.Model):
    """An account owning tasks; name and email are unique.

    NOTE(review): `password` is stored as a plain String column — nothing
    here shows hashing; verify the caller hashes before construction.
    """
    __tablename__ = "users"
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True, nullable=False)
    email = db.Column(db.String, unique=True, nullable=False)
    password = db.Column(db.String, nullable=False)
    # Reverse side: each Task gains a `poster` attribute referencing its User.
    tasks = db.relationship('Task', backref='poster')
    def __init__(self, name=None, email=None, password=None):
        self.name = name
        self.email = email
        self.password = password
    def __repr__(self):
        return '<User {0}>'.format(self.name)
| [
"sandipan.ghosh2005@gmail.com"
] | sandipan.ghosh2005@gmail.com |
6da9faa43719b34fe1f5824aa6c271c993fb4534 | 7ad0ea6e17c6505c419f70b956a06f36b734779b | /BizchoolLab/project/urls.py | e8a928037905962c68a0c309904a81b359a0d1ac | [] | no_license | yoongyo/BizLab | 34cb2e6386030fb091853d90065063367ae32521 | dfe5f1e69d6a711e96f0f456f36ecfbccf010892 | refs/heads/master | 2020-04-12T13:08:55.782891 | 2018-12-20T02:09:54 | 2018-12-20T02:09:54 | 162,513,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | from django.urls import path, re_path
from . import views
urlpatterns = [
re_path(r'^new/$', views.project_new, name="project_new"),
re_path(r'^$', views.project_list, name="project_list"),
re_path(r'^(?P<pk>\d+)/$', views.project_detail, name="project_detail"),
re_path(r'^(?P<pk>\d+)/Edit/$', views.project_edit, name="project_edit"),
] | [
"jyg0172@naver.com"
] | jyg0172@naver.com |
fc4ab389840c49b9e8ef3baf3fbebff64ef16ba7 | a4e14955779a44024d3299ecc4857741a00236e0 | /src/test_client.py | cbe11ea781d682484a0b4ff8efbc082d3a321478 | [] | no_license | Cornell-Tech-Turtlebot/orchestrator | 226e1170d5348275ff70025d5e94339f40c168dd | 449c97080fe3141b99597e44b2d69f5869729041 | refs/heads/master | 2022-12-11T20:20:35.101870 | 2020-08-23T18:04:10 | 2020-08-23T18:04:10 | 283,891,298 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,130 | py | #!/usr/bin/python
import rospy
from std_msgs.msg import String
from rospy_tutorials.srv import *
def status_out():
    """Publish a timestamped "hello world" string on 'status_out' at 10 Hz until shutdown."""
    rospy.init_node('orch', anonymous=True)
    pub = rospy.Publisher('status_out', String, queue_size=10)
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        msg_out = "hello world %s" % rospy.get_time()
        # Log and publish the same string each cycle.
        rospy.loginfo(msg_out)
        pub.publish(msg_out)
        rate.sleep()
def callback(data):
    """Log every String message received on the subscribed topic."""
    rospy.loginfo(rospy.get_caller_id() + 'I heard %s', data.data)
def status_in():
    """Subscribe to 'status_in' and log incoming messages; blocks in spin() until shutdown."""
    rospy.init_node('orch', anonymous=True)
    rospy.Subscriber('status_in', String, callback)
    rospy.spin()
"""
EVERYTING ABOVE WAS FOR TESTING
"""
def orchestrator():
    """Call the 'add_two_ints' service once as a client and print the sum."""
    print('waiting on service')
    rospy.wait_for_service('add_two_ints')
    print('service initiated')
    try:
        # ServiceProxy is the client-side handle; rospy.Service (as in the
        # original) registers a *server* and is not callable like this.
        add_two_ints = rospy.ServiceProxy('add_two_ints', AddTwoInts)
        resp1 = add_two_ints(5, 10)
        # resp1.sum is an int; str() avoids TypeError on concatenation.
        print('answer = ' + str(resp1.sum))
    except rospy.ServiceException as e:
        print('Service call failed: %s' % e)
if __name__ == '__main__':
    try:
        # Earlier pub/sub experiments, kept disabled:
        #status_out()
        #status_in()
        print('run that shit')
        orchestrator()
    except rospy.ROSInterruptException:
        print('fuck')
        pass
| [
"zachgitt@umich.edu"
] | zachgitt@umich.edu |
f2536c6d3f382ecd5d7c0ab7aa19a39a61db1aff | 453d2e699d218fdb3bc1e535a707988194ac6717 | /lib/opengl/postproc/base.py | e38f2f67f6835325a7416c113e3b01a34d8e7a81 | [
"MIT"
] | permissive | defgsus/thegame | d54ffcd343c7e1805d2c11e24cd38b02243e73d4 | 38a627d9108f1418b94b08831fd640dd87fbba83 | refs/heads/master | 2023-07-23T06:32:40.297591 | 2022-04-11T12:02:32 | 2022-04-11T12:02:32 | 127,875,178 | 1 | 0 | MIT | 2023-07-06T22:07:07 | 2018-04-03T08:21:31 | Python | UTF-8 | Python | false | false | 1,264 | py | from ..RenderSettings import RenderSettings
from ..RenderNode import RenderNode
from ..ScreenQuad import ScreenQuad
from ..core.Shader import Shader
class PostProcNode(RenderNode):
    """Base render node for full-screen post-processing passes.

    Subclasses supply the fragment shader source via get_code(); the quad
    shader is (re)compiled lazily on the first render after construction.
    """
    def __init__(self, name):
        super().__init__(name)
        # Full-screen quad that carries this node's shader.
        self.quad = ScreenQuad(name="pp-quad-%s" % self.name)
        # Forces a one-time shader compile on the next render().
        self.do_compile = True
    def release(self):
        """Release GL resources owned by the quad."""
        self.quad.release()
    def render(self, rs: RenderSettings, pass_num: int):
        """Draw the post-processing quad for one pass."""
        if self.do_compile:
            self.quad.set_shader_code(self.get_code())
            self.do_compile = False
        # Bind texture units 0-3 to the conventional u_tex1..u_tex4 samplers.
        self.quad.drawable.shader.set_uniform("u_tex1", 0)
        self.quad.drawable.shader.set_uniform("u_tex2", 1)
        self.quad.drawable.shader.set_uniform("u_tex3", 2)
        self.quad.drawable.shader.set_uniform("u_tex4", 3)
        self.quad.drawable.shader.set_uniform("u_time", rs.time)
        # Let subclasses add their own uniforms before drawing.
        self.update_uniforms(self.quad.drawable.shader, rs, pass_num=pass_num)
        self.quad.draw(rs.render_width, rs.render_height)
        #self.quad.draw_centered(rs.render_width, rs.render_height, rs.render_width, rs.render_height)
    def get_code(self):
        """Return the shader source for this pass; must be overridden."""
        raise NotImplementedError
    def update_uniforms(self, shader: Shader, rs: RenderSettings, pass_num: int):
        """Hook for subclasses to set extra uniforms; default does nothing."""
        pass
| [
"s.berke@netzkolchose.de"
] | s.berke@netzkolchose.de |
038243668ac16b39e17fbc3ecc4dfe6eb39856d0 | 2031771d8c226806a0b35c3579af990dd0747e64 | /pyobjc-framework-Photos/PyObjCTest/test_phphotolibrary.py | 7174dd32bc9163fff070ec2f446bfa8aa62aa0cf | [
"MIT"
] | permissive | GreatFruitOmsk/pyobjc-mirror | a146b5363a5e39181f09761087fd854127c07c86 | 4f4cf0e4416ea67240633077e5665f5ed9724140 | refs/heads/master | 2018-12-22T12:38:52.382389 | 2018-11-12T09:54:18 | 2018-11-12T09:54:18 | 109,211,701 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | from PyObjCTools.TestSupport import *
import sys
if sys.maxsize > 2 ** 32:
import Photos
    class TestPHPhotoLibrary (TestCase):
        """Checks the PyObjC bridge metadata for the Photos.PHPhotoLibrary API."""
        def testConstants(self):
            # PHAuthorizationStatus enum values as defined by the framework.
            self.assertEqual(Photos.PHAuthorizationStatusNotDetermined, 0)
            self.assertEqual(Photos.PHAuthorizationStatusRestricted, 1)
            self.assertEqual(Photos.PHAuthorizationStatusDenied, 2)
            self.assertEqual(Photos.PHAuthorizationStatusAuthorized, 3)
        @min_sdk_level('10.13')
        def testProtocols(self):
            # Protocol must be registered with the ObjC runtime.
            objc.protocolNamed('PHPhotoLibraryChangeObserver')
        @min_os_level('10.13')
        def testMethods(self):
            # Verify block/out-argument/BOOL bridging signatures.
            self.assertArgIsBlock(Photos.PHPhotoLibrary.requestAuthorization_, 0, b'v' + objc._C_NSInteger)
            self.assertArgIsBlock(Photos.PHPhotoLibrary.performChanges_completionHandler_, 1, b'vZ@')
            self.assertArgIsOut(Photos.PHPhotoLibrary.performChangesAndWait_error_, 1)
            self.assertResultIsBOOL(Photos.PHPhotoLibrary.performChangesAndWait_error_)
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
ce8aa85757920f22d73bf26e17470172c47f5e75 | 15db063c0c420ae080c5a3261305c133842fbd09 | /generate_filelist.py | f1964b18306011881724105c36b6b6390fd21cf3 | [
"MIT"
] | permissive | Viarow/Mask_RCNN | f6e61c236cdae4d32fa04a2a8b1d54eb513b10ca | e88a0f29785a94b4adbc493fdb63d8ef8e300edf | refs/heads/master | 2020-07-09T03:26:51.892382 | 2019-09-15T08:37:08 | 2019-09-15T08:37:08 | 203,862,431 | 0 | 0 | NOASSERTION | 2019-08-22T19:47:43 | 2019-08-22T19:47:42 | null | UTF-8 | Python | false | false | 1,589 | py | import os
from tqdm import tqdm
_UCF101_ROOT_ = '/media/Med_6T2/mmaction/data_tools/ucf101/videos/'
def average_filelist(root_path, vid_num, filelist_path):
    """Write up to vid_num "<class>/<video>" lines per class to filelist_path.

    Classes are the subdirectories of root_path, processed in sorted order.
    """
    # Context manager guarantees the list file is flushed and closed.
    with open(filelist_path, 'w') as listfile:
        all_classes = os.listdir(root_path)
        all_classes.sort()
        for c_item in tqdm(all_classes):
            class_path = os.path.join(root_path, c_item)
            all_videos = os.listdir(class_path)
            if len(all_videos) >= vid_num:
                videos = all_videos[0: vid_num]
            else:
                print("{:d} videos in ".format(len(all_videos)) + c_item + '\n')
                # Fewer videos than requested: take them all. The original
                # left `videos` unassigned here (NameError on the first
                # class, stale previous list afterwards).
                videos = all_videos
            for v_item in videos:
                listfile.write(os.path.join(c_item, v_item) + '\n')
            print("Generating video lists for " + c_item + '\n')
_Kinetics400_ROOT_ = '/media/Med_6T2/mmaction/data_tools/kinetics400/videos_val/'
def selected_filelist(selected_classes, root_path, vid_num, filelist_path):
    """Write up to vid_num "<class>/<video>" lines for each class in selected_classes."""
    # Context manager guarantees the list file is flushed and closed.
    with open(filelist_path, 'w') as listfile:
        all_classes = selected_classes
        for c_item in tqdm(all_classes):
            class_path = os.path.join(root_path, c_item)
            all_videos = os.listdir(class_path)
            if len(all_videos) >= vid_num:
                videos = all_videos[0: vid_num]
            else:
                print("{:d} videos in ".format(len(all_videos)) + c_item + '\n')
                # Take everything available; the original left `videos`
                # unassigned on this branch (NameError / stale list bug).
                videos = all_videos
            for v_item in videos:
                listfile.write(os.path.join(c_item, v_item) + '\n')
            print("Generating video lists for " + c_item + '\n')
def generate_filelist():
    """Build the UCF-101 test list: up to 50 videos per class."""
    average_filelist(_UCF101_ROOT_, 50, './ucf101_testlist.txt')
if __name__ == '__main__':
generate_filelist()
| [
"universe37@outlook.com"
] | universe37@outlook.com |
a3058160dea228fc765e45cdcec942bd35ec57a9 | 148072ce210ca4754ea4a37d83057e2cf2fdc5a1 | /src/core/w3af/w3af/core/data/db/tests/test_dbms.py | cf0aed0578e3412ae13b214eeeea0442098cd14d | [] | no_license | ycc1746582381/webfuzzer | 8d42fceb55c8682d6c18416b8e7b23f5e430c45f | 0d9aa35c3218dc58f81c429cae0196e4c8b7d51b | refs/heads/master | 2021-06-14T18:46:59.470232 | 2017-03-14T08:49:27 | 2017-03-14T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,053 | py | # -*- coding: UTF-8 -*-
"""
Copyright 2013 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
import string
import os
from itertools import repeat, starmap
from random import choice
from w3af.core.data.db.dbms import SQLiteDBMS, get_default_temp_db_instance
from w3af.core.controllers.exceptions import DBException, NoSuchTableException
from w3af.core.controllers.misc.temp_dir import (get_temp_dir,
create_temp_dir,
remove_temp_dir)
def get_temp_filename():
temp_dir = get_temp_dir()
fname = ''.join(starmap(choice, repeat((string.letters,), 18)))
filename = os.path.join(temp_dir, fname + '.w3af.temp_db')
return filename
class TestDBMS(unittest.TestCase):
def setUp(self):
create_temp_dir()
def tearDown(self):
remove_temp_dir()
def test_open_error(self):
invalid_filename = '/'
self.assertRaises(DBException, SQLiteDBMS, invalid_filename)
def test_simple_db(self):
db = SQLiteDBMS(get_temp_filename())
db.create_table('TEST', set([('id', 'INT'), ('data', 'TEXT')])).result()
db.execute('INSERT INTO TEST VALUES (1,"a")').result()
self.assertIn(('1', 'a'), db.select('SELECT * from TEST'))
self.assertEqual(('1', 'a'), db.select_one('SELECT * from TEST'))
def test_select_non_exist_table(self):
db = SQLiteDBMS(get_temp_filename())
self.assertRaises(NoSuchTableException, db.select, 'SELECT * from TEST')
def test_default_db(self):
db = get_default_temp_db_instance()
db.create_table('TEST', set([('id', 'INT'), ('data', 'TEXT')])).result()
db.execute('INSERT INTO TEST VALUES (1,"a")').result()
self.assertIn(('1', 'a'), db.select('SELECT * from TEST'))
self.assertEqual(('1', 'a'), db.select_one('SELECT * from TEST'))
def test_simple_db_with_pk(self):
db = SQLiteDBMS(get_temp_filename())
fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
fr.result()
self.assertEqual([], db.select('SELECT * from TEST'))
def test_drop_table(self):
db = SQLiteDBMS(get_temp_filename())
fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
fr.result()
db.drop_table('TEST').result()
self.assertRaises(DBException, db.drop_table('TEST').result)
def test_simple_db_with_index(self):
db = SQLiteDBMS(get_temp_filename())
fr = db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
fr.result()
db.create_index('TEST', ['data']).result()
self.assertRaises(DBException,
db.create_index('TEST', ['data']).result)
def test_table_exists(self):
db = SQLiteDBMS(get_temp_filename())
self.assertFalse(db.table_exists('TEST'))
db = SQLiteDBMS(get_temp_filename())
db.create_table('TEST', [('id', 'INT'), ('data', 'TEXT')], ['id'])
self.assertTrue(db.table_exists('TEST'))
def test_close_twice(self):
db = SQLiteDBMS(get_temp_filename())
db.close()
db.close()
class TestDefaultDB(unittest.TestCase):
def test_get_default_temp_db_instance(self):
self.assertEqual(id(get_default_temp_db_instance()),
id(get_default_temp_db_instance()))
| [
"everping@outlook.com"
] | everping@outlook.com |
076aeab25669433e451a3d814becbd6c70e99b89 | 0e2676e962968cc30cab3df6225db34d4efc64d1 | /recipebox/recipes/migrations/0011_auto_20161115_1842.py | f384533c57da54ae239f786063d058b4dd809749 | [
"BSD-3-Clause"
] | permissive | drmatthews/recipebox | f46534fcd9067a237cab486afe3d6d3b0e93a2c9 | d3879a348a6b3580ec448a8f4d6d71c33ef653f4 | refs/heads/master | 2021-01-15T15:37:27.235332 | 2016-11-21T17:39:53 | 2016-11-21T17:39:53 | 55,221,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-11-15 18:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace Recipe.recipe_picture_url with uploaded image + thumbnail fields.

    Auto-generated by Django; do not edit the operation list by hand.
    """
    dependencies = [
        ('recipes', '0010_externalrecipe_source'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='recipe',
            name='recipe_picture_url',
        ),
        migrations.AddField(
            model_name='recipe',
            name='recipe_picture',
            field=models.ImageField(blank=True, upload_to=b'images'),
        ),
        migrations.AddField(
            model_name='recipe',
            name='recipe_thumbnail',
            field=models.ImageField(blank=True, upload_to=b'thumbnails'),
        ),
    ]
| [
"daniel.r.matthews@kcl.ac.uk"
] | daniel.r.matthews@kcl.ac.uk |
3d985057d89eb79da653bd79087e157f47836539 | 5755d7598e066cacc6a68045dd458d9d07e7aef0 | /hello_world.py | 515637ea003110ea08e3c7f33bffbcac6ec6d7ab | [] | no_license | JemZzz/JEM-SPRITE | 6e7c60202620e3ba18582a5148e965a1ea5b83ad | a2f5653824a0e2e96c2a947c3c739c8335962cb0 | refs/heads/master | 2021-01-02T09:46:20.010435 | 2017-08-04T03:47:33 | 2017-08-04T03:47:33 | 99,295,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | print("This is a git branch")
print("This is a branch master")
| [
"z5157678@ad.unsw.edu.au"
] | z5157678@ad.unsw.edu.au |
b728b7a1c74922c4b5ecc77fd20377d3924e6d66 | 0821d92db624dada6bc50887f6e435ef1e1c03e2 | /norm/common.py | f8233b282b34e20c3f2abe8c3bf385be4388f6bb | [
"MIT"
] | permissive | jcollie/norm | a29a3052705e805ba240232aec1fd6aac59897ba | db303b28e4184cae08228d92868f9409c013096a | refs/heads/master | 2021-01-18T04:19:45.679791 | 2013-07-22T22:34:09 | 2013-07-22T22:34:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,735 | py | # Copyright (c) Matt Haggard.
# See LICENSE for details.
from zope.interface import implements
from twisted.internet import defer
from collections import deque, defaultdict
from norm.interface import IAsyncCursor, IRunner, IPool
class BlockingCursor(object):
    """
    I wrap a single DB-API2 db cursor in an asynchronous api.
    """
    implements(IAsyncCursor)
    def __init__(self, cursor):
        self.cursor = cursor
    def execute(self, sql, params=()):
        # maybeDeferred wraps the synchronous call (and any exception)
        # into a Deferred; the work still happens in the calling thread.
        return defer.maybeDeferred(self.cursor.execute, sql, params)
    def fetchone(self):
        return defer.maybeDeferred(self.cursor.fetchone)
    def fetchall(self):
        return defer.maybeDeferred(self.cursor.fetchall)
    def lastRowId(self):
        # Already-known value: return an immediately-fired Deferred.
        return defer.succeed(self.cursor.lastrowid)
    def close(self):
        return defer.maybeDeferred(self.cursor.close)
class BlockingRunner(object):
    """
    I wrap a single DB-API2 db connection in an asynchronous api.
    """
    implements(IRunner)
    cursorFactory = BlockingCursor
    def __init__(self, conn):
        """
        @param conn: A synchronous database connection.
        """
        self.conn = conn
    def runQuery(self, qry, params=()):
        # Runs inside an interaction so the statement is committed/rolled back.
        return self.runInteraction(self._runQuery, qry, params)
    def _runQuery(self, cursor, qry, params):
        # Execute, then resolve with all fetched rows.
        d = cursor.execute(qry, params)
        d.addCallback(lambda _: cursor.fetchall())
        return d
    def runOperation(self, qry, params=()):
        return self.runInteraction(self._runOperation, qry, params)
    def _runOperation(self, cursor, qry, params):
        return cursor.execute(qry, params)
    def runInteraction(self, function, *args, **kwargs):
        # Give `function` a fresh wrapped cursor; commit on success,
        # roll back on failure, and pass the result/failure through.
        cursor = self.cursorFactory(self.conn.cursor())
        d = defer.maybeDeferred(function, cursor, *args, **kwargs)
        d.addCallback(self._commit)
        d.addErrback(self._rollback)
        return d
    def _commit(self, result):
        self.conn.commit()
        return result
    def _rollback(self, result):
        # Re-returns the Failure so callers still see the original error.
        self.conn.rollback()
        return result
    def close(self):
        return defer.maybeDeferred(self.conn.close)
class ConnectionPool(object):
    """Dispatches runQuery/runOperation/runInteraction calls over a pool of runners."""
    implements(IRunner)
    db_scheme = None
    def __init__(self, pool=None):
        # Defaults to handing out the next free connection.
        self.pool = pool or NextAvailablePool()
    def add(self, conn):
        """Add a runner (e.g. a BlockingRunner) to the pool."""
        self.pool.add(conn)
    def runInteraction(self, function, *args, **kwargs):
        return self._runWithConn('runInteraction', function, *args, **kwargs)
    def runQuery(self, *args, **kwargs):
        return self._runWithConn('runQuery', *args, **kwargs)
    def runOperation(self, *args, **kwargs):
        return self._runWithConn('runOperation', *args, **kwargs)
    def _finish(self, result, conn):
        # Return the connection to the pool whether the call succeeded or failed.
        self.pool.done(conn)
        return result
    def _runWithConn(self, name, *args, **kwargs):
        # Wait for a free connection, then invoke method `name` on it.
        d = self.pool.get()
        d.addCallback(self._startRunWithConn, name, *args, **kwargs)
        return d
    def _startRunWithConn(self, conn, name, *args, **kwargs):
        m = getattr(conn, name)
        d = m(*args, **kwargs)
        return d.addBoth(self._finish, conn)
    def close(self):
        """Close every pooled connection; fires when all closes complete."""
        dlist = []
        for item in self.pool.list():
            dlist.append(defer.maybeDeferred(item.close))
        return defer.gatherResults(dlist)
class NextAvailablePool(object):
    """
    I give you the next available object in the pool.
    """
    implements(IPool)
    def __init__(self):
        # Currently-free options (FIFO).
        self._options = deque()
        # Every option ever added and not yet removed, free or checked out.
        self._all_options = []
        # Deferreds from get() calls waiting for a free option (FIFO).
        self._pending = deque()
        # option -> [Deferreds] from remove() calls waiting for check-in.
        self._pending_removal = defaultdict(lambda:[])
    def add(self, option):
        self._options.append(option)
        self._all_options.append(option)
        self._fulfillNextPending()
    def remove(self, option):
        """Remove an option; fires when it is no longer checked out."""
        try:
            self._options.remove(option)
            self._all_options.remove(option)
            return defer.succeed(option)
        except ValueError:
            # Currently checked out: defer removal until done() sees it.
            # NOTE(review): this path never removes the option from
            # _all_options — looks like a leak; verify against callers.
            d = defer.Deferred()
            self._pending_removal[option].append(d)
            return d
    def get(self):
        """Return a Deferred firing with the next free option."""
        d = defer.Deferred()
        self._pending.append(d)
        self._fulfillNextPending()
        return d
    def _fulfillNextPending(self):
        # Pair the oldest waiter with the oldest free option, if both exist.
        if self._pending and self._options:
            self._pending.popleft().callback(self._options.popleft())
    def done(self, option):
        """Check an option back in (or complete its pending removal)."""
        if option in self._pending_removal:
            dlist = self._pending_removal.pop(option)
            # Python 2: map() runs eagerly, firing each removal Deferred.
            map(lambda d: d.callback(option), dlist)
            return
        self._options.append(option)
        self._fulfillNextPending()
    def list(self):
        """Return all managed options, free and checked out."""
        return self._all_options
| [
"haggardii@gmail.com"
] | haggardii@gmail.com |
7bea6a7d912067776c98db744ef90611d4af71f9 | 29c3fcdbdcafa87902fc1ebc2ab501bfc014e61c | /vpmoauth/permissions.py | b0271a6c7052f753c4968fa5f46aeb2b86f3c33a | [] | no_license | gingerComms/vpmo | 762151478298214d753d31619171e2cb5588e80b | 4eaf94f85c470056db6af9dd832e490fdf5ac920 | refs/heads/master | 2022-06-17T18:50:25.422109 | 2018-11-15T10:19:31 | 2018-11-15T10:19:31 | 140,488,185 | 0 | 0 | null | 2022-05-25T00:21:09 | 2018-07-10T21:17:51 | Python | UTF-8 | Python | false | false | 1,048 | py | from rest_framework import permissions
from vpmoauth.models import MyUser
class AssignRolesPermission(permissions.BasePermission):
    """Grants access when the requester may update user roles on the node.

    The requester must hold the ``update_<nodetype>_user_role`` permission
    for the target object.
    """
    deliverable_types = ["Deliverable", "Topic"]

    def has_object_permission(self, request, view, obj):
        # Permissions the requesting user holds on this specific node.
        # (Renamed from `permissions`, which shadowed the DRF module; the
        # unused `assigning_role = request.data.get("role")` read was dropped.)
        user_permissions = request.user.get_permissions(obj)
        required = "update_{}_user_role".format(obj.node_type.lower())
        return required in user_permissions
class RemoveRolesPermission(permissions.BasePermission):
    """Grants DELETE access to users allowed to remove others from a node."""

    def has_object_permission(self, request, view, obj):
        """True only for DELETE requests by holders of remove_<nodetype>_user."""
        required = "remove_{}_user".format(obj.node_type.lower())
        held = request.user.get_permissions(obj)
        return request.method == "DELETE" and required in held
"wigeriaaeriag@gmail.com"
] | wigeriaaeriag@gmail.com |
53118de399b53e109d78e4325c5bd5111b6a559a | 494387316b955e85b73bc971f253dab2f81094ec | /Sell All the Cars.py | d141c650ae86017075a3730cdf270ca18d36f7fc | [] | no_license | chandra2245/Codechef-April-Long-Challenge2020 | 78bfb20dc30efc6985e3d65e802fe3db2b5f8932 | f088573b9120d4d2b4a464fdc78927381b897c89 | refs/heads/master | 2023-06-12T06:04:59.779133 | 2021-06-30T13:47:14 | 2021-06-30T13:47:14 | 263,425,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | test_case=int(input())
# For each test case: sort prices descending and sum them, but each car sold
# at position k (0-based) loses k from its price; once a price would drop to
# zero or below, stop and subtract the accumulated position penalty.
# Answer is printed modulo 1e9+7.
while test_case>0:
    test_case-=1
    final=0
    n=int(input())
    #flag value storing index
    flag=None
    array=list(map(int,input().split()))
    array.sort()
    array.reverse()
    #iterating
    for iters in range(n):
        # A car worth less than its sale position contributes nothing useful;
        # record the last profitable index and stop.
        if array[iters]<iters:
            flag=iters-1
            break
        final+=array[iters]
    if flag!=None:
        # Penalty for selling flag+1 cars: 0+1+...+flag.
        temp_fin=(flag*(flag+1))//2
        final-=temp_fin
        print(final%1000000007)
    #if not none
    else:
        # All n cars sold: penalty is 0+1+...+(n-1).
        n=n-1
        temp_fin=(n*(n+1))//2
        final-=temp_fin
        print(final%1000000007)
    #if none
| [
"noreply@github.com"
] | noreply@github.com |
adaa57d9b720488987461130043aa0db176584e1 | 3b63782b0c499deeb9e52b1cfe10e0de1f0c14a9 | /quota/Unconfigured.py | cb6a3a2d6583cb7bec1e359cbfe648763a407c4a | [] | no_license | laobubu/ssland | db119efec0f1c635ae994e4308123f1108aa5f4f | 80aff970c608c51f6fca1f447507f4a03fd9bbe3 | refs/heads/universal | 2021-01-21T15:00:04.921472 | 2018-04-08T15:29:15 | 2018-04-08T15:29:15 | 59,768,159 | 113 | 27 | null | 2018-04-08T15:29:16 | 2016-05-26T17:00:52 | Python | UTF-8 | Python | false | false | 249 | py | '''
Default type of Quota. do not modify this.
'''
from django import forms
FRIENDLY_NAME = 'Unconfigured'
class Form(forms.Form):
    """Empty settings form: an unconfigured quota exposes nothing to edit."""
    pass
def descript(q, is_admin=False):
    """Return the human-readable description lines for this quota type."""
    description = "Unconfigured quota."
    return [description]
def is_exceeded(q):
    """An unconfigured quota can never be exceeded."""
    return False
| [
"laobubu@gmail.com"
] | laobubu@gmail.com |
4de27c4e999264cdb3702646a38ff55fe063e173 | c7be396d97d8aa16374ce94ab39bdc3413fbbcd1 | /python-apiai-webhook/device/application.py | 82c724e369d8325be0afcb77020ba0b5610e3cfe | [] | no_license | bsaxen/my_ioant_examples | 31cdf9c14939149c4bc7f307b40ed79e02a6948d | 5ec83f5d2577fa48aa7db6c314fb8139300386fd | refs/heads/master | 2021-01-22T02:04:20.463409 | 2019-01-20T14:02:51 | 2019-01-20T14:02:51 | 92,329,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,301 | py | # =============================================
# Benny Saxen
# Date: 2017-09-23
# Description: API.ai webhook with IOAnt support
# =============================================
from __future__ import print_function
from future.standard_library import install_aliases
install_aliases()
import json
import os
import thread
import server
from ioant.sdk import IOAnt
import logging
import hashlib
logger = logging.getLogger(__name__)
tValue = []
aliasToHash = []
aliasToTopic = []
#----------------------------------------------------
def getFilenameFromTopic(topic, extension):
#----------------------------------------------------
    """Return '<global>_<local>_<client_id>_<stream_index>.<extension>' for a topic dict."""
    stem = "_".join((topic['global'],
                     topic['local'],
                     topic['client_id'],
                     str(topic['stream_index'])))
    return stem + "." + extension
#----------------------------------------------------
def getFilenameFromAlias(alias, extension):
#----------------------------------------------------
    """Return '<alias>.<extension>'."""
    return alias + "." + extension
#----------------------------------------------------
def writeSubscription(topic, alias):
#----------------------------------------------------
    """Persist `alias` in the topic's '<topic>.sub' file.

    The file name encodes the topic, so readAlias() can later map an
    incoming topic back to its alias.
    """
    filename = getFilenameFromTopic(topic, "sub")
    # 'with' guarantees the handle is closed even if write() raises.
    with open(filename, "w") as subscription_file:
        subscription_file.write(alias)
    return
#----------------------------------------------------
def readAlias(topic):
#----------------------------------------------------
    """Return the alias previously stored for `topic` by writeSubscription()."""
    filename = getFilenameFromTopic(topic, "sub")
    # 'with' guarantees the handle is closed even if read() raises.
    with open(filename, "r") as subscription_file:
        return subscription_file.read()
#----------------------------------------------------
def writeValue(alias, value):
#----------------------------------------------------
    """Store the latest sensor reading for `alias` in '<alias>.value'."""
    filename = getFilenameFromAlias(alias, "value")
    # 'with' guarantees the handle is closed even if write() raises.
    with open(filename, "w") as value_file:
        value_file.write(value)
    return
#----------------------------------------------------
def readValue(alias):
#----------------------------------------------------
    """Return the last value stored for `alias` by writeValue()."""
    filename = getFilenameFromAlias(alias, "value")
    # 'with' guarantees the handle is closed even if read() raises.
    with open(filename, "r") as value_file:
        return value_file.read()
#----------------------------------------------------
def getTopicHash(topic):
#----------------------------------------------------
    """Return a stable 8-digit integer identifying a fully-qualified topic.

    Uses md5 (hashlib is already imported at module level) instead of the
    built-in hash(): string hash() values change between interpreter runs
    (PYTHONHASHSEED), so they cannot identify a topic across restarts.
    """
    res = topic['top'] + topic['global'] + topic['local'] + topic['client_id'] + str(topic['message_type']) + str(topic['stream_index'])
    digest = hashlib.md5(res.encode('utf-8')).hexdigest()
    return int(digest, 16) % 10**8
#----------------------------------------------------
def subscribe_to_topic(t_alias,t_global,t_local,t_clientid, t_streamindex):
#----------------------------------------------------
    """Record alias->topic mapping on disk and subscribe via the global ioant client."""
    topic = ioant.get_topic_structure()
    topic['top'] = 'live'
    topic['global'] = t_global
    topic['local'] = t_local
    topic['client_id'] = t_clientid
    #topic['message_type'] = ioant.get_message_type(msgt)
    topic['stream_index'] = str(t_streamindex)
    # Persist the alias so on_message() can resolve it for incoming data.
    writeSubscription(topic,t_alias)
    print("Subscribe to: ", str(topic))
    ioant.subscribe(topic)
    return
#----------------------------------------------------
def intent_request(req):
#----------------------------------------------------
    global tValue
    global aliasToHash
    global aliasToTopic
    """ Handles and responds on webhook request from API.ai """
    # NOTE: the string above is not a docstring (it follows statements);
    # kept in place to avoid changing the statement order.
    action = req.get("result").get("action")
    print("request ioant action:", action)
    topic = ioant.get_topic_structure()
    #topic['global'] = configuration["api"]["ai"]["global"]
    #topic['local'] = configuration["publish_topic"]["CPUtemp"]["local"]
    topic['client_id'] = "bot1"
    #topic['stream_index'] = configuration["publish_topic"]["CPUtemp"]["stream_index"]
    #----------------------------------------------------
    if action == "heater.increase":
    #----------------------------------------------------
        # Clamp the requested step count to [1, 20].
        steps = int(req.get("result").get("parameters").get("steps"))
        if steps < 1:
            steps = 1
        if steps > 20:
            steps = 20
        # Hard-coded target device for the heater valve.
        topic['global'] = "kil"
        topic['local'] = "kvv32"
        topic['client_id'] = "D1"
        msg = ioant.create_message("RunStepperMotorRaw")
        # Counter-clockwise opens the valve (warmer).
        msg.direction = msg.COUNTER_CLOCKWISE
        msg.delay_between_steps = 5
        msg.number_of_step = steps
        msg.step_size = msg.FULL_STEP
        action_text = "Warmer " + str(msg.number_of_step)
        ioant.publish(msg,topic)
    #----------------------------------------------------
    elif action == "heater.decrease":
    #----------------------------------------------------
        # Clamp the requested step count to [1, 20].
        steps = int(req.get("result").get("parameters").get("steps"))
        if steps < 1:
            steps = 1
        if steps > 20:
            steps = 20
        topic['global'] = "kil"
        topic['local'] = "kvv32"
        topic['client_id'] = "D1"
        msg = ioant.create_message("RunStepperMotorRaw")
        # Clockwise closes the valve (cooler).
        msg.direction = msg.CLOCKWISE
        msg.delay_between_steps = 5
        msg.number_of_step = steps
        msg.step_size = msg.FULL_STEP
        action_text = "Cooler " + str(msg.number_of_step)
        ioant.publish(msg,topic)
    #----------------------------------------------------
    elif action == "mqtt.subscribe":
    #----------------------------------------------------
        # Subscribe to the topic described by the intent parameters and
        # remember it under the given alias.
        t_alias = str(req.get("result").get("parameters").get("alias"))
        topic['global'] = str(req.get("result").get("parameters").get("global"))
        topic['local'] = str(req.get("result").get("parameters").get("local"))
        topic['client_id'] = str(req.get("result").get("parameters").get("clientid"))
        topic['stream_index'] = str(req.get("result").get("parameters").get("streamindex"))
        #aliasToHash[topic['alias']] = getTopicHash(topic)
        subscribe_to_topic(t_alias,topic['global'],topic['local'],topic['client_id'],topic['stream_index'])
        action_text = "Subscribe to " + str(topic) + " " + t_alias
    #----------------------------------------------------
    elif action == "show.value":
    #----------------------------------------------------
        # Answer with the last value stored for the alias by on_message().
        t_alias = str(req.get("result").get("parameters").get("alias"))
        value = readValue(t_alias)
        action_text = t_alias + " " + str(value)
    else:
        # Unknown action: empty response, API.ai uses its own fallback.
        return {}
    print("Action chosen:" + action_text)
    # Dict that will be returned as JSON to API.ai
    return {
        "speech": action_text,
        "displayText": action_text,
        # "data": data,
        # "contextOut": [],
        "source": ioant.get_configuration()["app_name"]
    }
#=====================================================
def setup(configuration):
#=====================================================
    """ setup function """
    ioant.setup(configuration)
    # Serve API.ai webhook requests in a background thread; each request
    # is handled by intent_request().
    thread.start_new_thread(server.init_server,(configuration["web_server"]["port"],
                            intent_request))
    print("Setup Done")
#=====================================================
def loop():
#=====================================================
    global tValue
    #global hashToAlias
    global aliasToHash
    global aliasToTopic
    """ Loop function """
    # Pump the MQTT client; called repeatedly by the IOAnt runtime.
    ioant.update_loop()
#=====================================================
def on_message(topic, message):
#=====================================================
    """Store incoming Temperature/ElectricPower readings under the topic's alias."""
    #global tValue
    #tHash = getTopicHash(topic)
    print("Message recieved ...", ioant.get_message_type_name(topic['message_type']))
    #if topic["message_type"] == ioant.get_message_type("Trigger"):
    # Resolve the alias recorded when the topic was subscribed.
    t_alias = readAlias(topic)
    if "Temperature" == ioant.get_message_type_name(topic['message_type']):
        print("Message received of type Temperature")
        print("Contains value:" + str(message.value))
        writeValue(t_alias,str(message.value))
    if "ElectricPower" == ioant.get_message_type_name(topic['message_type']):
        print("Message received of type ElectricPower")
        print("Contains value:" + str(message.value))
        writeValue(t_alias,str(message.value))
#=====================================================
def on_connect():
#=====================================================
    """ On connect function. Called when connected to broker """
ioant = IOAnt(on_connect, on_message)
| [
"benny.saxen@gmail.com"
] | benny.saxen@gmail.com |
bd5007f5ea485e97bda7e458055eb62fbd663a8a | 8520c991dc543f5f4e1efe59ab401824173bb985 | /457-circular-array-loop/solution.py | e31ba44f932755442e14be18a59b1756d31061c2 | [] | no_license | katryo/leetcode | d44f70f2853c4f5ea9a462d022feb0f5436c2236 | 0da45559271d3dba687858b8945b3e361ecc813c | refs/heads/master | 2020-03-24T12:04:53.859047 | 2020-02-18T04:27:55 | 2020-02-18T04:27:55 | 142,703,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,668 | py | class Solution(object):
    def circularArrayLoop(self, nums):
        """
        Detect a cycle of length > 1 whose jumps all share one sign.

        Uses Floyd's fast/slow pointers per start index; visited chains are
        zeroed out afterwards so each element is examined only once.

        :type nums: List[int]
        :rtype: bool
        """
        def next(idx):
            # Index reached after one jump, wrapping circularly.
            return (idx + nums[idx]) % len(nums)
        for i in range(len(nums)):
            if nums[i] == 0:
                # 0 marks an already-eliminated (dead) chain.
                continue
            pslow = i
            pfast = next(pslow)
            npfast = next(pfast)
            # Advance while every visited jump keeps the sign of nums[i];
            # a sign change means no single-direction loop through here.
            while nums[i] * nums[pfast] > 0 and nums[i] * nums[next(pfast)] > 0:
                if pfast == pslow:
                    # Pointers met: a cycle exists. Reject self-loops
                    # (cycle length 1), which the problem disallows.
                    if next(pslow) == pslow:
                        break
                    return True
                pfast = next(next(pfast))
                pslow = next(pslow)
            # Kill the explored chain so later starts skip it.
            j = i
            while nums[j] != 0:
                nums[j] = 0
                nxt = next(j)
                j = nxt
        return False
# You are given an array of positive and negative integers.
# If a number n at an index is positive, then move forward n steps.
# Conversely, if it's negative (-n), move backward n steps.
# Assume the first element of the array is forward next to the last element,
# and the last element is backward next to the first element. Determine if there is a loop in this array.
# A loop starts and ends at a particular index with more than 1 element along the loop.
# The loop must be "forward" or "backward'.
if __name__ == '__main__':
s = Solution()
print(s.circularArrayLoop([3, 1, 2]))
print(s.circularArrayLoop([-1]))
print(s.circularArrayLoop([2, -1, 1, -2, -2]))
print(s.circularArrayLoop([-2, 1, -1, -2, -2]))
print(s.circularArrayLoop([2, -1, 1, 2, 2]))
print(s.circularArrayLoop([-1, 2]))
| [
"katoryo55@gmail.com"
] | katoryo55@gmail.com |
d98f3ed569fc29eb0c4c57187c6023849f737681 | d22df6ab8afcf030b92c6813b11cb4f6f1eb2174 | /etude_de_base/ufwi-administration-suite-ufwi-conf/ufwi_conf/backend/components/exim/exim.py | 9518e7893bad6b61379c43510a5f543ed459473c | [] | no_license | maximerobin/Ufwi | 67dbee6c90b21b5a6a1815e3853b9ec8e10747b7 | a516e52535534262fce127d96812b7ded4171707 | refs/heads/master | 2021-01-22T23:16:40.064512 | 2012-03-28T11:12:04 | 2012-03-28T11:12:04 | 3,254,471 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,976 | py | #coding: utf-8
"""
Copyright (C) 2008-2011 EdenWall Technologies
Written by Michael Scherer <m.scherer AT inl.fr>
This file is part of NuFirewall.
NuFirewall is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
NuFirewall is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with NuFirewall. If not, see <http://www.gnu.org/licenses/>
"""
from __future__ import with_statement
import subprocess # TODO remove
from twisted.internet.threads import deferToThread
from twisted.internet.defer import inlineCallbacks, returnValue
from error import NuConfError, MAIL_BAD_CONFIGURATION
from ufwi_rpcd.common import tr
from ufwi_rpcd.backend.exceptions import ConfigError
from ufwi_rpcd.common import EDENWALL
from ufwi_rpcd.core.context import Context
from ufwi_conf.backend.unix_service import (
ConfigServiceComponent,
runCommandAndCheck,
)
from ufwi_conf.common.antispam_cfg import AntispamConf
from ufwi_conf.common.contact_cfg import ContactConf
from ufwi_conf.common.mail_cfg import MailConf
# Paths of the exim configuration files managed by EximComponent.
# "generated file" = rendered from a template with the variables listed
# under it; "Static file" = installed as-is.
# generated file
GENFILE_HUBBED_HOSTS = '/etc/exim4/hubbed_hosts'
# * relay_domain_in
# Static file
GENFILE_LOCAL_ACL_CONF = '/etc/exim4/local_acl.conf'
#
GENFILE_UPDATE_EXIM4_CONF_CONF = '/etc/exim4/update-exim4.conf.conf'
# * smarthost
# * dc_relay_domain: can be empty
# * dc_relay_nets: can be empty
# Static file
GENFILE_MACRODEFS = '/etc/exim4/conf.d/main/01_exim4-config_listmacrosdefs-local'
# references the 2 following local_acl_antispam.conf & local_acl_antivirus.conf
GENFILE_LOCAL_ACL_ANTISPAM = '/etc/exim4/local_acl_antispam.conf'
# generated file
GENFILE_LOCAL_ACL_ANTIVIRUS = '/etc/exim4/local_acl_antivirus.conf'
# * use_antivirus
GENFILE_MAILNAME = '/etc/mailname'
# Every file above; registered with the base class in init().
GEN_FILES = (
    GENFILE_HUBBED_HOSTS,
    GENFILE_LOCAL_ACL_CONF,
    GENFILE_MACRODEFS,
    GENFILE_LOCAL_ACL_ANTISPAM,
    GENFILE_LOCAL_ACL_ANTIVIRUS,
    GENFILE_MAILNAME,
    GENFILE_UPDATE_EXIM4_CONF_CONF,
)
class EximComponent(ConfigServiceComponent):
    """
    Manage the basic configuration of a exim mail server
    """
    # --- component identity / process metadata used by the framework ---
    NAME = "exim"
    VERSION = "1.0"
    PIDFILE = "/var/run/exim4/exim.pid"
    EXE_NAME = "exim4"
    INIT_SCRIPT = 'exim4'
    # Components this one depends on; the antispam component only exists
    # on EdenWall builds.
    REQUIRES = ('config', 'ufwi_conf', 'hosts', 'hostname')
    if EDENWALL:
        REQUIRES += ('antispam', )
    # not used
    CONFIG = {}
    CONFIG_DEPENDS = frozenset(('antivirus', 'antispam', 'hostname', 'hosts'))
    # Services this component is allowed to call on other components.
    ACLS = {
        'antispam': set(('getAntispamConfig',)),
        'antivirus': set(('use',)),
        'CORE': set(('hasComponent',)),
        'hostname': set(('getShortHostname',)),
        'hosts': set(('getFqdn',)),
    }
    # Which roles may call which of this component's own services.
    ROLES = {
        'conf_read': set(('getMailConfig', 'status')),
        'conf_write': set(('setMailConfig',)),
    }
    # Validators reused from the base class.
    check_relay_host = ConfigServiceComponent.check_ip_or_domain
    check_virus_scan = ConfigServiceComponent.check_boolean

    def __init__(self):
        # Filled in by read_config(); None until then.
        self.config = None
        ConfigServiceComponent.__init__(self)

    def init(self, core):
        """Register every generated file so the base class tracks ownership
        and permissions for them."""
        ConfigServiceComponent.init(self, core)
        for genfile in GEN_FILES:
            self.addConfFile(genfile, 'root:root', '0644')

    def read_config(self, responsible, *args, **kwargs):
        """Load the mail configuration from the config manager.

        Keeps the default configuration when the component has never been
        configured, or when the stored values fail validation (an error is
        logged in that case).
        """
        self.config = MailConf.defaultConf()
        try:
            serialized = self.core.config_manager.get(self.NAME)
        except ConfigError:
            # No stored entry yet: stay on defaults.
            self.debug("Not configured, defaults loaded.")
            return
        config = MailConf.deserialize(serialized)
        valid, error = config.isValidWithMsg()
        if valid:
            self.config = config
        else:
            self.error(
                "Component %s read incorrect values. Message was: %s" % (self.NAME, error)
            )

    def save_config(self, message, context):
        """Persist self.config under this component's key and commit.

        The previous entry (if any) is deleted first, then replaced.
        """
        serialized = self.config.serialize()
        with self.core.config_manager.begin(self, context) as cm:
            try:
                cm.delete(self.NAME)
            except:
                # First save: nothing to delete.
                pass
            cm.set(self.NAME, serialized)
            cm.commit(message)

    def should_run(self, responsible):
        # The exim daemon is always expected to run.
        return True

    @inlineCallbacks
    def genConfigFiles(self, responsible):
        """Collect template variables and regenerate the exim config files.

        Variables come from the MailConf attributes, the system FQDN and
        hostname, the relay lists and the antispam settings.
        """
        templates_variables = {}
        for attr in MailConf.ATTRS:
            templates_variables[attr] = getattr(self.config, attr)
        context = Context.fromComponent(self)
        fqdn = yield self.core.callService(context, 'hosts', 'getFqdn')
        responsible.feedback(tr("Default FQDN is %(FQDN)s"), FQDN=fqdn)
        hostname = yield self.core.callService(
            context, 'hostname', 'getShortHostname'
        )
        responsible.feedback(
            tr("Default hostname is %(HOSTNAME)s"), HOSTNAME=hostname
        )
        templates_variables.update({'fqdn': fqdn, 'hostname': hostname})
        templates_variables.update(self._getrelayed())
        yield self.addAntispamConfig(context, templates_variables, responsible)
        self.generate_configfile(templates_variables)
        yield self.updateConf(responsible)

    def updateConf(self, responsible):
        # NOTE(review): this generator has no @inlineCallbacks decorator,
        # so `yield self.updateConf(...)` in genConfigFiles yields the bare
        # generator and the command below may never run -- confirm.
        yield deferToThread(runCommandAndCheck, self,
            ("/usr/sbin/update-exim4.conf",))

    @inlineCallbacks
    def addAntispamConfig(self, context, templates_variables, responsible):
        """Fill the antispam-related template variables.

        Any failure while fetching/parsing the antispam configuration is
        reported through *responsible* and treated as "antispam disabled".
        """
        try:
            serialized_antispam_cfg = yield self.core.callService(context,
                'antispam', 'getAntispamConfig')
            antispam_cfg = AntispamConf.deserialize(serialized_antispam_cfg)
        except Exception, err:
            self.writeError(err)
            responsible.feedback(tr("Unreadable antispam configuration"))
            use_antispam = False
        else:
            use_antispam = antispam_cfg.use_antispam
        if not use_antispam:
            templates_variables['use_antispam'] = False
            responsible.feedback(tr("Not configured as an antispam system."))
            return
        templates_variables['use_antispam'] = True
        responsible.feedback(tr("Configuring as an antispam system."))
        # Spam levels are floats in the config; the templates get them
        # scaled by 10 and truncated to ints.
        mark_spam_level = float(antispam_cfg.mark_spam_level)
        responsible.feedback(tr("Spam mark level: %(LEVEL)s"), LEVEL=mark_spam_level)
        templates_variables['mark_spam_level'] = int(10 * mark_spam_level)
        deny_spam_level = float(antispam_cfg.deny_spam_level)
        responsible.feedback(tr("Spam rejection level: %(LEVEL)s"), LEVEL=deny_spam_level)
        templates_variables['deny_spam_level'] = int(10 * deny_spam_level)

    def service_getrelayed(self, context):
        """
        pre-format relay_domains var
        """
        return self._getrelayed()

    def _getrelayed(self):
        """Return dc_relay_domains / dc_relay_nets as quoted,
        colon-separated strings (empty string when the list is unset)."""
        dc_relay_domains = self.config.relay_domain_in
        dc_relay_nets = self.config.relay_net_out
        if not dc_relay_domains:
            dc_relay_domains = ''
        else:
            dc_relay_domains = \
                "'%s'" % ":".join((unicode(domain) for domain in dc_relay_domains))
        if not dc_relay_nets:
            dc_relay_nets = ''
        else:
            dc_relay_nets = \
                "'%s'" % ":".join((net.strNormal() for net in dc_relay_nets))
        return {
            'dc_relay_domains': dc_relay_domains,
            'dc_relay_nets': dc_relay_nets
        }

    def get_ports(self):
        # SMTP only.
        ports = [ {'proto':'tcp', 'port': 25} ]
        return ports

    # services
    def service_getMailConfig(self, context):
        """Return the current mail configuration, serialized."""
        return self.config.serialize()

    def service_setMailConfig(self, context, serialized, message):
        """Validate, store and apply a new mail configuration.

        Raises NuConfError on serial-version mismatch or invalid values;
        returns a Deferred that propagates the antivirus registration.
        """
        config = MailConf.deserialize(serialized)
        if config.getReceivedSerialVersion() != 1:
            raise NuConfError(
                MAIL_BAD_CONFIGURATION,
                "Incompatible version: %s" % config.getReceivedSerialVersion()
            )
        valid, error = config.isValidWithMsg()
        if not valid:
            raise NuConfError(
                MAIL_BAD_CONFIGURATION,
                "'%s' failed : '%s'" % (valid, error)
            )
        self.config = config
        self.save_config(message, context)
        # NOTE(review): this re-read result is never used.
        serialized = self.core.config_manager.get(self.NAME)
        defer = self.core.callService(context, 'CORE', 'hasComponent', 'antivirus')
        defer.addCallback(self._use_antivirus, context)
        defer.addErrback(self.writeError)
        return defer

    def _use_antivirus(self, has_component, context):
        """Tell the antivirus component (when present) whether exim uses it."""
        if has_component:
            defer = self.core.callService(context, 'antivirus', 'use', self.NAME, self.config.use_antivirus)
            return defer
        else:
            self.debug('antivirus component not available')

    # Not used yet
    #def service_searchLogs(self, context, string):
    #    """
    #    Search the logs for the specified string
    #    """
    #    return deferToThread(self.search_log, string)
    #def search_log(self, string):
    #    return subprocess.Popen(["/usr/sbin/exigrep", string, '/var/log/exim4/mainlog'], stdout=subprocess.PIPE).communicate()[0]
    #def service_searchMailQueue(self, context, string):
    #    """
    #    Search the current mail queue for the specified string
    #    """
    #    return deferToThread(self.search_queue, string)
    #def search_queue(self, string):
    #    return subprocess.Popen(["/usr/sbin/exiqgrep",string], stdout=subprocess.PIPE).communicate()[0]

    def service_restart(self, context):
        """Sync the ClamAV usage counter, then restart exim."""
        self.manage_clamav(context)
        return ConfigServiceComponent.service_restart(self, context)

    def manage_clamav(self, context):
        """Adjust the ClamAV usage counter when virus scanning is toggled.

        NOTE(review): CONFIG is the empty dict declared above (marked
        "not used") and self.old_clamav_config is never initialised in
        __init__, so this path looks like legacy/dead code -- verify.
        """
        if self.old_clamav_config == self.CONFIG['virus_scan']:
            return
        if self.old_clamav_config:
            self.core.callServiceSync(context, "Clamav", "decrementUsageCount")
        else:
            self.core.callServiceSync(context, "Clamav", "incrementUsageCount")
        self.old_clamav_config = self.CONFIG['virus_scan']

    def service_start(self, context):
        # NOTE(review): decrementUsageCount on start looks inverted compared
        # to manage_clamav(), which increments when scanning turns on --
        # confirm intended behaviour before relying on this.
        if self.CONFIG['virus_scan']:
            self.core.callServiceSync(context, "Clamav", "decrementUsageCount")
        self.old_clamav_config = self.CONFIG['virus_scan']
        return ConfigServiceComponent.service_start(self, context)

    def service_stop(self, context):
        # Release our ClamAV usage claim (if any) before stopping exim.
        if self.old_clamav_config:
            self.core.callServiceSync(context, "Clamav", "decrementUsageCount")
        return ConfigServiceComponent.service_stop(self, context)

    #@inlineCallbacks
    #def service_status(self, context):
    #    ret = yield self.core.callService(context, 'contact', 'status')
    #    ret = (self.NAME, ret[1])
    #    returnValue(ret)
| [
"mdp.robin@gmail.com"
] | mdp.robin@gmail.com |
84c8bc28d069f1510ef5a0bc2b52bc09cf9b9b9e | 40ab5bb935cf055413c2439dd12891ec58da0b24 | /main/views.py | e4ca63fa270eec41f607f73ddc8922a4b0e99a0c | [] | no_license | sanketnakhate26/stockManager | 33e58030ee13d28fa7ddd91b043c8037b1f5ea07 | 6fc0456a14ead50b70f802b74774adb9287dfa57 | refs/heads/master | 2022-12-07T00:58:42.735860 | 2020-08-24T11:49:55 | 2020-08-24T11:49:55 | 265,965,668 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,992 | py | from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from main.models import Overview
from main.models import History
from .forms import OverviewForm
from django.db.models import Sum
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from django.http import QueryDict
Global_user = ""
def calculateTax(price, type):
    """Return the transaction tax for a trade of value *price*.

    Delivery trades pay a flat 0.1063% of the trade value.  Any other
    trade type pays 0.0781% up to a value of 40,000, and a flat 20 plus
    0.0085% of the value above that threshold.
    """
    if type == "delivery":
        return price * 0.001063
    # Non-delivery (intraday-style) trade.
    if price > 40000:
        return 20 + price * 0.000085
    return price * (0.000781)
@login_required(login_url="/login/")
def overview(request):
    """List every Overview row and handle the portfolio actions.

    The template posts one of four buttons; each action is dispatched on
    the button name present in request.POST:
      * create_post -- validate and save a new Overview row.
      * update_post -- posted by the template, no server-side behaviour yet.
      * sell_post   -- archive the position into History (with tax and
                       profit/loss) and delete it from Overview.
      * delete_post -- delete the position without archiving.

    Fixed: removed the debug print of the always-empty Global_user and the
    unused locals (the first `context` dict and `data_test`).
    """
    # QuerySet is lazy, so it reflects any save/delete done below when the
    # template finally iterates it.
    all_overview = Overview.objects.all()
    form = OverviewForm()
    if request.method == 'POST':
        if 'create_post' in request.POST:
            form = OverviewForm(request.POST)
            if form.is_valid():
                form.save()
                # Present a blank form again after a successful save;
                # an invalid form is re-rendered with its errors.
                form = OverviewForm()
        if 'sell_post' in request.POST:
            target_id = request.POST['stock_id']
            sellprice = request.POST['sell_stock']
            login_username = request.POST['login_username']
            instance = Overview.objects.get(id=target_id)
            # NOTE(review): tax is computed on buy_price + sell_price --
            # confirm this is the intended tax base.
            total_tax = calculateTax( (instance.buy_price + float(sellprice)), instance.trade_type)
            record = History(name=instance.name, quantity=instance.quantity, buy_price=float(instance.buy_price), sell_price=float(sellprice), trade_type=instance.trade_type, username=login_username, date_buy=instance.date_buy, tax=total_tax, profit_loss=((instance.quantity * (float(sellprice)-float(instance.buy_price)))- total_tax))
            record.save()
            instance.delete()
        if 'delete_post' in request.POST:
            target_id = request.POST['stock_id']
            instance = Overview.objects.get(id=target_id)
            instance.delete()
    context = {'form': form, 'all_overview': all_overview}
    return render(request, 'main/overview.html', context)
@login_required(login_url="/login/")
def history(request):
    """Render the archive of sold positions.

    Total_profit_loss is currently hard-wired to 0; the Sum aggregate over
    profit_loss is disabled.
    """
    data = History.objects.all()
    Total_profit_loss = 0
    return render(request, 'main/history.html',
                  {'data': data, 'Total_profit_loss': Total_profit_loss})
def user_login(request):
    """Authenticate the posted credentials and open a session.

    On success, honours an optional ?next= redirect target, otherwise
    redirects to the site root.  On failure (or plain GET) the login page
    is rendered, with an error message after a failed attempt.

    Fixed: the assignment to Global_user previously created a *local*
    variable, so the module-level Global_user always stayed "" -- it now
    carries the declared `global` and actually records the last login.
    Debug prints removed.
    """
    global Global_user
    context = {}
    if request.method == "POST":
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(request, username=username, password=password)
        if user:
            login(request, user)
            Global_user = user.username
            if request.GET.get('next', None):
                return HttpResponseRedirect(request.GET['next'])
            return HttpResponseRedirect('/')
        context["error"] = "Provide Valid Credentials"
    return render(request, "auth/login.html", context)
@login_required(login_url="/login/")
def success(request):
    """Render the post-login landing page for the authenticated user."""
    return render(request, "auth/success.html", {'user': request.user})
@login_required(login_url="/login/")
def user_logout(request):
    """Close the session (POST) and send the user back to the login page.

    Fixed: the original returned None for non-POST requests, which makes
    Django raise a 500; every request now gets a redirect response (only a
    POST actually logs out).
    """
    if request.method == "POST":
        logout(request)
    return HttpResponseRedirect(reverse('user_login'))
| [
"snakhate@cisco.com"
] | snakhate@cisco.com |
945e6af56bb2a8322e9583389d14c9810cdc23a9 | 59976760649d2b28c28523140c1457232d4108c9 | /hacker-rank/algorithms/PalindromeIndex.py | 00adae5bb02a68ff3f8a63a255f56fc1ff29a6f5 | [
"MIT"
] | permissive | masharp/algorithms-n-structures | b0149517889800b0d3ff3c8ec2bc1c8e699138a0 | 97c360f9d295b6eedb5f951ffffe35ea0083d024 | refs/heads/master | 2023-03-03T20:49:10.174660 | 2023-03-01T00:17:26 | 2023-03-01T00:17:26 | 37,022,730 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | """
* Palindrome Index (Python)
* HackerRank Algorithm Challenge
* https://www.hackerrank.com/challenges/palindrome-index
*
* NOTE: Currently passes half of test cases. Times out on large cases
"""
# Sample inputs for a quick manual check of findPalindromeIndex below.
testA = 'aaab';
testB = 'baa';
testC = 'aaa';
testD = 'hgygsvlfcwnswtuhmyaljkqlqjjqlqkjlaymhutwsnwcwflvsgygh';
testQ = 'abcdefg';
def checkPalindrome(str):
    """Return True if *str* reads the same forwards and backwards.

    Fixes over the original: scan only the first half (each position x
    mirrors len-1-x, so going further repeats every comparison), drop the
    dead `x != len/2 - 1` exemption (a mismatch exempted there is always
    re-detected at its mirror index, so results are unchanged), and use
    range so the function also runs on Python 3 (xrange is Python 2 only).
    Parameter keeps its original (builtin-shadowing) name for
    compatibility with keyword callers.
    """
    for x in range(len(str) // 2):
        if str[x] != str[len(str) - 1 - x]:
            return False
    return True
def findPalindromeIndex(input):
    """Return the smallest index whose removal makes *input* a palindrome,
    or -1 when *input* already is one.

    Fixes over the original: the per-candidate `text = input[:]` copy was
    pointless (strings are immutable and the slice below already builds a
    new string), and range replaces the Python 2-only xrange.  The brute
    force smallest-index semantics are preserved; worst case is still
    O(n^2) -- a two-pointer rewrite would be O(n) but returns a possibly
    different (though equally valid) index.
    """
    if checkPalindrome(input):
        return -1
    for x in range(len(input)):
        if checkPalindrome(input[:x] + input[x + 1:]):
            return x
# Python 2 script tail: run one sample and print the chosen index.
result = findPalindromeIndex(testD)
print result
| [
"msharp.oh@gmail.com"
] | msharp.oh@gmail.com |
ae2edc8589f0c988e83769e45b4b2f1c038156ac | 8abc4260e0c387b9b4f79bee86b1ed07d356f843 | /main/migrations/0011_auto_20151109_2018.py | 0eefd01035011bf4d755164a1c296c0af3909801 | [] | no_license | justinhammer/Django_States | 4daae335ce52f2a91bfaa54c2a32408595b2356c | 61be25c01a90174be379d21d3de0b1aba9edb9c2 | refs/heads/master | 2021-01-10T09:32:36.086167 | 2015-11-17T20:37:55 | 2015-11-17T20:37:55 | 44,116,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter State.name: add a database index and allow null/blank values."""

    dependencies = [
        ('main', '0010_auto_20151012_1918'),
    ]

    operations = [
        migrations.AlterField(
            model_name='state',
            name='name',
            field=models.CharField(db_index=True, max_length=255, null=True, blank=True),
        ),
    ]
| [
"justhamr@gmail.com"
] | justhamr@gmail.com |
cabf485b998ad1b05d897d637356a88473eb549d | 1fae5832045750882e0ec72132210adba186292f | /algorithms/226.py | 8361f40692dc84ebc8eccdaa3bb679d87b6401f7 | [] | no_license | JmeHsieh/leetcode | 44c15577f2a48e6d610ee8571448a46fe659a1e3 | ab8511e39d302c087c300eac416aa3e27e5876c5 | refs/heads/master | 2021-01-12T16:15:34.629313 | 2016-10-26T03:22:03 | 2016-10-26T03:22:03 | 71,962,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | # https://leetcode.com/problems/invert-binary-tree
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def invertTree(self, root):
        """Mirror the binary tree rooted at *root* in place and return it.

        :type root: TreeNode
        :rtype: TreeNode
        """
        if root is not None:
            # Swap the children first, then mirror each subtree.
            root.left, root.right = root.right, root.left
            self.invertTree(root.left)
            self.invertTree(root.right)
        return root
| [
"jmehsieh@gmail.com"
] | jmehsieh@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.