blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b19f24fb25557d90c289eb5bc08d12d6d4953a00 | 8d46cf39d641244cd8a4bd368779d91ff75dbf78 | /map_neigh/migrations/0002_auto_20200122_1357.py | 46c1d0574bc3a16a02756d7a879be70318233112 | [] | no_license | RanneKapcie/nofences | 9242d8e9bfe740c20ddf9d2b0393eabf10ee5c3a | a4c3ee5e8f1c546c505e6ffa5a294d0c9b3d5e79 | refs/heads/master | 2020-12-20T05:33:00.225046 | 2020-04-23T13:35:52 | 2020-04-23T13:35:52 | 235,978,087 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | # Generated by Django 2.2b1 on 2020-01-22 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: alter CustomUserModel.address to a
    ForeignKey to Buildings with ON DELETE CASCADE."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('map_neigh', '0001_initial'),
    ]

    operations = [
        # Deleting the referenced Buildings row cascades to rows holding
        # this foreign key.
        migrations.AlterField(
            model_name='customusermodel',
            name='address',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='map_neigh.Buildings'),
        ),
    ]
| [
"kurasz.leszek@gmail.com"
] | kurasz.leszek@gmail.com |
c5daf96e1ec9ac90dc1db252619f073fb6d4df6d | 179a0f995f5a3eb7a6005f8e96498ef21b2bf166 | /docs/conf.py | 45ccdf6d81b9baf63f859bf4fc96836c47707904 | [
"MIT"
] | permissive | VB6Hobbyst7/pycatia | 845052a4584318bf0cf0861512203ddd337a7bca | cff309fe2b4802ff2b2c5c984f8064747f81065d | refs/heads/master | 2023-04-14T20:28:51.427101 | 2021-04-27T11:03:42 | 2021-04-27T11:03:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,936 | py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from unittest.mock import MagicMock
sys.path.insert(0, os.path.abspath('..'))
class Mock(MagicMock):
    """Stand-in used to stub out modules Sphinx cannot import during the
    docs build; any attribute access yields a fresh MagicMock."""
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
MOCK_MODULES = ['pywin32', 'win32com.client', 'pywintypes']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# -- Project information -----------------------------------------------------
project = 'pycatia'
copyright = '2020, Paul Bourne'
author = 'Paul Bourne'
# The short X.Y version
version = '0.5.0'
# The full version, including alpha/beta/rc tags
release = version
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.todo',
'sphinx_togglebutton',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'github_user': 'evereux',
'github_repo': 'pycatia',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_theme_path = []
html_css_files = [
'css/pycatia.css',
]
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pycatiadoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pycatia.tex', 'pycatia Documentation',
'Paul Bourne', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pycatia', 'pycatia Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pycatia', 'pycatia Documentation',
author, 'pycatia', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
| [
"evereux@gmail.com"
] | evereux@gmail.com |
87cfca3de3f6d49048d1a833f67859a87f6d1a0b | 1d35b5254d8b13773ddc58d48092b9e92e049a04 | /10K-NearestNeighbour.py | e9256fa591c9cb24dd7d5cdcd584f026e60f45a8 | [] | no_license | ngthotuan/LearnOpenCV | f7b9cf9e3455f7eba1f4c263a25ec3e0a45bf8c9 | 34e9b0be292e54f5cd39802ca9ccf0d616e7275c | refs/heads/master | 2022-12-29T10:13:58.666053 | 2020-10-15T15:39:21 | 2020-10-15T15:39:21 | 304,267,035 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt

# Build a random training set: 50 2-D points in [0, 100), each with a
# random binary class label (1 = red, 0 = blue).
n = 50
trainData = np.random.randint(0, 100, [n, 2]).astype(np.float32)
color = np.random.randint(0, 2, n).astype(np.float32)
red = trainData[color == 1]
blue = trainData[color == 0]
# Plot the two classes with distinct colours/markers.
plt.scatter(red[:, 0], red[:, 1], 100, 'r', '<')
plt.scatter(blue[:, 0], blue[:, 1], 100, 'b', '^')
# A single new random point to classify (shown as a green square).
newMember = np.random.randint(0, 100, [1, 2]).astype(np.float32)
plt.scatter(newMember[:, 0], newMember[:, 1], 100, 'g', 's')
# Train OpenCV's k-NN on the labelled points, then classify the new
# point by its 3 nearest neighbours.
knn = cv.ml.KNearest_create()
knn.train(trainData, cv.ml.ROW_SAMPLE, color)
ret, results, neighbours, dist = knn.findNearest(newMember, 3)
print("result: {}\n".format(results))
print("neighbours: {}\n".format(neighbours))
print("distance: {}\n".format(dist))
plt.show()
| [
"ngthotuan@gmail.com"
] | ngthotuan@gmail.com |
ff0772f888c25c316c0745c323b99f6f4d5bc740 | 66e0c0d863d2c8781600f7be443a7ef728886478 | /zo/log/_level.py | 0a2f2535c1335ddfc59a8dc3ef20116d5a23d8ca | [] | no_license | daiooo/zo | 2257007c3f8b3ecdf444ab9f60ee0f3ce0e6eae7 | a087674ff8f1e3f006869721138b13a76973b766 | refs/heads/master | 2021-04-23T11:00:29.723706 | 2020-04-03T08:42:50 | 2020-04-03T08:42:50 | 249,921,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | critical = 'critical'
# Log-level name constants (string identifiers).
error = 'error'
warning = 'warning'
info = 'info'
debug = 'debug'
trace = 'trace'
"dai.z2@dai3.com"
] | dai.z2@dai3.com |
2df161b5e78c9a4e8785bf57251bffd0a81699a4 | 769daa54ddd3b29c9315f4f7b98adc0cc905dc42 | /backend/vfriendo/wsgi.py | 9425c74a21479899304cd5273234977580191b27 | [] | no_license | JoosepAlviste/vfriendo | 59714f94aee024a1a39dee4cf9f7401e1f5bc970 | 70553daaffe0fb2689259b33a6bca2ffea6752a2 | refs/heads/master | 2020-04-08T01:57:38.894106 | 2018-11-25T07:56:15 | 2018-11-25T07:56:15 | 158,916,274 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | """
WSGI config for vfriendo project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vfriendo.settings')
application = get_wsgi_application()
| [
"joosep.alviste@gmail.com"
] | joosep.alviste@gmail.com |
792e7719059f16c21235baf5e55607f71e6a86f2 | 96dae2618033385c35aa47fb81f6df6caa23080d | /post/admin.py | a203d538bd55fdf2ec4c804e26948bc18ee17e55 | [] | no_license | syuuuu/4a6g0002-post | 353bd8c27e872f1c0c83b84152a4fe6e77c498ec | 200acaa7fcf41e8cc27981433ca5d67b65694257 | refs/heads/main | 2023-06-02T23:55:56.972786 | 2021-06-11T13:06:16 | 2021-06-11T13:06:16 | 376,026,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | from django.contrib import admin
from .models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'content')
search_fields = ('title', 'content')
admin.site.register(Post, PostAdmin) | [
"4a6g0002@stust.edu.tw"
] | 4a6g0002@stust.edu.tw |
abeee8d0392b7afe75264c15ed88f63325e041ff | 37ffba8fbfd0c41c43244359694d177e528ae0e7 | /apps/soundcloud/forms.py | f20c3b5f19233bb0d404cc733889bcb8d6d20740 | [] | no_license | cherylchoon/soundcloud_clone | 714fccdaee3e83bf8f32caede5e9ba7021d32913 | adf6efa4c78319e76064fb5186471c336baefee0 | refs/heads/master | 2021-01-23T05:36:01.058354 | 2017-05-10T16:31:07 | 2017-05-10T16:31:07 | 92,976,230 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,453 | py | from django import forms
from ..loginandreg.models import User
from ..upload.models import *
from .models import Comment
from django.utils.translation import ugettext_lazy as _
class PlaylistForm(forms.ModelForm):
    """ModelForm for creating/editing a Playlist."""
    class Meta:
        model = Playlist
        fields = ['name', 'genre', 'description']
        widgets = {
            'description': forms.Textarea(attrs={'rows': 1, 'cols': 20})
        }
        # NOTE(review): 'comment' is not among `fields` above -- this
        # label entry looks like a copy/paste leftover from CommentForm;
        # confirm and remove.
        labels = {
            'comment':_(''),
        }
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment; the label is blanked so only the
    textarea is rendered."""
    class Meta:
        model = Comment
        fields = ['comment']
        widgets = {
            'comment': forms.Textarea(attrs={'rows': 2, 'cols': 40})
        }
        labels = {
            'comment':_(''),
        }
class UpdateForm(forms.Form):
    """Profile-update form: every field is optional except the current
    password, which is required to confirm the change."""
    GENDER_CHOICES = (
        ('', '--------'),
        ('M', 'Male'),
        ('F', 'Female'),
        ('O', 'Other')
    )
    name = forms.CharField(required=False, max_length=45)
    picture = forms.FileField(required=False)
    email = forms.CharField(required=False)
    age = forms.IntegerField(required=False)
    gender = forms.ChoiceField(required=False, choices=GENDER_CHOICES)
    description = forms.CharField(required=False, widget=forms.Textarea(attrs={'rows': 2, 'cols': 20}))
    new_password = forms.CharField(required=False, widget=forms.PasswordInput())
    confirm_current_password = forms.CharField(required=True, widget=forms.PasswordInput())
| [
"cherylchoon@Cheryls-MacBook.local"
] | cherylchoon@Cheryls-MacBook.local |
e2ff68a089392acaf328b36ae2b681894f45a8a9 | 3a25c8c80f855b642bd1feaddfc69053e811f00f | /main.py | 67f030da428da6053d30792d03cc9f1fbd84f4b7 | [] | no_license | joshjo/exercises | 2753cad9a5bfaf4c52708c9ec34b6de605027fb1 | 80fec56d08c10ef50d9fbaf8a9ee987411eee905 | refs/heads/master | 2023-06-08T18:09:01.864934 | 2021-07-02T00:14:55 | 2021-07-02T00:14:55 | 382,186,264 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | import re
import rocksdb
from slugify import slugify
db = rocksdb.DB("./dbreverse.db", rocksdb.Options(create_if_missing=True))
# Key-namespace prefixes used when storing entries in the reverse index.
WEB = 'web'
LINKEDIN = 'lnkd'
NAME = 'name'
class ReverseIndex:
    """Thin key/value wrapper over the module-level RocksDB handle.

    Keys and values are stored as the UTF-8 bytes of their ``str`` form;
    ``get`` decodes back to ``str`` (and passes through the falsy raw
    result when the key is absent).
    """

    @staticmethod
    def _to_bytes(key):
        """Coerce any key/value to its ``str`` form encoded as bytes."""
        return str(key).encode()

    @staticmethod
    def set(key, value):
        """Store ``value`` under ``key`` in the database."""
        db.put(ReverseIndex._to_bytes(key), ReverseIndex._to_bytes(value))

    @staticmethod
    def get(key):
        """Return the decoded value for ``key``, or the raw falsy result
        (``None``) when the key is missing."""
        raw = db.get(ReverseIndex._to_bytes(key))
        return raw.decode() if raw else raw
def clean_url(url):
    """Normalize a URL by stripping the scheme and a leading 'www.'.

    Bug fix: the original pattern ``r'https?:\\'`` matched a literal
    backslash (``http:\``), so real URLs (``http://...``) were never
    stripped -- which also meant the subsequent ``www.`` check never
    fired for them.  ``r'www.'`` was additionally unanchored with an
    unescaped dot, so it could corrupt the middle of a hostname.
    """
    # Drop an 'http://' or 'https://' scheme prefix, if present.
    url = re.sub(r'^https?://', '', url)
    # Drop a leading literal 'www.' (anchored, dot escaped).
    url = re.sub(r'^www\.', '', url)
    return url
def clean_name(name):
    """Return slugified cumulative word-prefixes of a company name.

    E.g. ``"Acme Corp Inc"`` yields the slugs of ``"Acme"``,
    ``"Acme-Corp"`` and ``"Acme-Corp-Inc"``.
    """
    parts = name.split(' ')
    prefixes = []
    for end in range(1, len(parts) + 1):
        prefixes.append(slugify('-'.join(parts[:end])))
    return prefixes
def process(id, name, website=None, linkedin=None, *args, **kwargs):
    """Deduplicate a company record against the reverse index.

    Looks the company up by website, then LinkedIn URL, then by each
    slugified name prefix.  If an earlier record is found under any of
    those keys, this record's keys are re-pointed at the earlier id;
    otherwise they are indexed under this record's own id.

    Extra positional/keyword args (e.g. a created-at date from the
    database row) are accepted and ignored.
    """
    website_value = ReverseIndex.get(f'{WEB}:{website}')
    linkedin_value = ReverseIndex.get(f'{LINKEDIN}:{linkedin}')
    found = ''  # which key type matched an existing record, if any
    possibly_names = clean_name(name)
    # Exact website/linkedin matches are treated as definite duplicates;
    # a name-prefix match is only reported as "possibly repeated".
    if website_value and str(id) != website_value:
        print(f'[Company {name} repeated] Same website id:', website_value)
        found = WEB
    elif linkedin_value and str(id) != linkedin_value:
        print(f'[Company {name} repeated] Same linkedin id:', linkedin_value)
        found = LINKEDIN
    else:
        for sub_name in possibly_names:
            name_value = ReverseIndex.get(f'{NAME}:{sub_name}')
            if name_value and str(id) != name_value:
                print(
                    '[Possibly repeated]',
                    'name_company:', name,
                    'found_name:', sub_name,
                    id, name_value,
                )
                found = NAME
                break
    # Canonical id: the previously indexed record's id when a duplicate
    # was found, otherwise this record's own id.
    new_id = id
    if found == WEB:
        new_id = website_value
    elif found == LINKEDIN:
        new_id = linkedin_value
    elif found == NAME:
        # `name_value` is guaranteed bound here: found == NAME is only
        # set after the loop assigned it.
        new_id = name_value
    # (Re-)index every key for this record under the canonical id.
    if website:
        ReverseIndex.set(f'{WEB}:{website}', new_id)
    if linkedin:
        ReverseIndex.set(f'{LINKEDIN}:{linkedin}', new_id)
    for sub_name in possibly_names:
        ReverseIndex.set(f'{NAME}:{sub_name}', new_id)
# This is a mock database, the idea is to get the data from a real database.
# Row shape: (id, name, website, linkedin, created_at).
companies_db = (
    (1, 'Facebook', 'facebook.com', 'https://www.linkedin.com/company/facebook/', '2020-02-02'),
    (2, 'SaleMove', 'salemove.com', 'linkedin.com/company/salemove', '2019-03-01'),
    (3, 'Glia', 'glia.com', 'linkedin.com/company/salemove', '2021-02-01'),
    (4, 'Snapchat', None, None, '2017-01-01'),
    (5, 'Snapchat, LLC', None, None, '2017-02-01'),
)

if __name__ == '__main__':
    # Feed each mock row through the dedup pipeline in order.
    for company in companies_db:
        process(*company)
| [
"josue.ttito@ucsp.edu.pe"
] | josue.ttito@ucsp.edu.pe |
8c1605776199c122465a2aa10d3dade49beec409 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02748/s229745856.py | 07e6ff9b4f694f31ed7e0dfb26750d3f2b624a60 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 232 | py | _, _, M = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
xyc = [tuple(map(int, input().split())) for i in range(M)]
print(min([min(A)+min(B)]+[A[x-1]+B[y-1]-c for x, y, c in xyc]))
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
48230167eab0fc5f43cae9b6ac5bad38ed650a80 | e2e1e4ee601b4fd86dfca7449989feac94c77a33 | /classic_kernel_k_means.py | 8051ae49ae84f315a5f274078555c973d2ab93e1 | [] | no_license | sajid-r/Kernel-K-Means-Project-2014 | 5b140a3d0e6d96d5feb78497b63d2d1206eefa3a | a0b3bd5a4ba143cfe0e1563894b432a5130b973d | refs/heads/master | 2021-01-19T14:57:22.038775 | 2015-03-13T15:48:08 | 2015-03-13T15:48:08 | 32,164,786 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,755 | py | import numpy as np
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils import check_random_state
from sklearn import metrics
import os, glob
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from time import time
import numpy as np
prev = np.zeros(7095)
class KernelKMeans(BaseEstimator, ClusterMixin):
    """
    Kernel K-means

    Reference
    ---------
    Kernel k-means, Spectral Clustering and Normalized Cuts.
    Inderjit S. Dhillon, Yuqiang Guan, Brian Kulis.
    KDD 2004.
    """

    def __init__(self, n_clusters=3, max_iter=50, tol=1e-3, random_state=None,
                 kernel="polynomial", gamma=.0097, degree=2, coef0=3,
                 kernel_params=None, verbose=0):
        # n_clusters: number of clusters to form.
        # max_iter: hard cap on label-update iterations.
        # tol: convergence threshold -- fraction of samples allowed to
        #   change cluster between iterations.
        # kernel/gamma/degree/coef0/kernel_params: kernel selection and
        #   parameters, passed through to sklearn's pairwise_kernels.
        self.n_clusters = n_clusters
        self.max_iter = max_iter
        self.tol = tol
        self.random_state = random_state
        self.kernel = kernel
        self.gamma = gamma
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.verbose = verbose

    @property
    def _pairwise(self):
        # sklearn convention: True when X is itself a precomputed kernel.
        return self.kernel == "precomputed"

    def _get_kernel(self, X, Y=None):
        """Return the kernel (Gram) matrix between X and Y (or X and X)."""
        if callable(self.kernel):
            # A callable kernel takes its own parameter dict.
            params = self.kernel_params or {}
        else:
            params = {"gamma": self.gamma,
                      "degree": self.degree,
                      "coef0": self.coef0}
        return pairwise_kernels(X, Y, metric=self.kernel,
                                filter_params=True, **params)

    def fit(self, X, y=None, sample_weight=None):
        '''computes the model by calculating centroids for each cluster'''
        n_samples = X.shape[0]
        K = self._get_kernel(X)
        # NOTE(review): `sample_weight if sample_weight else ...` is
        # ambiguous for array-like weights; presumably callers pass None.
        sw = sample_weight if sample_weight else np.ones(n_samples)
        self.sample_weight_ = sw
        rs = check_random_state(self.random_state)
        # Random initial cluster assignment for every sample.
        self.labels_ = rs.randint(self.n_clusters, size=n_samples)
        dist = np.zeros((n_samples, self.n_clusters))
        self.within_distances_ = np.zeros(self.n_clusters)
        for it in xrange(self.max_iter):
            dist.fill(0)
            self._compute_dist(K, dist, self.within_distances_, update_within=True)
            labels_old = self.labels_
            # Reassign each sample to its nearest cluster (in kernel space).
            self.labels_ = dist.argmin(axis=1)
            # Compute the number of samples whose cluster did not change
            # since last iteration.
            n_same = np.sum((self.labels_ - labels_old) == 0)
            if 1 - float(n_same) / n_samples < self.tol:
                if self.verbose:
                    print "Converged at iteration", it + 1
                break
        self.X_fit_ = X
        # NOTE(review): this binds a *local* `prev`, not the module-level
        # `prev` (no `global` statement) -- confirm intent.
        prev = self.labels_
        return self

    def _compute_dist(self, K, dist, within_distances, update_within):
        """Compute a n_samples x n_clusters distance matrix using the
        kernel trick."""
        sw = self.sample_weight_
        for j in xrange(self.n_clusters):
            mask = self.labels_ == j
            if np.sum(mask) == 0:
                raise ValueError("Empty cluster found, try smaller n_cluster.")
            denom = sw[mask].sum()
            denomsq = denom * denom
            if update_within:
                # Cluster self-similarity term, cached in
                # within_distances so predict() can reuse it.
                KK = K[mask][:, mask]
                dist_j = np.sum(np.outer(sw[mask], sw[mask]) * KK / denomsq)
                within_distances[j] = dist_j
                dist[:, j] += dist_j
            else:
                dist[:, j] += within_distances[j]
            dist[:, j] -= 2 * np.sum(sw[mask] * K[:, mask], axis=1) / denom #calculating distance of each point from centroid of cluster j by finding
            #diff. b/w centroid of cluster j & similarity of it with points in cluster j

    def predict(self, X):
        '''Uses the model calculated to predict for each document the closest cluster it belongs to'''
        K = self._get_kernel(X, self.X_fit_)
        n_samples = X.shape[0]
        dist = np.zeros((n_samples, self.n_clusters))
        # Reuse the cached within-cluster terms from fit().
        self._compute_dist(K, dist, self.within_distances_, update_within=False)
        return dist.argmin(axis=1)
def main():
    """Cluster the 'classic' document collection with kernel k-means and
    report clustering metrics against the ground-truth labels."""
    # NOTE(review): `true_k` is unused below -- KernelKMeans is run with
    # n_clusters=5 even though 4 ground-truth classes are labelled.
    true_k = 4
    labels = []
    training_set = []
    path = os.getcwd()+'/classicdocs/classic/'
    # Read every document; derive the ground-truth label from the
    # collection name embedded in the filename (cacm/cisi/cran/med).
    for file in glob.glob(os.path.join(path, '*')):
        data = ""
        for line in open(file) :
            data += line
        training_set.append(data)
        if 'cacm' in str(file):
            labels.append(0)
        elif 'cisi' in str(file):
            labels.append(1)
        elif 'cran' in str(file):
            labels.append(2)
        elif 'med' in str(file):
            labels.append(3)
    n_components = 20
    print 'Total Samples',len(training_set)
    print("Extracting features from the training dataset using a sparse vectorizer")
    # Perform an IDF normalization on the output of HashingVectorizer
    '''It turns a collection of text documents into a scipy.sparse matrix holding token occurrence counts
    This text vectorizer implementation uses the hashing trick to find the token string name to feature integer index mapping.'''
    hasher = HashingVectorizer(stop_words='english', non_negative=True,norm=None, binary=False)
    '''Transform a count matrix to a normalized tf-idf representation. It provides IDF weighting.'''
    vectorizer = make_pipeline(hasher, TfidfTransformer(norm='l2', smooth_idf=True, sublinear_tf=False, use_idf=True))
    X = vectorizer.fit_transform(training_set)
    if n_components:
        print("Performing dimensionality reduction using SVD")
        '''This transformer performs linear dimensionality reduction by means of singular value decomposition (SVD)'''
        svd = TruncatedSVD(n_components)
        lsa = make_pipeline(svd, Normalizer(copy=False))
        X = lsa.fit_transform(X)
    km = KernelKMeans(n_clusters= 5, max_iter=100, verbose=1)
    km.fit_predict(X)
    predict = km.predict(X)
    # Compare predicted clusters against the ground-truth labels.
    print 'Adjusted_Rand_Score',metrics.adjusted_rand_score(labels, predict)
    print 'Mutual Info',metrics.adjusted_mutual_info_score(labels, predict)
    print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, predict))
| [
"sajidur1993@gmail.com"
] | sajidur1993@gmail.com |
23cbe79ede4070b03edeae58671d8be72e87e109 | 2d0febf1a63b4c4d5af31e3d5066a599ddb39356 | /search/searchAgents.py | adbd7d065e324787eddd21c886b530fec2ee9926 | [] | no_license | jakenemiroff/Berkeley_AI_Pacman | 902f73c4c93e423ad088661ee40d7a91c2036ade | b13e4722e3be93fd207110e14cf859ab29055513 | refs/heads/master | 2022-07-16T00:51:38.523019 | 2020-05-17T20:15:53 | 2020-05-17T20:15:53 | 264,567,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,027 | py | # searchAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
"""
This file contains all of the agents that can be selected to control Pacman. To
select an agent, use the '-p' option when running pacman.py. Arguments can be
passed to your agent using '-a'. For example, to load a SearchAgent that uses
depth first search (dfs), run the following command:
> python pacman.py -p SearchAgent -a fn=depthFirstSearch
Commands to invoke other search strategies can be found in the project
description.
Please only change the parts of the file you are asked to. Look for the lines
that say
"*** YOUR CODE HERE ***"
The parts you fill in start about 3/4 of the way down. Follow the project
description for details.
Good luck and happy searching!
"""
from game import Directions
from game import Agent
from game import Actions
import util
import time
import search
class GoWestAgent(Agent):
    "An agent that goes West until it can't."

    def getAction(self, state):
        """Return WEST while it is a legal Pacman action, else STOP.

        state: a GameState (defined in pacman.py).
        """
        legal = state.getLegalPacmanActions()
        return Directions.WEST if Directions.WEST in legal else Directions.STOP
#######################################################
# This portion is written for you, but will only work #
# after you fill in parts of search.py #
#######################################################
class SearchAgent(Agent):
    """
    This very general search agent finds a path using a supplied search
    algorithm for a supplied search problem, then returns actions to follow that
    path.

    As a default, this agent runs DFS on a PositionSearchProblem to find
    location (1,1)

    Options for fn include:
      depthFirstSearch or dfs
      breadthFirstSearch or bfs

    Note: You should NOT change any code in SearchAgent
    """

    def __init__(self, fn='depthFirstSearch', prob='PositionSearchProblem', heuristic='nullHeuristic'):
        # Warning: some advanced Python magic is employed below to find the right functions and problems

        # Get the search function from the name and heuristic
        if fn not in dir(search):
            raise AttributeError(fn + ' is not a search function in search.py.')
        func = getattr(search, fn)
        # Uninformed search functions take no heuristic argument; informed
        # ones get the named heuristic bound in via a closure below.
        if 'heuristic' not in func.__code__.co_varnames:
            print('[SearchAgent] using function ' + fn)
            self.searchFunction = func
        else:
            # Resolve the heuristic by name: this module first, then search.py.
            if heuristic in globals().keys():
                heur = globals()[heuristic]
            elif heuristic in dir(search):
                heur = getattr(search, heuristic)
            else:
                raise AttributeError(heuristic + ' is not a function in searchAgents.py or search.py.')
            print('[SearchAgent] using function %s and heuristic %s' % (fn, heuristic))
            # Note: this bit of Python trickery combines the search algorithm and the heuristic
            self.searchFunction = lambda x: func(x, heuristic=heur)

        # Get the search problem type from the name
        if prob not in globals().keys() or not prob.endswith('Problem'):
            raise AttributeError(prob + ' is not a search problem type in SearchAgents.py.')
        self.searchType = globals()[prob]
        print('[SearchAgent] using problem type ' + prob)

    def registerInitialState(self, state):
        """
        This is the first time that the agent sees the layout of the game
        board. Here, we choose a path to the goal. In this phase, the agent
        should compute the path to the goal and store it in a local variable.
        All of the work is done in this method!

        state: a GameState object (pacman.py)
        """
        if self.searchFunction == None: raise Exception("No search function provided for SearchAgent")
        starttime = time.time()
        problem = self.searchType(state) # Makes a new search problem
        self.actions = self.searchFunction(problem) # Find a path
        totalCost = problem.getCostOfActions(self.actions)
        print('Path found with total cost of %d in %.1f seconds' % (totalCost, time.time() - starttime))
        if '_expanded' in dir(problem): print('Search nodes expanded: %d' % problem._expanded)

    def getAction(self, state):
        """
        Returns the next action in the path chosen earlier (in
        registerInitialState). Return Directions.STOP if there is no further
        action to take.

        state: a GameState object (pacman.py)
        """
        # Lazily initialize the replay index on the first call.
        if 'actionIndex' not in dir(self): self.actionIndex = 0
        i = self.actionIndex
        self.actionIndex += 1
        if i < len(self.actions):
            return self.actions[i]
        else:
            return Directions.STOP
class PositionSearchProblem(search.SearchProblem):
    """
    A search problem defines the state space, start state, goal test, successor
    function and cost function. This search problem can be used to find paths
    to a particular point on the pacman board.

    The state space consists of (x,y) positions in a pacman game.

    Note: this search problem is fully specified; you should NOT change it.
    """

    def __init__(self, gameState, costFn = lambda x: 1, goal=(1,1), start=None, warn=True, visualize=True):
        """
        Stores the start and goal.

        gameState: A GameState object (pacman.py)
        costFn: A function from a search state (tuple) to a non-negative number
        goal: A position in the gameState
        """
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        if start != None: self.startState = start
        self.goal = goal
        self.costFn = costFn
        self.visualize = visualize
        # Warn when the board doesn't look like a single-goal search maze.
        if warn and (gameState.getNumFood() != 1 or not gameState.hasFood(*goal)):
            print('Warning: this does not look like a regular search maze')

        # For display purposes
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE

    def getStartState(self):
        return self.startState

    def isGoalState(self, state):
        isGoal = state == self.goal

        # For display purposes only
        if isGoal and self.visualize:
            self._visitedlist.append(state)
            import __main__
            if '_display' in dir(__main__):
                if 'drawExpandedCells' in dir(__main__._display): #@UndefinedVariable
                    __main__._display.drawExpandedCells(self._visitedlist) #@UndefinedVariable

        return isGoal

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        As noted in search.py:
            For a given state, this should return a list of triples,
        (successor, action, stepCost), where 'successor' is a
        successor to the current state, 'action' is the action
        required to get there, and 'stepCost' is the incremental
        cost of expanding to that successor
        """
        successors = []
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            # Only non-wall neighbours are legal successors.
            if not self.walls[nextx][nexty]:
                nextState = (nextx, nexty)
                cost = self.costFn(nextState)
                successors.append( ( nextState, action, cost) )

        # Bookkeeping for display purposes
        self._expanded += 1 # DO NOT CHANGE
        if state not in self._visited:
            self._visited[state] = True
            self._visitedlist.append(state)

        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions. If those actions
        include an illegal move, return 999999.
        """
        if actions == None: return 999999
        x,y= self.getStartState()
        cost = 0
        for action in actions:
            # Check figure out the next state and see whether its' legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
            cost += self.costFn((x,y))
        return cost
class StayEastSearchAgent(SearchAgent):
    """
    An agent for position search with a cost function that penalizes being in
    positions on the West side of the board.

    The cost function for stepping into a position (x,y) is 1/2^x.
    """
    def __init__(self):
        # Exponentially decaying step cost in x makes Eastern paths cheaper
        # for uniform-cost search.
        def costFn(pos):
            return 0.5 ** pos[0]
        self.searchFunction = search.uniformCostSearch
        self.searchType = lambda state: PositionSearchProblem(state, costFn, (1, 1), None, False)
class StayWestSearchAgent(SearchAgent):
    """
    An agent for position search with a cost function that penalizes being in
    positions on the East side of the board.

    The cost function for stepping into a position (x,y) is 2^x.
    """
    def __init__(self):
        # Exponentially growing step cost in x makes Western paths cheaper
        # for uniform-cost search.
        def costFn(pos):
            return 2 ** pos[0]
        self.searchFunction = search.uniformCostSearch
        self.searchType = lambda state: PositionSearchProblem(state, costFn)
def manhattanHeuristic(position, problem, info=None):
    """
    The Manhattan (L1) distance heuristic for a PositionSearchProblem.

    Args:
        position: (x, y) tuple for the current state.
        problem: a problem exposing a `.goal` (x, y) tuple.
        info: unused; kept for interface compatibility. (Was a mutable
            default `{}`, the classic shared-default pitfall; replaced
            with None.)

    Returns:
        |x1 - x2| + |y1 - y2|, an admissible and consistent lower bound
        on the true maze distance.
    """
    x1, y1 = position
    x2, y2 = problem.goal
    return abs(x1 - x2) + abs(y1 - y2)
def euclideanHeuristic(position, problem, info=None):
    """
    The Euclidean (straight-line) distance heuristic for a
    PositionSearchProblem.

    Args:
        position: (x, y) tuple for the current state.
        problem: a problem exposing a `.goal` (x, y) tuple.
        info: unused; kept for interface compatibility (was a mutable
            default `{}`, replaced with None to avoid shared state).

    Returns:
        sqrt((x1 - x2)^2 + (y1 - y2)^2); admissible and consistent.
    """
    x1, y1 = position
    x2, y2 = problem.goal
    return ((x1 - x2) ** 2 + (y1 - y2) ** 2) ** 0.5
#####################################################
# This portion is incomplete. Time to write code! #
#####################################################
class CornersProblem(search.SearchProblem):
    """
    This search problem finds paths through all four corners of a layout.

    You must select a suitable state space and successor function

    State space used here: (position, visitedCorners), where position is
    Pacman's (x, y) tuple and visitedCorners is a list of corner
    coordinates reached so far.
    """

    def __init__(self, startingGameState):
        """
        Stores the walls, pacman's starting position and corners.
        """
        self.walls = startingGameState.getWalls()
        self.startingPosition = startingGameState.getPacmanPosition()
        # Corners sit one square inside the outer wall ring.
        top, right = self.walls.height-2, self.walls.width-2
        self.corners = ((1,1), (1,top), (right, 1), (right, top))
        for corner in self.corners:
            if not startingGameState.hasFood(*corner):
                print('Warning: no food in corner ' + str(corner))
        self._expanded = 0 # DO NOT CHANGE; Number of search nodes expanded
        # Please add any code here which you would like to use
        # in initializing the problem
        "*** YOUR CODE HERE ***"
        # Full game state retained so heuristics (e.g. maze-distance based
        # ones) can run searches on it.
        self.startingGameState = startingGameState
        # No corners visited at the start.
        # NOTE(review): a list is mutable and unhashable; this relies on the
        # search code never hashing states -- confirm, or use a tuple.
        self.initialState = []

    def getStartState(self):
        """
        Returns the start state (in your state space, not the full Pacman state
        space)
        """
        "*** YOUR CODE HERE ***"
        return (self.startingPosition, self.initialState)

    def isGoalState(self, state):
        """
        Returns whether this search state is a goal state of the problem.
        """
        "*** YOUR CODE HERE ***"
        # Goal: all four corners appear in the visited-corners list.
        visitedCorners = state[1]
        return len(visitedCorners) == 4

    def getSuccessors(self, state):
        """
        Returns successor states, the actions they require, and a cost of 1.

        As noted in search.py:
            For a given state, this should return a list of triples, (successor,
            action, stepCost), where 'successor' is a successor to the current
            state, 'action' is the action required to get there, and 'stepCost'
            is the incremental cost of expanding to that successor
        """
        successors = []
        x,y = state[0]
        corners = state[1]
        for action in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            dx, dy = Actions.directionToVector(action)
            nextx, nexty = int(x + dx), int(y + dy)
            hitsWall = self.walls[nextx][nexty]
            # Initialize a list of Visited corners for a successor using the
            # visited corner list in state space (copied so successors do not
            # share/mutate the parent's list).
            cornersVisited = list(corners)
            nextNode = (nextx, nexty)
            if not hitsWall:
                # Add node to the Visited corner list if it is a corner and not already in the list
                if nextNode in self.corners and not nextNode in cornersVisited:
                    cornersVisited.append(nextNode)
                # Create a new state according to the state space and append it to the successor list.
                successorNode = ((nextNode, cornersVisited), action, 1)
                successors.append(successorNode)
        self._expanded += 1 # DO NOT CHANGE
        return successors

    def getCostOfActions(self, actions):
        """
        Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999.  This is implemented for you.
        """
        if actions == None: return 999999
        x,y= self.startingPosition
        for action in actions:
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]: return 999999
        return len(actions)
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem: the maximum true maze distance from
    the current position to any not-yet-visited corner.

    Admissible and consistent: visiting every remaining corner costs at
    least as much as reaching the farthest one.

    Args:
        state: (position, visitedCorners) as produced by CornersProblem.
        problem: the CornersProblem instance (supplies `corners` and the
            full `startingGameState` needed by mazeDistance).

    Returns:
        A non-negative lower bound on the remaining path cost (0 at a goal).
    """
    position, visited = state
    remaining = [corner for corner in problem.corners if corner not in visited]
    if not remaining:
        return 0  # all corners visited: trivial heuristic at the goal
    # Compute each maze distance exactly once. (The original evaluated
    # mazeDistance twice per corner -- once in the comparison and once in
    # the assignment -- doubling the BFS work.)
    return max(mazeDistance(position, corner, problem.startingGameState)
               for corner in remaining)
class AStarCornersAgent(SearchAgent):
    # Docstring corrected: this agent solves CornersProblem (the previous
    # text was copy-pasted from the food-search agent).
    "A SearchAgent for CornersProblem using A* and your cornersHeuristic"
    def __init__(self):
        self.searchFunction = lambda prob: search.aStarSearch(prob, cornersHeuristic)
        self.searchType = CornersProblem
class FoodSearchProblem:
    """
    A search problem associated with finding the a path that collects all of the
    food (dots) in a Pacman game.

    A search state in this problem is a tuple ( pacmanPosition, foodGrid ) where
      pacmanPosition: a tuple (x,y) of integers specifying Pacman's position
      foodGrid:       a Grid (see game.py) of either True or False, specifying remaining food
    """
    def __init__(self, startingGameState):
        self.start = (startingGameState.getPacmanPosition(), startingGameState.getFood())
        self.walls = startingGameState.getWalls()
        self.startingGameState = startingGameState
        self._expanded = 0 # DO NOT CHANGE
        self.heuristicInfo = {} # A dictionary for the heuristic to store information

    def getStartState(self):
        # Start state: (pacman position, full food grid).
        return self.start

    def isGoalState(self, state):
        # Goal when no food remains anywhere in the grid.
        return state[1].count() == 0

    def getSuccessors(self, state):
        "Returns successor states, the actions they require, and a cost of 1."
        successors = []
        self._expanded += 1 # DO NOT CHANGE
        for direction in [Directions.NORTH, Directions.SOUTH, Directions.EAST, Directions.WEST]:
            x,y = state[0]
            dx, dy = Actions.directionToVector(direction)
            nextx, nexty = int(x + dx), int(y + dy)
            if not self.walls[nextx][nexty]:
                # Copy the grid and clear any food on the square stepped onto,
                # so parent and child states never share a grid.
                nextFood = state[1].copy()
                nextFood[nextx][nexty] = False
                successors.append( ( ((nextx, nexty), nextFood), direction, 1) )
        return successors

    def getCostOfActions(self, actions):
        """Returns the cost of a particular sequence of actions.  If those actions
        include an illegal move, return 999999"""
        x,y= self.getStartState()[0]
        cost = 0
        for action in actions:
            # figure out the next state and see whether it's legal
            dx, dy = Actions.directionToVector(action)
            x, y = int(x + dx), int(y + dy)
            if self.walls[x][y]:
                return 999999
            cost += 1
        return cost
class AStarFoodSearchAgent(SearchAgent):
    "A SearchAgent for FoodSearchProblem using A* and your foodHeuristic"
    def __init__(self):
        # A* over the full food-collection problem, guided by foodHeuristic.
        self.searchFunction = lambda prob: search.aStarSearch(prob, foodHeuristic)
        self.searchType = FoodSearchProblem
def foodHeuristic(state, problem):
    """
    Heuristic for the FoodSearchProblem: the maximum true maze distance
    from Pacman's position to any remaining food pellet.

    Admissible and consistent, since eating every pellet costs at least as
    much as walking to the farthest one.

    The state is a tuple ( pacmanPosition, foodGrid ) where foodGrid is a
    Grid (see game.py) of either True or False; foodGrid.asList() yields
    the food coordinates.

    Maze distances are memoized in problem.heuristicInfo (the dictionary
    the problem provides exactly for this purpose), so each
    (position, pellet) BFS runs at most once across all heuristic calls.

    Returns:
        A non-negative lower bound on the remaining path cost (0 at a goal).
    """
    position, foodGrid = state
    pellets = foodGrid.asList()
    if not pellets:
        return 0  # no food left: goal state
    cache = problem.heuristicInfo.setdefault('mazeDistances', {})
    best = 0
    for pellet in pellets:
        key = (position, pellet)
        if key not in cache:
            # Compute once; the original called mazeDistance twice per
            # pellet (compare + assign), doubling the BFS work.
            cache[key] = mazeDistance(position, pellet, problem.startingGameState)
        if cache[key] > best:
            best = cache[key]
    return best
class ClosestDotSearchAgent(SearchAgent):
    "Search for all food using a sequence of searches"
    def registerInitialState(self, state):
        """Greedily chain shortest paths to the closest dot until no food remains."""
        self.actions = []
        currentState = state
        while(currentState.getFood().count() > 0):
            nextPathSegment = self.findPathToClosestDot(currentState) # The missing piece
            self.actions += nextPathSegment
            for action in nextPathSegment:
                # Sanity-check each action against the game before applying it.
                legal = currentState.getLegalActions()
                if action not in legal:
                    t = (str(action), str(currentState))
                    raise Exception('findPathToClosestDot returned an illegal move: %s!\n%s' % t)
                currentState = currentState.generateSuccessor(0, action)
        self.actionIndex = 0
        print('Path found with cost %d.' % len(self.actions))

    def findPathToClosestDot(self, gameState):
        """
        Returns a path (a list of actions) to the closest dot, starting from
        gameState.

        AnyFoodSearchProblem's goal test succeeds on any square containing
        food, so breadth-first search returns a shortest path to the
        nearest dot. (Replaces the util.raiseNotDefined() placeholder;
        the unused startPosition/food/walls locals were dropped.)
        """
        problem = AnyFoodSearchProblem(gameState)
        return search.bfs(problem)
class AnyFoodSearchProblem(PositionSearchProblem):
    """
    A search problem for finding a path to any food.

    This search problem is just like the PositionSearchProblem, but has a
    different goal test: any position that contains food is a goal. The
    state space and successor function are inherited unchanged.

    Used by ClosestDotSearchAgent.findPathToClosestDot.
    """

    def __init__(self, gameState):
        "Stores information from the gameState.  You don't need to change this."
        # Store the food for later reference
        self.food = gameState.getFood()

        # Store info for the PositionSearchProblem (no need to change this)
        self.walls = gameState.getWalls()
        self.startState = gameState.getPacmanPosition()
        self.costFn = lambda x: 1
        self._visited, self._visitedlist, self._expanded = {}, [], 0 # DO NOT CHANGE

    def isGoalState(self, state):
        """
        The state is Pacman's position; it is a goal exactly when there is
        food at that position. (Replaces the util.raiseNotDefined()
        placeholder.)
        """
        x, y = state
        return self.food[x][y]
def mazeDistance(point1, point2, gameState):
    """
    Returns the maze distance between any two points, using the search functions
    you have already built. The gameState can be any game state -- Pacman's
    position in that state is ignored.

    Example usage: mazeDistance( (2,4), (5,6), gameState)

    This might be a useful helper function for your ApproximateSearchAgent.
    """
    x1, y1 = point1
    x2, y2 = point2
    walls = gameState.getWalls()
    # Both endpoints must be walkable squares, not walls.
    assert not walls[x1][y1], 'point1 is a wall: ' + str(point1)
    assert not walls[x2][y2], 'point2 is a wall: ' + str(point2)
    # BFS returns an optimal action sequence; its length is the distance.
    prob = PositionSearchProblem(gameState, start=point1, goal=point2, warn=False, visualize=False)
    return len(search.bfs(prob))
| [
"jakenemiroff@gmail.com"
] | jakenemiroff@gmail.com |
41b078d3ab88b489b6dd3cf7982d2e7d3dc17c18 | 67e9873f5b71e0329b6b6249283829dd33dba01e | /sales/main_app/initialize_db.py | 94c7d1ffd5c754891fec85690ee5ad7c484c20e1 | [] | no_license | daniel-leinad/subtotal | 07da87ef89273ac372cc26a745c172653e0642a1 | 33df4523986e402fc7642b8be6b9d81bf383bab3 | refs/heads/master | 2022-12-10T02:05:49.449912 | 2020-09-12T13:14:28 | 2020-09-12T13:14:28 | 294,690,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 994 | py | import os
import sys
import transaction
from datetime import datetime
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from .models import (
DBSession,
Sale,
Product,
Base,
)
def usage(argv):
    """Print a one-line usage message built from argv[0] and exit with status 1."""
    cmd = os.path.basename(argv[0])
    message = 'usage: %s <config_uri>\n(example: "%s development.ini")' % (cmd, cmd)
    print(message)
    sys.exit(1)
def main(argv=sys.argv):
    """Create the database schema and seed one demo Sale with one Product.

    Expects exactly one argument after the script name: the Pyramid config
    URI (e.g. development.ini); otherwise prints usage and exits.
    """
    if len(argv) != 2:
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    # Bind the SQLAlchemy session/metadata to the engine from the .ini file.
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.create_all(engine)
    # One managed transaction seeds a sale and a product row referencing it.
    with transaction.manager:
        model = Sale(num=1, date=datetime(2010, 10, 10))
        DBSession.add(model)
        model = Product(sale_id=1, product='Книга', price=10, amount=1)
        DBSession.add(model)
| [
"noreply@github.com"
] | noreply@github.com |
5d62a8b15b3e6cd9a71bfda0160a18e3204ac376 | ae0ad01d2b7432c5f28239c47f341336a439ec3c | /djangotest/djangotest/urls.py | 4c99b83786cf529383bbe21fa4b0a85e69514175 | [
"MIT"
] | permissive | meghaggarwal/Django-Forum | c37feb4567f83d6ad95f25e5b6559118f3f606af | 5bae07ad9545eaf076d58942e5bce8cbad5219fd | refs/heads/master | 2022-11-14T06:36:53.586786 | 2020-07-04T19:40:39 | 2020-07-04T19:40:39 | 274,471,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | """djangotest URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from boards import views as board_views
# URL routes: admin site, board list (home), a board's topic list, and the
# new-topic form. `pk` is the primary key of the board being viewed.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', board_views.home, name='home'),
    path('boards/<int:pk>/', board_views.board_topics , name='board_topics'),
    path('boards/<int:pk>/new/', board_views.new_topic, name ='new_topic'),
]
| [
"meghaa.2703@gmail.com"
] | meghaa.2703@gmail.com |
def circular_array_loop_exists(arr):
    """
    Return True iff the circular array contains a cycle that is longer
    than one element and moves in a single direction (all-forward for
    positive jumps or all-backward for negative jumps).
    """
    for start in range(len(arr)):
        moving_forward = arr[start] >= 0
        slow = fast = start
        while True:
            # Advance slow one hop and fast two hops; -1 marks a dead end
            # (direction flip or a one-element self-loop).
            slow = get_next_index(arr, moving_forward, slow)
            fast = get_next_index(arr, moving_forward, fast)
            if fast != -1:
                fast = get_next_index(arr, moving_forward, fast)
            if slow == -1 or fast == -1 or slow == fast:
                break
        # Meeting at a valid index means a cycle was found for this start.
        if slow != -1 and slow == fast:
            return True
    return False


def get_next_index(arr, is_forward, current_index):
    """One hop from current_index; -1 on a direction change or self-loop."""
    if (arr[current_index] >= 0) != is_forward:
        return -1
    next_index = (current_index + arr[current_index]) % len(arr)
    return -1 if next_index == current_index else next_index
def main():
    """Print the cycle-detection result for three sample arrays."""
    for sample in ([1, 2, -1, 2, 2], [2, 2, -1, 2], [2, 1, -1, -2]):
        print(circular_array_loop_exists(sample))


main()
""""
We are given an array containing positive and negative numbers. Suppose the array contains a number ‘M’ at a particular index. Now,
if ‘M’ is positive we will move forward ‘M’ indices and if ‘M’ is negative move backwards ‘M’ indices.
You should assume that the array is circular which means two things:
If, while moving forward, we reach the end of the array, we will jump to the first element to continue the movement.
If, while moving backward, we reach the beginning of the array, we will jump to the last element to continue the movement.
Write a method to determine if the array has a cycle. The cycle should have more than one element and should follow one direction
which means the cycle should not contain both forward and backward movements.
"""
""""
Alternate Method
In our algorithm, we don’t keep a record of all the numbers that have been evaluated for cycles.
We know that all such numbers will not produce a cycle for any other instance as well.
If we can remember all the numbers that have been visited, our algorithm will improve to O(N) as,
then, each number will be evaluated for cycles only once. We can keep track of this by creating a separate
array however the space complexity of our algorithm will increase to O(N).
"""
| [
"noorul.km@people10.com"
] | noorul.km@people10.com |
d1684f57fb28491ecde85c741f45fcd4e4659cf8 | ed9e4027cbd76fbac19598163b9673628cb07eea | /anjia/asgi.py | 372aac7872de8b88fd3e438e294b71fb8dafce32 | [
"BSD-2-Clause"
] | permissive | ankiwoong/python_kindergarten | 3a1f9a486a32866b5f37ba4673dfc2135a85eec0 | 43b1e15969f0d35073e2f7fb1286d8c094fd80a8 | refs/heads/master | 2022-09-01T08:11:27.374802 | 2020-05-27T08:45:14 | 2020-05-27T08:45:14 | 258,760,014 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | """
ASGI config for anjia project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'anjia.settings')

# The ASGI callable that ASGI servers import and serve.
application = get_asgi_application()
| [
"ankiwoong@gmail.com"
] | ankiwoong@gmail.com |
73fbb3301c1acd0601b2b5f5a1dbbe3fd2a828be | 804c9acec2cfe400c80f380fcb18557b1e926478 | /assistant.py | 4e2100750cb9c6aa32f41b9cd02b8289f8198350 | [] | no_license | Harshil78/Python | f8f10f271b936c45241c972922fe2b35f02ae928 | 600b4ff27cfa28b14be9f1281b129516173e862b | refs/heads/master | 2023-06-17T13:07:10.658296 | 2021-07-05T04:11:41 | 2021-07-05T04:11:41 | 379,554,120 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | import datetime
import os
import random
import smtplib
import sys
import webbrowser
import pyttsx3
import speech_recognition as sr
# Text-to-speech engine using the Windows SAPI5 backend.
engine = pyttsx3.init('sapi5')
# client = wolframalpha.Client('Your_App_ID')
voices = engine.getProperty('voices')
# Select the last voice installed on this machine.
engine.setProperty('voice', voices[len(voices) - 1].id)
def speak(audio):
    """Echo the assistant's reply on the console, then voice it via TTS."""
    prefixed = 'Computer: ' + audio
    print(prefixed)
    engine.say(audio)
    engine.runAndWait()
def greetMe():
    """Speak a greeting matched to the current local hour.

    Hours 0-11 -> morning, 12-17 -> afternoon, 18-23 -> evening.

    The original used three independent `if`s with a redundant
    `currentH != 0` clause (always true when currentH >= 18); the ranges
    are mutually exclusive, so a single if/elif/else chain is used.
    """
    current_hour = int(datetime.datetime.now().hour)
    if current_hour < 12:
        speak('Good Morning!')
    elif current_hour < 18:
        speak('Good Afternoon!')
    else:
        speak('Good Evening!')
# Startup greeting spoken once before the command loop begins.
greetMe()
speak('Hello Sir, I am Harshil your digital assistant!')
speak('How may I help you?')
def myCommand():
    """Listen on the microphone and return the recognized command string.

    Returns '' when Google speech recognition cannot understand the audio,
    so callers that immediately lower-case or search the result do not
    crash. (Previously `query` was never assigned on UnknownValueError,
    making the final `return query` raise UnboundLocalError.)
    """
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        r.pause_threshold = 1
        audio = r.listen(source)
    query = ''  # safe default if recognition fails
    try:
        query = r.recognize_google(audio, language='en-in')
        print('User: ' + query + '\n')
    except sr.UnknownValueError:
        speak('Sorry sir! I didn\'t get that! Try typing the command!')
    return query
if __name__ == '__main__':
    # Main command loop: listen, dispatch on keywords, repeat until told to stop.
    while True:
        query = myCommand().lower()
        query = query.lower()  # NOTE(review): redundant -- already lower-cased above
        if 'open youtube' in query:
            speak('okay')
            webbrowser.open('www.youtube.com')
        elif 'open google' in query:
            speak('okay')
            webbrowser.open('www.google.co.in')
        elif 'open gmail' in query:
            speak('okay')
            webbrowser.open('www.gmail.com')
        elif "what\'s up" in query or 'how are you' in query:
            stMsgs = ['Just doing my thing!', 'I am fine!', 'Nice!', 'I am nice and full of energy']
            speak(random.choice(stMsgs))
        elif 'email' in query:
            # Dictate and send an email through Gmail SMTP (placeholder credentials).
            speak('Who is the recipient? ')
            recipient = myCommand()
            if 'me' in recipient:
                try:
                    speak('What should I say? ')
                    content = myCommand()
                    server = smtplib.SMTP('smtp.gmail.com', 587)
                    server.ehlo()
                    server.starttls()
                    server.login("Your_Username", 'Your_Password')
                    server.sendmail('Your_Username', "Recipient_Username", content)
                    server.close()
                    speak('Email sent!')
                except:
                    # NOTE(review): bare except hides the real failure (auth,
                    # network); consider catching smtplib.SMTPException.
                    speak('Sorry Sir! I am unable to send your message at this moment!')
        elif 'nothing' in query or 'abort' in query or 'stop' in query:
            speak('okay')
            speak('Bye Sir, have a good day.')
            sys.exit()
        elif 'hello HD' in query:
            # NOTE(review): query is lower-cased, so 'hello HD' can never
            # match; this branch is dead -- the literal should be 'hello hd'.
            speak('Hello Sir')
        elif 'bye' in query:
            speak('Bye Sir, have a good day.')
            sys.exit()
        elif 'what time is it now' in query:
            strtime = datetime.datetime.now().strftime('%H:%M:%S')
            speak(f'sir,the time is {strtime}')
        elif 'play music' in query:
            # Plays the first file in the folder; Windows-only (os.startfile).
            music_folder = 'D:\\Songs\\English'
            songs = os.listdir(music_folder)
            # music = [music1, music2, music3, music4, music5]
            # random_music = music_folder + random.choice(music) + '.mp3'
            # os.system(random_music)
            os.startfile(os.path.join(music_folder, songs[0]))
            speak('Okay, here is your music! Enjoy!')
        else:
            # Unrecognized (or empty) command: ask the user to repeat.
            speak('Speak Again...')
        speak('Next Command! Sir!')
| [
"harshildalal1718@gmail.com"
] | harshildalal1718@gmail.com |
3aefb338c74473c31e9b8b9f5b57d93c9d53d0e5 | 5f957add3e3f7a1885d4f1b106de72e93c8fcb1a | /ExerciciosPython/ex072.py | a2215342cdcfa208efd442734fd5f94405993530 | [
"MIT"
] | permissive | mpatrickaires/curso-python | 6e32cf785a3bc0076bb3ea24cd6d896604f4e774 | aba023648527d53bfe18833b91210a7e528a84d7 | refs/heads/main | 2022-12-27T00:57:07.467940 | 2020-10-14T00:48:09 | 2020-10-14T00:48:09 | 302,203,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | extenso = ('zero', 'um', 'dois', 'três', 'quatro', 'cinco', 'seis', 'sete', 'oito', 'nove', 'dez', 'onze',
'doze', 'treze', 'catorze', 'quinze', 'dezesseis', 'dezessete', 'dezoito', 'dezenove', 'vinte')
# Read numbers 0-20, spell each one out, and repeat until the user answers 'N'.
while True:
    numero = int(input('Digite um número entre 0 e 20: '))
    while not 0 <= numero <= 20:
        numero = int(input('Tente novamente. Digite um número entre 0 e 20: '))
    print(f'Você digitou o número {extenso[numero]}')
    continuar = ''
    while continuar not in ('S', 'N'):
        continuar = str(input('Deseja continuar? [S/N] ')).strip().upper()
    if continuar == 'N':
        break
| [
"mpatrickaires@gmail.com"
] | mpatrickaires@gmail.com |
bb474b239a91648af44dd841343b2853899bbb38 | da38287bf935ce68321f63f17c24433384312786 | /generate&inference no texons/inference_demo_v1.2.py | 51c6887b269101830e3f04d67e3685f1a59fe4bb | [] | no_license | ningtangla/sceneParsing | 08eb5e58bceba5171e2b60a63e3b30661428e2c3 | 5b425e4203d725ac628c6b43bf1d5fa889eae7e0 | refs/heads/master | 2022-12-25T09:21:53.192613 | 2019-06-18T00:48:19 | 2019-06-18T00:48:19 | 192,429,153 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,323 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 13:52:00 2017
@author: Edward Coen
"""
from __future__ import division
from scipy.misc import comb
import math
import scipy.stats
import numpy as np
import networkx as nx
from networkx.algorithms.traversal.depth_first_search import dfs_successors
import itertools as it
import operator as op
import pandas as pd
import cv2
import sys
import itertools
# Very deep recursion is needed by decoder.decode_princeple below.
sys.setrecursionlimit(100000000)
"""
enumerate_tree parameter
"""
TREE_NUM = 3
"""
ncrp parameters
"""
GAMMA = 1
ALL_GUEST_NUM = 6  # guests per tree; also selects the tree-code CSV to load
"""
image parameters
"""
IMAGE_WIDTH = 1024
IMAGE_HEIGHT = 768
# Segment colors matched exactly by cv2.inRange in likelihood.find_vertex
# (presumably OpenCV BGR channel order -- confirm against image source).
COLOR_SPACE = [[128,128,128],[0,0,255],[0,255,0],[255,0,0],[0,255,255],[255,0,255],[255,255,0],[255,255,255]]
GRAPH_WIDTH = 500
"""
Dirchlet parmeters
"""
ALPHA_BASE = [20]  # per-segment Dirichlet concentration (replicated per cut)
"""
code parameters
"""
"""
global arguements
"""
CODE_BASE = 10 #decimal
class decoder():
    """
    Decodes string-encoded nCRP trees (one digit per table's guest count)
    into nested table-partition lists.

    Python 2 code: map() evaluates eagerly and returns lists, which this
    class relies on for its side effects.
    """
    def __init__(self, code_base):
        # code_base: numeric base of the tree code (10 = decimal digits).
        self.code_base = code_base

    def decode_princeple(self, all_guest_num):
        """Consume digits of self.code_tree until all_guest_num guests are seated."""
        curr_table_guest_num = self.code_tree[0]
        self.code_tree = self.code_tree[1:]
        self.curr_guest_num = self.curr_guest_num + int(curr_table_guest_num)
        self.table_partion_list[len(self.table_partion_list) - 1].append(int(curr_table_guest_num))
        # Tables with more than one guest spawn a sub-partition decoded later.
        if int(curr_table_guest_num) != 1:
            self.all_guest_num_list.append(int(curr_table_guest_num))
        if self.curr_guest_num == all_guest_num:
            # Level complete: start a fresh partition for the next level.
            self.table_partion_list.append([])
            self.curr_guest_num = 0
            return
        else:
            return self.decode_princeple(all_guest_num)

    def make_decode_list(self, code_tree):
        """Decode one tree code into its per-level table partitions."""
        self.code_tree = code_tree
        self.table_partion_list = [[]]
        self.all_guest_num_list = [ALL_GUEST_NUM]
        self.curr_guest_num = 0
        # Eager Python 2 map drives the recursive decode for every pending
        # sub-partition appended to all_guest_num_list along the way.
        map(self.decode_princeple, self.all_guest_num_list)
        del self.table_partion_list[-1]  # drop the trailing empty partition
        self.all_table_partion_list.append(self.table_partion_list)

    def __call__(self):
        # Load the enumerated tree codes for ALL_GUEST_NUM guests from disk.
        self.code_tree_list = list(pd.read_csv('E:/ncrp_generate/tree_kind_num_' + str(ALL_GUEST_NUM) + '.csv')['tree_code'])
        self.code_tree_list = map(str, self.code_tree_list)
        self.all_table_partion_list = []
        map(self.make_decode_list, self.code_tree_list)
        return self.all_table_partion_list
class prior():
    """
    Computes a prior probability for each decoded tree from its
    table-partition structure (Python 2: map() returns lists; '/' is true
    division via the __future__ import at the top of the file).
    """
    def __init__(self, all_table_partion_list):
        self.all_table_partion_list = all_table_partion_list

    def cal_renomalize_parameter(self, table_partion):
        # Renormalization factor; presumably corrects for excluding the
        # single-table partition -- confirm against the generator code.
        return 1/(1 - 1/np.array(table_partion).sum())

    def cal_probability_table_partion(self, table_partion):
        # prod((n_k - 1)!) / N!  over the tables of one partition.
        return reduce(op.mul, map(math.factorial, (np.array(table_partion) - 1)))/math.factorial(np.array(table_partion).sum())

    def cal_permutation_table_partion(self, table_partion):
        # Distinct orderings of the table sizes in this partition.
        return list(set(list(itertools.permutations(table_partion))))

    def cal_all_combination_guest(self, permutation_table_partion):
        # Total guest assignments summed over all distinct table orderings.
        return reduce(op.add, map(self.cal_permutation_combination_guest, permutation_table_partion))

    def cal_permutation_combination_guest(self, table_partion):
        # self.guest_left is threaded through cal_combination_guest calls.
        self.guest_left = np.array(table_partion).sum()
        return reduce(op.mul, map(self.cal_combination_guest, table_partion))

    def cal_combination_guest(self, table_guest_num):
        # Ways to fill this table from the remaining guests; one guest per
        # table is fixed first, hence the two -1 terms.
        combination_num = round(comb(self.guest_left - 1, table_guest_num - 1))
        self.guest_left = self.guest_left - table_guest_num
        return combination_num

    def cal_prior_probability(self, table_partion_list):
        # Index 0 (the root partition) is skipped; remaining levels multiply.
        probability_table_partion = map(self.cal_probability_table_partion, table_partion_list[1:])
        permutation_table_partion = map(self.cal_permutation_table_partion, table_partion_list[1:])
        all_combination_guest = map(self.cal_all_combination_guest, permutation_table_partion)
        renomalize_parameter = map(self.cal_renomalize_parameter, table_partion_list[1:])
        return reduce(op.mul, np.array(probability_table_partion)*np.array(all_combination_guest)*np.array(renomalize_parameter))

    def __call__(self):
        return map(self.cal_prior_probability, self.all_table_partion_list)
class likelihood():
    def __init__(self, all_table_partion_list, color_space, alpha_base):
        # Candidate tree partitions, the exact segment colors to detect in
        # the image, and the base Dirichlet concentration for cut proportions.
        self.all_table_partion_list = all_table_partion_list
        self.alpha_base = alpha_base
        self.color_space = color_space
    def find_all_vertex(self, color_space):
        """Collect bounding-box corners of every listed color region in self.img."""
        self.all_vertex_list = []
        map(self.find_vertex, color_space)  # Python 2 eager map, run for side effects
    def find_vertex(self, color):
        """Append the bounding-box corners of pixels exactly matching `color`."""
        # Equal lower/upper bounds make inRange an exact color match.
        lower = np.array(color, dtype = "uint8")
        upper = np.array(color, dtype = "uint8")
        mask = cv2.inRange(self.img, lower, upper)
        index = np.argwhere(mask == 255)  # pixel coordinates as (row, col) = (y, x)
        if len(index) != 0:
            # Keep only the extreme (top-left-most, bottom-right-most) corners.
            self.all_vertex_list.extend([index.min(axis = 0), index.max(axis = 0)])
    def detect_cut_point_list(self, vertex):
        """
        Find the straight cut lines that split the rectangle spanned by
        `vertex` into sub-rectangles.

        Returns (cut_point_list, cut_propotion_list, new_vertex_list):
        the cut coordinates, the relative widths of the resulting segments
        (summing to 1), and the corner pairs of each sub-rectangle. All
        three are empty lists when no cut is detected.

        NOTE(review): the coordinate naming assumes vertex rows unpack as
        (x, y) here while find_vertex stores (y, x) -- confirm which
        convention the callers use.
        """
        cut_point_list = []
        cut_propotion_list = []
        new_vertex_list = []
        if len(vertex) != 0:
            min_x, min_y = np.array(vertex).min(axis = 0)
            max_x, max_y = np.array(vertex).max(axis = 0)
            all_vertex_array = np.array(self.all_vertex_list)
            x_vertex_array = all_vertex_array[:, 1]
            y_vertex_array = all_vertex_array[:, 0]
            # NOTE(review): Python `or` between np.where(...) tuples returns
            # the first operand whenever it is truthy (a non-empty tuple
            # always is) -- the y-range condition is effectively never
            # applied. An elementwise union (| on boolean masks) was likely
            # intended; confirm before changing behavior.
            inner_vertex_array = all_vertex_array[(np.where((x_vertex_array >= min_x) & (x_vertex_array <= max_x))) or (np.where((y_vertex_array >= min_y) & (y_vertex_array <= max_y)))]
            inner_vertex_list = map(tuple, inner_vertex_array)
            # Drop the rectangle's own two defining corners.
            inner_vertex_array = np.array(list(set(inner_vertex_list).difference(set([(min_y, min_x), (max_y, max_x)]))))
            if len(inner_vertex_array) == 0:
                vertx_array_y = []
                vertx_array_x = []
            else:
                inner_x_vertex_array = inner_vertex_array[:, 1]
                inner_y_vertex_array = inner_vertex_array[:, 0]
                # Candidate cut coordinates: inner corners lying within 3px
                # of both opposite edges (a full-width/height cut line).
                x_min_vertex_array_y = inner_vertex_array[np.where(np.abs(inner_x_vertex_array - min_x) < 3)][:, 0]
                x_max_vertex_array_y = inner_vertex_array[np.where(np.abs(inner_x_vertex_array - max_x) < 3)][:, 0] + 1
                y_min_vertex_array_x = inner_vertex_array[np.where(np.abs(inner_y_vertex_array - min_y) < 3)][:, 1]
                y_max_vertex_array_x = inner_vertex_array[np.where(np.abs(inner_y_vertex_array - max_y) < 3)][:, 1] + 1
                vertx_array_y = np.intersect1d(x_min_vertex_array_y, x_max_vertex_array_y)
                vertx_array_x = np.intersect1d(y_min_vertex_array_x, y_max_vertex_array_x)
            if (len(vertx_array_y) !=0) or (len(vertx_array_x) !=0):
                if len(vertx_array_y) == 0 :
                    # Vertical cuts: split along x, keep full y extent.
                    min_vertex = min_x
                    max_vertex = max_x
                    cut_point_list = list(vertx_array_x)
                    cut_point_list.sort()
                    new_vertex_x = map(lambda x: [cut_point_list[x], cut_point_list[x + 1] - 1], list(range(len(cut_point_list) - 1)))
                    new_vertex_x.insert(0, [min_x, cut_point_list[0] - 1])
                    new_vertex_x.append([cut_point_list[-1], max_x])
                    new_vertex_y = [[min_y, max_y]] * len(new_vertex_x)
                else:
                    # Horizontal cuts: split along y, keep full x extent.
                    min_vertex = min_y
                    max_vertex = max_y
                    cut_point_list = list(vertx_array_y)
                    cut_point_list.sort()
                    new_vertex_y = map(lambda x: [cut_point_list[x], cut_point_list[x + 1] - 1], list(range(len(cut_point_list) - 1)))
                    new_vertex_y.insert(0, [min_y, cut_point_list[0] -1])
                    new_vertex_y.append([cut_point_list[-1], max_y])
                    new_vertex_x = [[min_x, max_x]] * len(new_vertex_y)
                new_vertex_list = map(zip, new_vertex_x, new_vertex_y)
                # Convert cut coordinates into segment proportions in [0, 1].
                propotion_list = list((np.array(cut_point_list)-min_vertex)/((max_vertex - min_vertex)*1.0))
                cut_propotion_list = map(lambda x: propotion_list[x+1] - propotion_list[x], range(len(propotion_list) - 1))
                cut_propotion_list.insert(0, propotion_list[0] - 0)
                cut_propotion_list.append(1 - propotion_list[-1])
        return cut_point_list, cut_propotion_list, new_vertex_list
    def cal_p_dirichlet(self, cut_propotion):
        """Dirichlet density of the observed cut proportions under a symmetric alpha."""
        # List repetition: ALPHA_BASE = [20] becomes one 20 per segment.
        alpha = self.alpha_base * len(cut_propotion)
        return scipy.stats.dirichlet.pdf(cut_propotion, alpha)
def cal_p_table_partion(self, curr_depth_table_partion):
self.curr_depth_table_partion = curr_depth_table_partion
self.flattend_curr_depth_table_partion = list(np.array(self.curr_depth_table_partion).flatten())
if self.flattend_curr_depth_table_partion.count(1) != len(self.flattend_curr_depth_table_partion):
self.next_depth_table_partion = map(lambda x: self.table_partion_list[x], np.array(range(len(self.flattend_curr_depth_table_partion) - self.flattend_curr_depth_table_partion.count(1))) + self.x + 1)
self.x = self.x + len(self.flattend_curr_depth_table_partion) - self.flattend_curr_depth_table_partion.count(1)
self.flattend_next_depth_table_partion = list(np.array(self.next_depth_table_partion).flatten())
print self.next_depth_table_partion
print self.permutation_table_partion_index
self.next_depth_index = map(lambda x: map(lambda y: x.index(y), np.array(range(len(self.next_depth_table_partion))) + self.flattend_curr_depth_table_partion.count(1)), self.permutation_table_partion_index)
print self.next_depth_index
self.next_depth_table_partion_index = map(lambda x: map(lambda y: np.array(x).argsort()[y], range(len(self.next_depth_table_partion))), self.next_depth_index)
self.next_depth_index_num = [0]
self.next_depth_index_num.extend(map(len, self.next_depth_table_partion))
self.next_depth_permutation_index_base = map(lambda x: map(lambda y: np.array(range(len(self.next_depth_table_partion[x[y]]))) + self.next_depth_index_num[x[y]], range(len(self.next_depth_table_partion))), self.next_depth_table_partion_index)
self.next_depth_permutation_table_partion_base = map(lambda x: map(lambda y: self.next_depth_table_partion[x[y]], range(len(self.next_depth_table_partion))), self.next_depth_table_partion_index)
self.next_depth_permutation_index_before_product = map(lambda x: map(list,(map(itertools.permutations, x))), self.next_depth_permutation_index_base)
self.next_depth_permutation_index_after_product = map(lambda x: list(itertools.product(*x)), self.next_depth_permutation_index_before_product)
self.next_depth_permutation_table_partion_before_product = map(lambda x: map(list,(map(itertools.permutations, x))), self.next_depth_permutation_table_partion_base)
self.next_depth_permutation_table_partion_after_product = map(lambda x: list(itertools.product(*x)), self.next_depth_permutation_table_partion_before_product)
# print '###'
# print self.next_depth_index, self.next_depth_table_partion, self.next_depth_table_partion_index, self.next_depth_permutation_index_base
# print '***'
# print self.next_depth_permutation_index_before_product, self.next_depth_permutation_index_after_product
# print self.next_depth_permutation_table_partion_before_product, self.next_depth_permutation_table_partion_after_product
# print self.next_depth_all_vertex, self.next_depth_all_vertex[0], self.next_depth_index
self.next_depth_vertex = map(lambda x: map(lambda y: map(lambda z: self.next_depth_all_vertex[x][y][z], self.next_depth_index[x]), range(len(self.next_depth_all_vertex[0]))), range(len(self.next_depth_all_vertex)))
self.next_depth_cut = map(lambda x: map(lambda y: map(lambda z: map(self.detect_cut_point_list, self.next_depth_vertex[x][y][z]), range(len(self.next_depth_vertex[0][0]))), range(len(self.next_depth_vertex[0]))), range(len(self.next_depth_vertex)))
self.next_depth_cut_point_list = np.array(self.next_depth_cut)[:,:,:,:,0]
self.next_depth_cut_propotion_list = np.array(self.next_depth_cut)[:,:,:,:,1]
self.next_depth_new_vertex_list = np.array(self.next_depth_cut)[:,:,:,:,2]
# self.next_depth_permutation_index_base = map(lambda x: map(lambda y: np.array(x).argsort()[y], range(len(self.next_depth_table_partion))), self.next_depth_table_partion_index)
# self.seperate_permutation_index =
print '!!!', self.next_depth_vertex, len(self.next_depth_cut_point_list[0][0][0][0])
self.next_result_list = map(lambda (x, y, z): self.cal_cut_point_and_corresponding_table_partion2(x, y, z), list(itertools.product(range(len(self.next_depth_permutation_table_partion_after_product)), range(len(self.next_depth_cut_point_list[0])), range(len(self.next_depth_permutation_table_partion_after_product[0])))))
print '***'
print self.next_result_list
self.p_list = np.array(self.next_result_list)[:,:,:,0]
# print '###', self.p_list
self.next_depth_all_vertex = np.array(self.next_result_list)[:,:,:,1]
print '###', self.next_depth_all_vertex
z = map(lambda x: map(lambda y: list(np.array(x[y]).flatten()), range(len(x))), self.next_depth_permutation_index_after_product)[0]
print len(z)
self.permutation_table_partion_index = z
self.cal_p_table_partion(self.next_depth_table_partion)
return
else:
return self.p_list
# def get_part(self, target_from_index, target_at_index):
# target_list = self.p_list[target_from_index]
# return map(lambda c: target_list[c][target_at_index], range(len(target_list)))
#
# def filter_p_positive(self, target):
# return filter(lambda x: x>0, target)
    def cal_cut_point_and_corresponding_table_partion2(self, x, y, z):
        # Evaluate one (permutation x, vertex-set y, partition-permutation z)
        # combination: for every sub-partition compute its cut probabilities
        # via ...partion3, then fold the branches together with flatten().
        print x,y,z
        table_partion = list(self.next_depth_permutation_table_partion_after_product[x][z])
        cut_point_list = self.next_depth_cut_point_list[x][y]
        cut_propotion_list = self.next_depth_cut_propotion_list[x][y]
        new_vertex_list = self.next_depth_new_vertex_list[x][y]
        print table_partion, cut_point_list, cut_propotion_list, new_vertex_list
        print self.next_depth_index
        # self.t[partition][index]: results of partion3 for every combination.
        self.t = map(lambda x: map(lambda y: self.cal_cut_point_and_corresponding_table_partion3(table_partion[x], cut_point_list[x][y], cut_propotion_list[x][y], new_vertex_list[x][y]), range(len(self.next_depth_index))), range(len(table_partion)))
        # self.p = map(lambda x, y )
        # Cross-product accumulation over the branches of self.t.
        tt = map(lambda x: self.flatten(x), range(len(self.t)))
        print tt
        return tt
    def flatten(self, t_index):
        # Fold branch t_index of self.t into the running cross-product of
        # probabilities (self.p) and vertex lists (self.ne_vertex).
        if t_index == 0:
            # First branch seeds the accumulators.
            self.p = map(lambda x: self.t[t_index][x][0], range(len(self.t[t_index])))
            print self.p
            self.ne_vertex = map(lambda x: self.t[t_index][x][1], range(len(self.t[t_index])))
        else:
            # Cross every accumulated entry with every entry of this branch;
            # new_assign_value fills slot x * len(self.p) + y.
            self.pp = [[0]] * len(self.p) * len(self.t[t_index])
            self.nne_vertex = [[0]] * len(self.p) * len(self.t[t_index])
            map(lambda (x, y) : self.new_assign_value(x, y, t_index), list(itertools.product(range(len(self.t[t_index])), range(len(self.p)))))
            # self.ne_vertex = map(lambda x: map(lambda y: self.ne_vertex[y].append(self.t[t_index][x][1]), range(len(self.ne_vertex))), range(len(self.t[t_index])))
            self.p = self.pp
            self.ne_vertex = self.nne_vertex
        # NOTE(review): self.nne_vertex is read here even on the t_index == 0
        # path, where only self.ne_vertex was refreshed -- this appears to rely
        # on state from a previous call; possibly self.ne_vertex was intended.
        self.result = map(lambda x: [self.p[x], self.nne_vertex[x]], range(len(self.p)))
        return self.result
def new_assign_value(self, x, y, t_index):
self.pp[x * len(self.p) + y] = [self.p[y][0] * self.t[t_index][x][0][0]]
if self.pp[x * len(self.p) + y][0] == 0:
self.nne_vertex[x * len(self.p) + y] = [[]]
else:
self.nne_vertex[x * len(self.p) + y] = [self.ne_vertex[y], self.t[t_index][x][1]]
    def cal_cut_point_and_corresponding_table_partion3(self, table_partion, cut_point_list, cut_propotion_list, new_vertex_list):
        # Variant of cal_cut_point_and_corresponding_table_partion that takes
        # all inputs explicitly and stashes them on self, because the helper
        # cal_p_cut_propotion reads self.cut_point_list / self.new_vertex_list.
        print table_partion, cut_point_list, new_vertex_list
        self.table_partion = table_partion
        self.cut_point_list = cut_point_list
        self.cut_propotion_list = cut_propotion_list
        self.new_vertex_list = new_vertex_list
        # Fewer cut points than needed to split into len(table_partion) parts:
        # probability zero, no vertices.
        if (len(self.table_partion) - 1) > len(self.cut_point_list):
            self.result = [[[0], []]]
        else:
            # Every way of choosing len(table_partion) - 1 of the cut points.
            self.combination_index = list(itertools.combinations(range(len(self.cut_point_list)), (len(self.table_partion) - 1)))
            # print self.cut_point_list, self.new_vertex_list
            # print table_partion
            # print self.combination_index
            # self.combination_index_list = [self.combination_index] * len(self.permutation_table_partion)
            # print self.combination_index_list
            self.result = map(self.cal_p_cut_propotion, self.combination_index)
        return self.result
def cal_cut_point_and_corresponding_table_partion(self, table_partion):
if (len(table_partion) - 1) > len(self.cut_point_list):
self.result = [[[0], []]]
else:
self.combination_index = list(itertools.combinations(range(len(self.cut_point_list)), (len(self.table_partion) - 1)))
# print self.cut_point_list, self.new_vertex_list
# print table_partion
# print self.combination_index
# self.combination_index_list = [self.combination_index] * len(self.permutation_table_partion)
# print self.combination_index_list
self.result = map(self.cal_p_cut_propotion, self.combination_index)
return self.result
# def cal_next_level_table_partion(self, table_partion_index)
    def cal_p_cut_propotion(self, index):
        # For one choice of cut points (tuple of indices into
        # self.cut_point_list) compute the Dirichlet density of the merged
        # proportions together with the sub-rectangles they create.
        # cut_point = map(lambda x:self.cut_point_list[x], list(index))
        # print index
        new_vertex = self.cal_new_vertex(index)
        # print new_vertex
        cut_propotion = self.cal_cut_propotion(index)
        # print cut_propotion
        # next_cut = np.array(map(self.detect_cut_point_list, new_vertex))
        # next_cut_point_num = map(len, list(next_cut[:, 0]))
        # print self.table_partion, next_cut_point_num
        # diff = map(lambda(x, y): x - y, zip(self.table_partion, next_cut_point_num))
        # if len(filter(lambda x: x <= 0, diff)) > 0:
        # return [[0], [[]]]
        # else:
        return [[self.cal_p_dirichlet(cut_propotion)], new_vertex]
# def cal_combination_cut_point(self, cut_point_index):
# return map(lambda x:self.cut_point_list[x], cut_point_index)
#
def cal_permutation_table_partion(self, table_partion_index):
return map(lambda x:self.table_partion[x], table_partion_index)
    def cal_cut_propotion(self, propotion_index):
        # Merge the elementary proportions between the chosen cut points so
        # the result has one entry per resulting part (entries sum to 1).
        if len(propotion_index) == (len(self.cut_propotion_list) - 1):
            # Every possible cut is used: take the proportions as-is.
            cut_propotion = self.cut_propotion_list
        else:
            # Interior parts: sum the elementary proportions between
            # consecutive chosen cut points; then prepend/append the ends.
            cut_propotion = map(lambda x: np.array(self.cut_propotion_list)[propotion_index[x] + 1:propotion_index[x + 1] + 1].sum(), range(len(propotion_index) - 1))
            cut_propotion.insert(0, np.array(self.cut_propotion_list)[0:propotion_index[0] + 1].sum())
            cut_propotion.append(1 - np.array(self.cut_propotion_list)[0:propotion_index[-1] + 1].sum())
        return cut_propotion
    def cal_new_vertex(self, vertex_index):
        # Merge the elementary sub-rectangles between the chosen cut points
        # into one [top-left, bottom-right] rectangle per resulting part.
        print vertex_index
        if len(vertex_index) == (len(self.new_vertex_list) - 1):
            # Every possible cut is used: rectangles stay elementary.
            new_vertex = self.new_vertex_list
        else:
            new_vertex = map(lambda x: [self.new_vertex_list[vertex_index[x] + 1][0], self.new_vertex_list[vertex_index[x + 1]][1]], range(len(vertex_index) - 1))
            new_vertex.insert(0, [self.new_vertex_list[0][0], self.new_vertex_list[vertex_index[0]][1]])
            new_vertex.append([self.new_vertex_list[vertex_index[-1] + 1][0], self.new_vertex_list[-1][1]])
        return new_vertex
    def cal_p_one_table_partion_list(self, table_partion_list):
        # Evaluate one decoded table-partition list against the current image:
        # resets the per-run state, collects the rectangle vertices for every
        # colour, then starts the recursive probability computation.
        self.c = self.c + 1
        print '***'
        print self.c
        self.table_partion_list = table_partion_list
        # Root rectangle covering the whole 1024x768 image (inclusive corners).
        self.vertex = [[(0, 0), (1023, 767)]]
        self.p_list = [[[[1]]]]
        self.next_depth_all_vertex = [[[[[(0, 0), (1023, 767)]]]]]
        self.permutation_table_partion_index = [[[[0]]]]
        self.x = 0
        self.all_vertex_list = []
        # NOTE(review): map() used for its side effect only -- relies on
        # Python 2's eager map; under Python 3 find_vertex would never run.
        map(self.find_vertex, self.color_space)
        p = self.cal_p_table_partion(self.table_partion_list[self.x])
        # print p
        return p
def cal_likelihood(self):
self.c = 0
map(self.cal_p_one_table_partion_list, self.all_table_partion_list[4:6])
    def __call__(self, img_num):
        # Process images E:/ncrp/<img_num>.png down to E:/ncrp/3.png, loading
        # each image and running the likelihood pass, recursing with
        # img_num - 1 until img_num == 3.
        # NOTE(review): hard-coded Windows path; calling with img_num < 3
        # never hits the base case and recurses without bound.
        self.img = cv2.imread('E:/ncrp/'+str(img_num)+'.png')
        self.cal_likelihood()
        if img_num == 3:
            return
        else:
            return self.__call__(img_num - 1)
def main():
    # Wire the pipeline: decode every table partition from CODE_BASE, compute
    # the prior over them, then the likelihood over images TREE_NUM .. 3.
    Decode_list = decoder(code_base = CODE_BASE)
    All_Table_Partion_List = Decode_list()
    Prior = prior(All_Table_Partion_List)
    Prior_Probability = Prior()
    Likelihood = likelihood(All_Table_Partion_List, COLOR_SPACE, ALPHA_BASE)
    Likelihood_Probability = Likelihood(img_num = TREE_NUM )
# Script entry point.
if __name__ == "__main__":
    main()
| [
"ningtangzju@gmail.com"
] | ningtangzju@gmail.com |
5b32271db5c92ee02645aa87c392e218c743cf69 | 4e8ac215b672b333f19da87787c0d8768fee439e | /MIDI Remote Scripts/ableton/v2/control_surface/control/control.py | b050616c5b7e2facc9c92208f7c4ed9e683e3dbf | [
"MIT"
] | permissive | aarkwright/ableton_devices | 593f47293c673aa56f6e0347ca6444b7fce2812a | fe5df3bbd64ccbc136bba722ba1e131a02969798 | refs/heads/master | 2020-07-02T08:11:21.137438 | 2019-08-09T13:48:06 | 2019-08-09T13:48:06 | 201,467,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,384 | py | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\ableton\v2\control_surface\control\control.py
# Compiled at: 2019-05-15 03:17:58
from __future__ import absolute_import, print_function, unicode_literals
from functools import partial
from ...base import lazy_attribute, mixin, nop, task, Disconnectable, EventObject, NamedTuple
__all__ = ('Control', 'InputControl', 'ControlManager', 'control_event', 'control_color',
'Connectable')
class ControlManager(EventObject):
    """
    Base class needed to define Controls. The Control Manager stores the state of the
    Controls.
    """

    def __init__(self, *a, **k):
        super(ControlManager, self).__init__(*a, **k)
        # Maps Control (class-level descriptor) -> its per-instance State.
        self._control_states = dict()

    def add_control(self, name, control):
        """
        Dynamically adds a Control to the object. The Control will be added to the object
        as an attribute with the given `name`.
        """
        if hasattr(self, name):
            raise AttributeError(b'Control would overwrite an existing property')
        control_state = control._get_state(self)
        setattr(self, name, control_state)
        return control_state

    @lazy_attribute
    def _tasks(self):
        """
        Task Group for Controls for time-based events and feedback.
        """
        # Created lazily so managers without timed controls pay no cost.
        return task.TaskGroup()

    def control_notifications_enabled(self):
        """
        Override to enable/disable triggering events for all Controls in this
        Control Manager.
        """
        return True

    def update(self):
        """
        Sends the current feedback to all Control Elements that are connected to Controls
        of this Control Manager.
        """
        for control_state in self._control_states.values():
            control_state.update()
def control_event(event_name):
    """
    Defines an event of a Control. The event can be used in two ways:
    * As a function-decorator on a class level
    * By assigning a callable to the event
    Only one listener can be connected with an event.
    Events need to be defined on a Control class-level.
    """

    def event_decorator(self):

        def event_listener_decorator(event_listener):
            # NOTE(review): this tests the listener against the dict's *keys*
            # (event names), not the registered callables -- presumably meant
            # to guard double registration; confirm against upstream source.
            assert event_listener not in self._event_listeners
            self._event_listeners[event_name] = event_listener
            return self

        return event_listener_decorator

    def event_setter(self, event_listener):
        self._event_listeners[event_name] = event_listener

    # Descriptor: reading the event yields a decorator, assigning registers
    # the listener directly.
    return property(event_decorator, event_setter)
class control_color(object):
    """
    Defines a color of a Control. The color is created with a default color and will
    be update the Control every time a new color is set.
    Colors need to be defined on Control-state level.
    """

    def __init__(self, default_color, *a, **k):
        super(control_color, self).__init__(*a, **k)
        self.default_color = default_color

    def __get__(self, obj, owner):
        # Class-level access has no state to read.
        if obj is None:
            return self.default_color
        # Fall back to the default until a color was explicitly stored.
        return obj._colors.get(self, self.default_color)

    def __set__(self, obj, val):
        obj._colors[self] = val
        obj._send_current_color()
class Control(object):
    """
    Base class for all Controls. Controls are used to define a high level interface for
    low level Control Elements. They add a useful set of functionality to it:
    * Well defined and descriptive events that compensate for inconsistencies of
    the received MIDI.
    * Logic and state common in other UI frameworks, like an enabled state to deactivate
    the Control under certain circumstances.
    * Feedback to represents different states of the Control.
    Controls are a virtual representation of a relation between a hardware control and
    a piece of logic. A Control needs to be connected with a Control Element to be
    functional. The Control Element is connected and disconnected by using
    :meth:`Control.State.set_control_element`. The user of a Control does not need to
    care a Control Element is currently connected, which makes working with Controls
    much less error-prone than working with Control Elements directly.
    Controls are a Descriptor on a class level, so listeners can be easily defined using
    decorators. Events are defined using :func:`control_event`. Classes using Controls
    need to inherit from :class:`ControlManager`.
    The Control needs an actual stateful representation, that instantiated for each
    instance of the class implementing it. This is defined in the inner State-class.
    """

    class State(EventObject):
        """
        State-full representation of the Control.
        """
        enabled = True

        def __init__(self, control=None, manager=None, *a, **k):
            super(Control.State, self).__init__(*a, **k)
            assert control is not None
            assert manager is not None
            # Per-state storage read/written by control_color descriptors.
            self._colors = dict()
            self._manager = manager
            # Listeners are shared with the class-level Control descriptor.
            self._event_listeners = control._event_listeners
            self._control_element = None
            self._has_tasks = False
            manager.register_disconnectable(self)
            return

        def disconnect(self):
            super(Control.State, self).disconnect()
            # Only tear down the task group if it was ever created.
            if self._has_tasks:
                self.tasks.kill()
                self.tasks.clear()

        @lazy_attribute
        def tasks(self):
            """
            Returns a Task Group for this Control. The Task Group is created the first
            time the property is accessed.
            """
            self._has_tasks = True
            return self._manager._tasks.add(task.TaskGroup())

        def set_control_element(self, control_element):
            """
            Connect a Control with a Control Element or disconnect the Control if
            None is passed. When connecting, the Control Element is reset and the
            Controls current color is sent. When disconnecting, the Control Element
            needs to be updates by its new owner.
            """
            self._control_element = control_element
            if self._control_element:
                self._control_element.reset_state()

        def _call_listener(self, listener_name, *args):
            # Dispatch to the registered listener with the manager as `self`
            # and this state appended as the last argument.
            listener = self._event_listeners.get(listener_name, None)
            if listener is not None and self._notifications_enabled():
                args = args + (self,)
                listener(self._manager, *args)
            return

        def _has_listener(self, listener_name):
            return listener_name in self._event_listeners

        def _event_listener_required(self):
            return len(self._event_listeners) > 0

        def _notifications_enabled(self):
            return self.enabled and self._manager.control_notifications_enabled()

        def update(self):
            pass

        def _send_current_color(self):
            pass

    # Extra constructor arguments forwarded to State by _make_control_state.
    _extra_kws = {}
    _extra_args = []

    def __init__(self, extra_args=None, extra_kws=None, *a, **k):
        super(Control, self).__init__(*a, **k)
        self._event_listeners = {}
        if extra_args is not None:
            self._extra_args = extra_args
        if extra_kws is not None:
            self._extra_kws = extra_kws
        return

    def __get__(self, manager, owner):
        # Descriptor protocol: instance access yields the per-manager State.
        if manager is not None:
            return self._get_state(manager)
        else:
            return self

    def __set__(self, manager, owner):
        raise RuntimeError(b'Cannot change control.')

    def _make_control_state(self, manager):
        return self.State(control=self, manager=manager, *self._extra_args, **self._extra_kws)

    def _get_state(self, manager, state_factory=None):
        if self not in manager._control_states:
            if state_factory is None:
                state_factory = self._make_control_state
            # None marks "under construction" to catch re-entrant access.
            manager._control_states[self] = None
            manager._control_states[self] = state_factory(manager)
        if manager._control_states[self] is None:
            raise RuntimeError(b'Cannot fetch state during construction of controls.')
        return manager._control_states[self]

    def _clear_state(self, manager):
        if self in manager._control_states:
            del manager._control_states[self]
class InputControl(Control):
    """
    Base Class for Controls that react to a MIDI value event.
    """
    value = control_event(b'value')

    class State(Control.State):
        """
        State-full representation of the Control.
        """

        def __init__(self, control=None, channel=None, identifier=None, *a, **k):
            super(InputControl.State, self).__init__(control=control, *a, **k)
            self._value_slot = None
            self._channel = channel
            self._identifier = identifier
            self._register_value_slot(self._manager, control)
            self._manager.register_disconnectable(self)
            return

        def set_control_element(self, control_element):
            """
            Connects the Control to the value-event of the Control Element and sets the
            defined :attr:`channel` and :attr:`identifier`.
            """
            super(InputControl.State, self).set_control_element(control_element)
            if self._value_slot:
                self._value_slot.subject = control_element
            if self._control_element:
                if self._channel is not None:
                    self._control_element.set_channel(self._channel)
                if self._identifier is not None:
                    self._control_element.set_identifier(self._identifier)
            return

        def _register_value_slot(self, manager, control):
            # Only listen when the owning Control declared a value listener.
            if self._event_listener_required():
                self._value_slot = self.register_slot(None, self._on_value, b'value')
            return

        def _on_value(self, value, *a, **k):
            self._call_listener(b'value', value)

        @property
        def channel(self):
            """
            Translates the channel of the received MIDI when sent to Live.
            """
            return self._channel

        @channel.setter
        def channel(self, channel):
            self._channel = channel
            if self._control_element:
                self._control_element.set_channel(self._channel)

        @property
        def identifier(self):
            """
            Translates the identifier of the received MIDI when sent to Live.
            """
            return self._identifier

        @identifier.setter
        def identifier(self, value):
            self._identifier = value
            if self._control_element:
                self._control_element.set_identifier(self._identifier)
class ProxyControl(object):
    """
    Control that has its own event listeners, but forwards everything else from the
    proxied control. This way, a derived class can forward the control of its base class.
    """

    def __init__(self, control=None, *a, **k):
        super(ProxyControl, self).__init__(*a, **k)
        self._control = control
        assert not self._control._event_listeners, b'Cannot forward control that already has events.'

    def _make_control_state(self, manager):
        """
        Pass the proxy control to the state, as this one includes the event handlers
        """
        return self._control.State(control=self, manager=manager, *self._control._extra_args, **self._control._extra_kws)

    def _get_state(self, manager, state_factory=None):
        # State is stored under the proxied control, built with our factory.
        return self._control._get_state(manager, self._make_control_state)

    def _clear_state(self, manager):
        self._control._clear_state(manager)
def forward_control(control):
    # Build an on-the-fly subclass mixing ProxyControl into the control's own
    # class, so a derived ControlManager can re-expose a base class's control
    # while attaching its own event listeners.
    return mixin(ProxyControl, control.__class__)(control)
class NullSlot(Disconnectable):
    # Placeholder slot used while no property is connected; inherits a no-op
    # disconnect() from Disconnectable.
    pass
class Connectable(EventObject):
    """
    Mixin for connecting a property with a control.
    """
    requires_listenable_connected_property = False

    def __init__(self, *a, **k):
        super(Connectable, self).__init__(*a, **k)
        self._connection = self._make_empty_connection()

    def connect_property(self, subject, property_name, transform=nop):
        """
        Create a bidirectional connection between a property and a Control.
        The `subject` is the host of the property with the given name.
        The connected property needs to be listenable in case
        :attr:`requires_listenable_connected_property` is set to True.
        If a Control is a Connectable, it has certain expectations on the connected
        property.
        The transform argument can be used to transform the Controls value to the
        expected value of the property.
        Only one property can be connected at a time.
        """
        assert subject is not None
        self.disconnect_property()
        self._connection = NamedTuple(slot=self._register_property_slot(subject, property_name), getter=partial(getattr, subject, property_name), setter=partial(setattr, subject, property_name), transform=transform)
        return

    def disconnect_property(self):
        """
        Disconnects a property that has been connected with :meth:`connect_property`.
        """
        self._connection.slot.disconnect()
        self._connection = self._make_empty_connection()

    def _make_empty_connection(self):
        # All members are no-ops, so unconnected access is safe.
        return NamedTuple(slot=NullSlot(), getter=nop, setter=nop, transform=nop)

    def _register_property_slot(self, subject, property_name):
        if self.requires_listenable_connected_property:
            return self.register_slot(subject, self.on_connected_property_changed, property_name)
        else:
            return NullSlot()

    @property
    def connected_property_value(self):
        """
        Get/set the property connected with :meth:`connect_property`
        """
        return self._connection.getter()

    @connected_property_value.setter
    def connected_property_value(self, value):
        # Writes pass through the transform supplied at connect time.
        self._connection.setter(self._connection.transform(value))

    def on_connected_property_changed(self, value):
        """
        Called if the connected property changes.
        Has no effect if :attr:`requires_listenable_connected_property` is set to False.
        """
        pass
class SendValueMixin(object):
    """
    Mixin caching a value that is (re)sent to the connected control element
    whenever it changes, the element is (re)attached, or update() is called.
    """

    def __init__(self, *a, **k):
        super(SendValueMixin, self).__init__(*a, **k)
        self._value = 0

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        # Only re-send feedback when the cached value actually changed.
        if self._value != value:
            self._value = value
            self._send_current_value()

    def set_control_element(self, control_element):
        super(SendValueMixin, self).set_control_element(control_element)
        self._send_current_value()

    def update(self):
        super(SendValueMixin, self).update()
        self._send_current_value()

    def _send_current_value(self):
        if self._control_element:
            self._control_element.send_value(self._value)
class SendValueControl(Control):
    # Control whose State caches a value and re-sends it to the connected
    # element (behavior supplied by SendValueMixin).

    class State(SendValueMixin, Control.State):
        pass
"apollo.arkwright@gmail.com"
] | apollo.arkwright@gmail.com |
d534d3152604f66aba374500f80ac6a5d72d800e | a87c9544082acb38e74d25ced2f22370ffdca413 | /main.py | c7ecfba74fb9cf42f04bd48a49ed95f915597ec4 | [] | no_license | vladbusov/chess | ac3312e90d08408203ef93336b8dde1fc8d5b760 | ff63a45e22fbb4daebfa31bce88681a96b6c7043 | refs/heads/master | 2020-03-27T04:50:54.408867 | 2018-08-24T09:54:33 | 2018-08-24T09:54:33 | 145,973,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 46,571 | py | import pygame as pg
import math
import figure as fig_char
import draw
import random
def check_collision(list, col, row):
    """Return True when any figure in *list* occupies board cell (col, row)."""
    return any(fg.col == col and fg.row == row for fg in list)
def show_step(screen, col, row, color=(0, 255, 0)):
    """Outline board cell (col, row) on *screen*; off-board cells are ignored.

    The board origin is at screen (200, 110) with 30px cells; the frame is
    3px wide and green unless another *color* is given.
    """
    if not (1 <= col <= 8 and 1 <= row <= 8):
        return
    cell = pg.Rect((200 + (col - 1) * 30, 110 + (row - 1) * 30), (30, 30))
    pg.draw.rect(screen, color, cell, 3)
class figure(object):
    """A chess-piece sprite tied to a pygame screen.

    Loads the piece image once and keeps two scaled copies: a 30x30 board
    sprite (``image``) and a 60x60 enlarged one (``image_2``).
    """

    def __init__(self, color, role, screen):
        self.color = color
        self.role = role
        self.screen = screen
        # FIX: the original loaded the identical file from disk twice; load
        # once and derive both scaled surfaces from it (transform.scale does
        # not mutate its source).
        raw = pg.image.load("images/game/" + self.role + "_" + self.color + ".png")
        self.image = pg.transform.scale(raw, (30, 30))
        self.image_2 = pg.transform.scale(raw, (60, 60))

    def draw(self, col, row):
        """Blit the 30x30 sprite at board cell (col, row)."""
        x = 200 + (col - 1) * 30
        y = 110 + (row - 1) * 30
        self.screen.blit(self.image, (x, y))
class unit(object):
def __init__(self, figure, col, row):
self.figure = figure
self.col = col
self.row = row
self.enable = True
def draw(self):
self.figure.draw(self.col, self.row)
def move(self,col,row,game):
self.col = col
self.row = row
game.change_side()
def highlight(self,x,y):
if x > 200 + (self.col-1)*30 and x < 200 + self.col*30 and y > 110 + (self.row-1)*30 and y < 110 + self.row*30:
return True
return False
def light(self):
rect_1_rect = pg.Rect((200 + (self.col-1)*30, 110 + (self.row-1)*30), (30, 30))
rect_1_color = (255, 255, 0)
rect_1_width = 3
pg.draw.rect(self.figure.screen, rect_1_color, rect_1_rect, rect_1_width)
def red_light(self):
rect_1_rect = pg.Rect((200 + (self.col - 1) * 30, 110 + (self.row - 1) * 30), (30, 30))
rect_1_color = (255, 0, 0)
rect_1_width = 4
pg.draw.rect(self.figure.screen, rect_1_color, rect_1_rect, rect_1_width)
def die(self):
self.col = -200
self.row = -200
self.enable = False
    def make_step(self, x, y, list, game, color="black"):
        # Attempt a move to the board cell under screen point (x, y),
        # validating per piece role.  `list` holds all units; slices [:16] are
        # treated as the moving side (blocking) and [16:] as capturable
        # opponents -- NOTE(review): confirm that ordering with the caller.
        # Kings are never captured: landing on one either aborts or skips the
        # kill depending on the role branch.  A successful move calls
        # self.move(), which also flips the turn via game.change_side().
        row = math.floor((y - 110)/30 + 1)
        col = math.floor((x - 200)/30 + 1)
        if (col < 9 and col >= 1 and row < 9 and row >=1 ):
            if self.figure.role == "pawn":
                # Single step straight ahead (direction depends on color),
                # only onto an empty square.
                if ((self.row - row == 1 and color == "black") or (self.row - row == -1 and color == "white")) and self.col - col == 0:
                    empty_col = False
                    for fg in list:
                        if fg.col == col and fg.row == row:
                            empty_col = True
                    if empty_col == False:
                        self.move(col,row,game)
                # Double step.  NOTE(review): gated on self.row > 6 for both
                # colors and the jumped-over square is not checked -- looks
                # like only the side starting on rows 7/8 can ever use it.
                if ((self.row - row == 2 and color == "black") or (self.row - row == -2 and color == "white")) and self.col - col == 0:
                    empty_col = False
                    if self.row > 6:
                        for fg in list:
                            if fg.col == col and fg.row == row:
                                empty_col = True
                        if empty_col == False:
                            self.move(col,row, game)
                # Diagonal captures: move only happens if an enemy is there.
                if (self.row - row == 1 and color == "black") or (self.row - row == -1 and color == "white"):
                    if self.col - col == 1:
                        for fg in list[16:]:
                            if fg.col == col and fg.row == row:
                                if fg.figure.role == "king":
                                    return
                                fg.die()
                                self.move(col,row,game)
                    if self.col - col == -1:
                        for fg in list[16:]:
                            if fg.col == col and fg.row == row:
                                if fg.figure.role == "king":
                                    return
                                fg.die()
                                self.move(col,row,game)
            if self.figure.role == "knight":
                # The two loops enumerate the eight L-shaped offsets as
                # (+-1, +-2) and (+-2, +-1).
                for i in range(-1, 2, 2):
                    for j in range(-2, 3, 4):
                        if col == self.col + i and row == self.row + j:
                            if check_collision(list[:16], self.col + i, self.row + j) == False:
                                for fg in list[16:]:
                                    if fg.col == col and fg.row == row:
                                        if fg.figure.role != "king":
                                            fg.die()
                                        else:
                                            return
                                self.move(col,row,game)
                        elif col == self.col + j and row == self.row + i:
                            if check_collision(list[:16], self.col + j, self.row + i) == False:
                                for fg in list[16:]:
                                    if fg.col == col and fg.row == row:
                                        if fg.figure.role != "king":
                                            fg.die()
                                        else:
                                            return
                                self.move(col,row,game)
            if self.figure.role == "bishop" or self.figure.role == "queen":
                # Walk the four diagonals outward; each block_* flag latches
                # once the ray is obstructed (friendly piece, or past an
                # enemy) so later squares on that ray are ignored.
                block_up_left = False
                block_up_right = False
                block_down_left = False
                block_down_right = False
                for i in range(1, 9):
                    if check_collision(list[:16], self.col + i, self.row + i) == False:
                        if block_down_right == False:
                            if check_collision(list[16:], self.col + i, self.row + i) == False:
                                if self.col + i == col and self.row + i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col + i == col and self.row + i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_down_right = True
                    else:
                        block_down_right = True
                    if check_collision(list[:16], self.col + i, self.row - i) == False:
                        if block_up_right == False:
                            if check_collision(list[16:], self.col + i, self.row - i) == False:
                                if self.col + i == col and self.row - i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col + i == col and self.row - i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_up_right = True
                    else:
                        block_up_right = True
                    if check_collision(list[:16], self.col - i, self.row + i) == False:
                        if block_down_left == False:
                            if check_collision(list[16:], self.col - i, self.row + i) == False:
                                if self.col - i == col and self.row + i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col - i == col and self.row + i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_down_left = True
                    else:
                        block_down_left = True
                    if check_collision(list[:16], self.col - i, self.row - i) == False:
                        if block_up_left == False:
                            if check_collision(list[16:], self.col - i, self.row - i) == False:
                                if self.col - i == col and self.row - i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col - i == col and self.row - i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game )
                                block_up_left = True
                    else:
                        block_up_left = True
            if self.figure.role == "rook" or self.figure.role == "queen":
                # Same ray-walk scheme as the diagonals, along rank and file.
                block_up = False
                block_right = False
                block_left = False
                block_down = False
                for i in range(1, 9):
                    if check_collision(list[:16], self.col + i, self.row ) == False:
                        if block_right == False:
                            if check_collision(list[16:], self.col + i, self.row) == False:
                                if self.col + i == col and self.row == row:
                                    self.move(col,row,game)
                            else:
                                if self.col + i == col and self.row == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_right = True
                    else:
                        block_right = True
                    if check_collision(list[:16], self.col - i, self.row ) == False:
                        if block_left == False:
                            if check_collision(list[16:], self.col - i, self.row) == False:
                                if self.col - i == col and self.row == row:
                                    self.move(col,row,game)
                            else:
                                if self.col - i == col and self.row == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_left = True
                    else:
                        block_left = True
                    if check_collision(list[:16], self.col, self.row + i ) == False:
                        if block_down == False:
                            if check_collision(list[16:], self.col, self.row + i) == False:
                                if self.col == col and self.row + i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col == col and self.row + i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_down = True
                    else:
                        block_down = True
                    if check_collision(list[:16], self.col, self.row - i ) == False:
                        if block_up == False:
                            if check_collision(list[16:], self.col, self.row - i) == False:
                                if self.col == col and self.row - i == row:
                                    self.move(col,row,game)
                            else:
                                if self.col == col and self.row - i == row:
                                    for fg in list[16:]:
                                        if fg.col == col and fg.row == row:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col, row,game)
                                block_up = True
                    else:
                        block_up = True
            if self.figure.role == "king":
                # One step in any of the eight directions; (0, 0) is excluded.
                for i in range(-1, 2):
                    for j in range(-1, 2):
                        if col == self.col + i and row == self.row + j:
                            if (i != 0 or j != 0) and check_collision(list[:16], self.col + i, self.row + j) == False:
                                if check_collision(list[16:], self.col + i, self.row + j) == False:
                                    self.move(col,row, game)
                                else:
                                    for fg in list[16:]:
                                        if fg.col == self.col + i and fg.row == self.row + j:
                                            if fg.figure.role != "king":
                                                fg.die()
                                                self.move(col,row, game)
def show_steps(self,list):
if self.figure.role == "pawn":
if self.row > 6:
empty_col = False
for fg in list:
if fg.col == self.col and fg.row == self.row - 2:
empty_col = True
if empty_col == False:
show_step(self.figure.screen, self.col, self.row - 2)
empty_col = False
for fg in list:
if fg.col == self.col and fg.row == self.row -1:
empty_col = True
if empty_col == False:
show_step(self.figure.screen, self.col, self.row - 1)
new_row = self.row - 1
new_col1 = self.col - 1
new_col2 = self.col + 1
for fg in list[16:]:
if fg.row == new_row:
if fg.col == new_col1:
if fg.figure.role != "king":
show_step(self.figure.screen,new_col1, new_row, (150, 255, 150))
if fg.col == new_col2:
if fg.figure.role != "king":
show_step(self.figure.screen, new_col2, new_row, (150, 255, 150))
if self.figure.role == "knight":
for i in range(-1,2,2):
for j in range(-2,3,4):
if check_collision( list[:16], self.col + i, self.row + j) == False:
if check_collision( list[16:], self.col + i, self.row + j) == False:
show_step(self.figure.screen, self.col + i, self.row + j)
else:
king = True
for fg in list[16:]:
if fg.col == self.col + i and fg.row == self.row + j:
if fg.figure.role == "king":
king = False
if king == True:
show_step(self.figure.screen, self.col + i, self.row + j, (150, 255, 150))
if check_collision( list[:16], self.col + j, self.row + i) == False:
if check_collision( list[16:], self.col + j, self.row + i) == False:
show_step(self.figure.screen, self.col + j, self.row + i)
else:
king = True
for fg in list[16:]:
if fg.col == self.col + j and fg.row == self.row + i:
if fg.figure.role == "king":
king = False
if king == True:
show_step(self.figure.screen, self.col + j, self.row + i, (150, 255, 150))
if self.figure.role == "bishop" or self.figure.role == "queen":
block_up_left = False
block_up_right = False
block_down_left = False
block_down_right = False
for i in range(1,9):
if check_collision(list[:16], self.col + i, self.row + i) == False:
if block_down_right == False:
if check_collision(list[16:], self.col + i, self.row + i) == False:
show_step(self.figure.screen, self.col + i, self.row + i)
else:
for fg in list[16:]:
if fg.col == self.col + i and fg.row == self.row + i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col + i, self.row + i, (150,255,150))
block_down_right = True
else:
block_down_right = True
if check_collision(list[:16], self.col + i, self.row - i) == False:
if block_up_right == False:
if check_collision(list[16:], self.col + i, self.row - i) == False:
show_step(self.figure.screen, self.col + i, self.row - i)
else:
for fg in list[16:]:
if fg.col == self.col + i and fg.row == self.row - i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col + i, self.row - i, (150,255,150))
block_up_right = True
else:
block_up_right = True
if check_collision(list[:16], self.col - i, self.row + i) == False:
if block_down_left == False:
if check_collision(list[16:], self.col - i, self.row + i) == False:
show_step(self.figure.screen, self.col - i, self.row + i)
else:
for fg in list[16:]:
if fg.col == self.col - i and fg.row == self.row + i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col - i, self.row + i, (150,255,150))
block_down_left = True
else:
block_down_left = True
if check_collision(list[:16], self.col - i, self.row - i) == False:
if block_up_left == False:
if check_collision(list[16:], self.col - i, self.row - i) == False:
show_step(self.figure.screen, self.col - i, self.row - i)
else:
for fg in list[16:]:
if fg.col == self.col - i and fg.row == self.row - i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col - i, self.row - i, (150,255,150))
block_up_left = True
else:
block_up_left = True
if self.figure.role == "rook" or self.figure.role == "queen":
block_up = False
block_down = False
block_left = False
block_right = False
for i in range(1,9):
if check_collision(list[:16], self.col + i, self.row) == False:
if block_right == False:
if check_collision(list[16:], self.col + i, self.row) == False:
show_step(self.figure.screen, self.col + i, self.row)
else:
for fg in list[16:]:
if fg.col == self.col + i and fg.row == self.row:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col + i, self.row, (150, 255, 150))
block_right = True
else:
block_right = True
if check_collision(list[:16], self.col - i, self.row) == False:
if block_left == False:
if check_collision(list[16:], self.col - i, self.row) == False:
show_step(self.figure.screen, self.col - i, self.row)
else:
for fg in list[16:]:
if fg.col == self.col - i and fg.row == self.row:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col - i, self.row, (150, 255, 150))
block_left = True
else:
block_left = True
if check_collision(list[:16], self.col, self.row + i) == False:
if block_down == False:
if check_collision(list[16:], self.col, self.row + i) == False:
show_step(self.figure.screen, self.col, self.row + i)
else:
for fg in list[16:]:
if fg.col == self.col and fg.row == self.row + i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col, self.row + i, (150, 255, 150))
block_down = True
else:
block_down = True
if check_collision(list[:16], self.col, self.row - i) == False:
if block_up == False:
if check_collision(list[16:], self.col, self.row - i) == False:
show_step(self.figure.screen, self.col, self.row - i)
else:
for fg in list[16:]:
if fg.col == self.col and fg.row == self.row - i:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col, self.row - i, (150, 255, 150))
block_up = True
else:
block_up = True
if self.figure.role == "king":
for i in range(-1, 2):
for j in range(-1, 2):
if (i != 0 or j != 0) and check_collision(list[:16], self.col + i, self.row + j) == False:
if check_collision(list[16:], self.col + i, self.row + j) == False:
show_step(self.figure.screen, self.col + i, self.row + j)
else:
for fg in list[16:]:
if fg.col == self.col + i and fg.row == self.row + j:
if fg.figure.role != "king":
show_step(self.figure.screen, self.col + i, self.row + j, (150, 255, 150))
def show_steps_list(self,list,color = "black"):
result_steps = []
result_atack = []
if self.figure.role == "pawn":
if (self.row > 6 and color == "black") or (self.row < 3 and color == "white"):
empty_col = False
for fg in list:
if fg.col == self.col:
if (fg.row == self.row - 2 and color == "black") or (fg.row == self.row + 2 and color == "white"):
empty_col = True
if empty_col == False:
if color == "black":
result_steps.append( [self.col, self.row - 2])
else:
result_steps.append( [self.col, self.row + 2])
empty_col = False
for fg in list:
if fg.col == self.col:
if (fg.row == self.row - 1 and color == "black") or (fg.row == self.row + 1 and color == "white"):
empty_col = True
if empty_col == False:
if color == "black":
result_steps.append( [self.col, self.row - 1])
else:
result_steps.append( [self.col, self.row + 1])
if color == "black":
new_row = self.row - 1
else:
new_row = self.row + 1
new_col1 = self.col - 1
new_col2 = self.col + 1
for fg in list[16:]:
if fg.row == new_row:
if fg.col == new_col1:
result_atack.append( [new_col1, new_row] )
if fg.col == new_col2:
result_atack.append( [new_col2, new_row] )
if self.figure.role == "knight":
for i in range(-1,2,2):
for j in range(-2,3,4):
if check_collision( list[:16], self.col + i, self.row + j) == False:
if check_collision( list[16:], self.col + i, self.row + j) == False:
result_steps.append( [self.col + i, self.row + j] )
else:
result_atack.append([self.col + i, self.row + j])
if check_collision( list[:16], self.col + j, self.row + i) == False:
if check_collision( list[16:], self.col + j, self.row + i) == False:
result_steps.append([self.col + j, self.row + i])
else:
result_atack.append([self.col + j, self.row + i])
if self.figure.role == "bishop" or self.figure.role == "queen":
block_up_left = False
block_up_right = False
block_down_left = False
block_down_right = False
for i in range(1,9):
if check_collision(list[:16], self.col + i, self.row + i) == False:
if block_down_right == False:
if check_collision(list[16:], self.col + i, self.row + i) == False:
result_steps.append([self.col + i, self.row + i])
else:
result_atack.append([self.col + i, self.row + i])
block_down_right = True
else:
block_down_right = True
if check_collision(list[:16], self.col + i, self.row - i) == False:
if block_up_right == False:
if check_collision(list[16:], self.col + i, self.row - i) == False:
result_steps.append([self.col + i, self.row - i])
else:
result_atack.append([self.col + i, self.row - i])
block_up_right = True
else:
block_up_right = True
if check_collision(list[:16], self.col - i, self.row + i) == False:
if block_down_left == False:
if check_collision(list[16:], self.col - i, self.row + i) == False:
result_steps.append([self.col - i, self.row + i])
else:
result_atack.append([self.col - i, self.row + i])
block_down_left = True
else:
block_down_left = True
if check_collision(list[:16], self.col - i, self.row - i) == False:
if block_up_left == False:
if check_collision(list[16:], self.col - i, self.row - i) == False:
result_steps.append([ self.col - i, self.row - i])
else:
result_atack.append([self.col - i, self.row - i])
block_up_left = True
else:
block_up_left = True
if self.figure.role == "rook" or self.figure.role == "queen":
block_up = False
block_down = False
block_left = False
block_right = False
for i in range(1,9):
if check_collision(list[:16], self.col + i, self.row) == False:
if block_right == False:
if check_collision(list[16:], self.col + i, self.row) == False:
result_steps.append([self.col + i, self.row])
else:
result_atack.append([self.col + i, self.row])
block_right = True
else:
block_right = True
if check_collision(list[:16], self.col - i, self.row) == False:
if block_left == False:
if check_collision(list[16:], self.col - i, self.row) == False:
result_steps.append([self.col - i, self.row])
else:
result_atack.append([self.col - i, self.row])
block_left = True
else:
block_left = True
if check_collision(list[:16], self.col, self.row + i) == False:
if block_down == False:
if check_collision(list[16:], self.col, self.row + i) == False:
result_steps.append([self.col, self.row + i])
else:
result_atack.append( [self.col, self.row + i])
block_down = True
else:
block_down = True
if check_collision(list[:16], self.col, self.row - i) == False:
if block_up == False:
if check_collision(list[16:], self.col, self.row - i) == False:
result_steps.append([self.col, self.row - i])
else:
result_atack.append([self.col, self.row - i])
block_up = True
else:
block_up = True
if self.figure.role == "king":
for i in range(-1, 2):
for j in range(-1, 2):
if (i != 0 or j != 0) and check_collision(list[:16], self.col + i, self.row + j) == False:
if check_collision(list[16:], self.col + i, self.row + j) == False:
result_steps.append([self.col + i, self.row + j])
else:
result_atack.append([self.col + i, self.row + j])
return result_steps,result_atack
def mat(self, step_list, list):
if self.figure.role == "king":
for i in range(-1,2):
for j in range(-1,2):
if self.col + i > 0 and self.col + i < 9 and self.row + j > 0 and self.row + j < 9:
if (i != 0 or j != 0) and check_collision(list, self.col + i, self.row + j) == False:
attack = False
for step in step_list:
if self.col + i == step[0] and self.row + j == step[1]:
attack = True
if attack == False:
return False
return True
def draw_square(screen, size, x, y, color):
    """Paint one filled board cell at pixel (x, y).

    ``color`` is the string "black" (dark grey fill) or "white".
    """
    if color == "black":
        fill = (50, 50, 50)
    if color == "white":
        fill = (255, 255, 255)
    # width 0 => filled rectangle
    pg.draw.rect(screen, fill, pg.Rect((x, y), (size, size)), 0)
def draw_square_contur(screen, size, x, y, width):
    """Draw a dark-grey square outline of the given border width at (x, y)."""
    outline = pg.Rect((x, y), (size, size))
    pg.draw.rect(screen, (50, 50, 50), outline, width)
def main():
    """Entry point: pygame setup, the menu loop and the chess game loop.

    NOTE(review): several frames call pg.event.get() more than once; each
    call drains the event queue, so later checks in the same frame can
    miss clicks — confirm whether that is intentional.
    """
    pg.init()
    pg.display.set_caption("chess simulator")
    screen = pg.display.set_mode((640,480))
    running = True
    image = pg.image.load("images/menu/background.jpg")
    image2 = pg.image.load("images/game/background.jpg")
    chose = 0
    ch = 0
    gamestate = 0
    game_start = False
    current_game = draw.game()
    # figure sprites: one shared object per colour/role
    white_pawn = figure("white", "pawn", screen)
    black_pawn = figure("black", "pawn", screen)
    white_rook = figure("white", "rook", screen)
    black_rook = figure("black", "rook", screen)
    white_bishop = figure("white", "bishop", screen)
    black_bishop = figure("black", "bishop", screen)
    white_knight = figure("white", "knight", screen)
    black_knight = figure("black", "knight", screen)
    white_king = figure("white", "king", screen)
    black_king = figure("black", "king", screen)
    white_queen = figure("white","queen", screen)
    black_queen = figure("black", "queen", screen)
    # initial setup of both teams
    black_teem = []
    white_teem = []
    # the first 8 units of each team list are the pawns
    for i in range(1, 9):
        white_pawn_unit = unit(white_pawn, i, 2)
        white_teem.append(white_pawn_unit)
        black_pawn_unit = unit(black_pawn, i, 7)
        black_teem.append(black_pawn_unit)
    # next two units: the rooks
    white_teem.append(unit(white_rook, 1, 1))
    white_teem.append(unit(white_rook, 8, 1))
    ###
    black_teem.append(unit(black_rook, 1, 8))
    black_teem.append(unit(black_rook, 8, 8))
    # next two units: the knights (list indices 11 - 12)
    white_teem.append(unit(white_knight, 2, 1))
    white_teem.append(unit(white_knight, 7, 1))
    ###
    black_teem.append(unit(black_knight, 2, 8))
    black_teem.append(unit(black_knight, 7, 8))
    # next two units: the bishops (13-14)
    white_teem.append(unit(white_bishop, 3, 1))
    white_teem.append(unit(white_bishop, 6, 1))
    ###
    black_teem.append(unit(black_bishop, 3, 8))
    black_teem.append(unit(black_bishop, 6, 8))
    # fifteenth unit: the queen
    white_teem.append(unit(white_queen, 5, 1))
    black_teem.append(unit(black_queen, 5, 8))
    # sixteenth unit: the king
    white_teem.append(unit(white_king, 4, 1))
    black_teem.append(unit(black_king, 4, 8))
    # 1-based index of the currently selected black unit, 0 = none
    chose_figure = 0
    while running:
        pg.display.flip()
        # menu mode
        if gamestate == 0:
            # draw the menu background
            screen.blit(image, (0, 0))
            font = pg.font.Font(None, 70)
            game = font.render("Chess Simulator", True, (255, 255, 255))
            screen.blit(game, [130, 100])
            font = pg.font.Font(None, 50)
            play = None
            if chose != 1:
                if game_start == False:
                    play = font.render("PLAY", True, (255, 255, 255))
                    screen.blit(play, [270, 200])
                else:
                    play = font.render("CONTINUE", True, (230, 200, 255))
                    screen.blit(play, [230, 200])
                if chose != 3:
                    playnew = font.render("NEW GAME", True, (220, 255, 220))
                else:
                    playnew = font.render("NEW GAME", True, (255, 255, 0))
                screen.blit(playnew, [230, 268])
            else:
                # hovered variant of PLAY/CONTINUE (yellow-ish tint)
                if game_start == False:
                    play = font.render("PLAY", True, (255, 255, 0))
                    screen.blit(play, [270, 200])
                else:
                    play = font.render("CONTINUE", True, (230, 200, 0))
                    screen.blit(play, [230, 200])
                if chose != 3:
                    playnew = font.render("NEW GAME", True, (220, 255, 220))
                else:
                    playnew = font.render("NEW GAME", True, (255, 255, 0))
                screen.blit(playnew, [230, 268])
            if chose != 2:
                options = font.render("OPTIONS", True, (255, 255, 255))
                screen.blit(options, [240, 235])
            else:
                options = font.render("OPTIONS", True, (255, 255, 0))
                screen.blit(options, [240, 235])
            # mouse hit-testing for the menu entries
            pos = pg.mouse.get_pos()
            mouse_x, mouse_y = pos[0], pos[1]
            if mouse_x > 270 and mouse_x < 270 + play.get_width() and mouse_y > 200 and mouse_y < 200 + play.get_height():
                chose = 1
                for ev in pg.event.get():
                    if ev.type == pg.MOUSEBUTTONDOWN:
                        gamestate = 1
                        game_start = True
                        screen.fill((255, 255, 255))
            elif mouse_x > 240 and mouse_x < 240 + options.get_width() and mouse_y > 235 and mouse_y < 235 + options.get_height():
                chose = 2
            elif game_start == True:
                if mouse_x > 230 and mouse_x < 230 + playnew.get_width() and mouse_y > 268 and mouse_y < 268 + playnew.get_height():
                    chose = 3
                    for ev in pg.event.get():
                        if ev.type == pg.MOUSEBUTTONDOWN:
                            game_start = False
                            # rebuild both teams for a fresh game
                            black_teem = []
                            white_teem = []
                            # the first 8 units of each team list are the pawns
                            for i in range(1, 9):
                                white_pawn_unit = unit(white_pawn, i, 2)
                                white_teem.append(white_pawn_unit)
                                black_pawn_unit = unit(black_pawn, i, 7)
                                black_teem.append(black_pawn_unit)
                            # next two units: the rooks
                            white_teem.append(unit(white_rook, 1, 1))
                            white_teem.append(unit(white_rook, 8, 1))
                            ###
                            black_teem.append(unit(black_rook, 1, 8))
                            black_teem.append(unit(black_rook, 8, 8))
                            # next two units: the knights (list indices 11 - 12)
                            white_teem.append(unit(white_knight, 2, 1))
                            white_teem.append(unit(white_knight, 7, 1))
                            ###
                            black_teem.append(unit(black_knight, 2, 8))
                            black_teem.append(unit(black_knight, 7, 8))
                            # next two units: the bishops (13-14)
                            white_teem.append(unit(white_bishop, 3, 1))
                            white_teem.append(unit(white_bishop, 6, 1))
                            ###
                            black_teem.append(unit(black_bishop, 3, 8))
                            black_teem.append(unit(black_bishop, 6, 8))
                            # fifteenth unit: the queen
                            white_teem.append(unit(white_queen, 5, 1))
                            black_teem.append(unit(black_queen, 5, 8))
                            # sixteenth unit: the king
                            white_teem.append(unit(white_king, 4, 1))
                            black_teem.append(unit(black_king, 4, 8))
                            chose_figure = 0
                else:
                    chose = 0
            else:
                chose = 0
        # game mode
        if gamestate == 1:
            # game background
            screen.blit(image2,(0,0))
            # draw the 8x8 board (alternating cell colours)
            for i in range(1,9):
                for j in range(1,9):
                    k = (j+1) + i*9
                    if k%2 == 0:
                        draw_square(screen,30,200 + (j-1)*30 ,110 + (i-1)*30,"black")
                    else:
                        draw_square(screen, 30, 200 + (j - 1) * 30, 110 + (i - 1) * 30, "white")
            draw_square_contur(screen,242, 198, 108, 2)
            # board legend: rank numbers and file letters
            legend = pg.font.Font(None, 30)
            for i in range(1,9):
                num = legend.render(str(i), True, (0, 0, 0))
                screen.blit(num, [180, 118 + (i-1)*30])
            for i in range(1,9):
                letter = chr(ord('a') + (i-1))
                let = legend.render(letter, True, (0,0,0))
                screen.blit(let, [208 + (i-1)*30, 355 ])
            pos = pg.mouse.get_pos()
            mouse_x, mouse_y = pos[0], pos[1]
            # DRAW THE UNITS
            count = 1
            for un in black_teem:
                if un.enable:
                    # hover highlight; a click selects this unit
                    if un.highlight(mouse_x,mouse_y) == True:
                        un.light()
                        for ev in pg.event.get():
                            if ev.type == pg.MOUSEBUTTONDOWN:
                                chose_figure = count
                    un.draw()
                count = count + 1
            for un in white_teem:
                # hover highlight only; white is computer-controlled
                if un.enable:
                    if un.highlight(mouse_x, mouse_y) == True:
                        un.light()
                    un.draw()
            # captured ("zombie") white pieces listed left of the board
            y_cnt = 0
            col_cnt = 0
            for zombi in white_teem:
                if zombi.enable == False:
                    x = 30 + col_cnt*28
                    y = 110 + y_cnt*45
                    y_cnt = y_cnt + 1
                    if y_cnt >= 5:
                        col_cnt = col_cnt + 1
                        y_cnt = 0
                    screen.blit(zombi.figure.image_2, (x, y))
            # captured black pieces listed right of the board
            y_cnt = 0
            col_cnt = 0
            for zombi in black_teem:
                if zombi.enable == False:
                    x = 460 + col_cnt * 28
                    y = 110 + y_cnt * 45
                    # NOTE(review): unlike the white loop above, y_cnt is never
                    # incremented here, so all captured black pieces are drawn
                    # at the same position — looks like a missing
                    # "y_cnt = y_cnt + 1"; confirm and fix.
                    if y_cnt >= 5:
                        col_cnt = col_cnt + 1
                        y_cnt = 0
                    screen.blit(zombi.figure.image_2, (x, y))
            # black (human) turn: act on the selected unit
            if current_game.black == True:
                if chose_figure != 0:
                    black_teem[chose_figure-1].red_light()
                    black_teem[chose_figure-1].show_steps( black_teem + white_teem)
                    for ev in pg.event.get():
                        if ev.type == pg.MOUSEBUTTONDOWN:
                            black_teem[chose_figure-1].make_step(mouse_x,mouse_y, black_teem + white_teem, current_game)
            else:
                # white (computer) turn: random unit, random legal step
                chose_figure = random.randint(1,16)
                st,at = white_teem[chose_figure - 1].show_steps_list(white_teem + black_teem,"white")
                steps = st + at
                if len(steps) != 0:
                    white_teem[chose_figure - 1].red_light()
                    chosing_step = random.randint(1,len(steps) ) - 1
                    new_x = 200 + (steps[chosing_step][0]-1) * 30 + 15
                    new_y = 110 + (steps[chosing_step][1]-1) * 30 + 15
                    white_teem[chose_figure-1].make_step(new_x, new_y, white_teem + black_teem, current_game, color = "white")
                chose_figure = 0
            # check detection: does any black capture square hit the white king?
            black_future_steps = []
            black_future_attacks = []
            white_future_steps = []
            white_future_attacks = []
            for fg in black_teem:
                st,at = fg.show_steps_list(black_teem + white_teem)
                black_future_steps = black_future_steps + st
                black_future_attacks = black_future_attacks + at
            for cords in black_future_attacks:
                if white_teem[15].col == cords[0] and white_teem[15].row == cords[1]:
                    font = pg.font.Font(None, 30)
                    check = font.render("CHECK", True, (190,0,0))
                    screen.blit(check,[150,70])
            # in-game pause menu
            font = pg.font.Font(None, 50)
            if ch == 0:
                pause = font.render("pause", True, (0, 0, 0))
            else:
                pause = font.render("pause", True, (255, 255, 0))
            screen.blit(pause, [260, 60])
            if mouse_x > 260 and mouse_x < 260 + pause.get_width() and mouse_y > 60 and mouse_y < 60 + pause.get_height():
                ch = 1
                # NOTE(review): 'ev' here is whatever was last bound by an
                # earlier event loop (and may be unbound on the first frame)
                # — pause clicks can misfire or raise NameError; confirm.
                if ev.type == pg.MOUSEBUTTONDOWN:
                    gamestate = 0
            else:
                ch = 0
            # clicking outside the board deselects the current unit
            if mouse_x < 200 or mouse_y < 110 or mouse_x > 440 or mouse_y > 350:
                for ev in pg.event.get():
                    if ev.type == pg.MOUSEBUTTONDOWN:
                        chose_figure = 0
            # window-close handling
            for ev in pg.event.get():
                if ev.type == pg.QUIT:
                    running = False
if __name__ == "__main__":
main() | [
"to2002to2002@gmail.com"
] | to2002to2002@gmail.com |
90cb95220e9cdd80cd0f3a564548daa712b91caa | 1440dff537b6dd19d210ff6cfea425b2383a4949 | /PythonForEverybody/Ch2/Ex5_temp_converter.py | 567498e4b0ad7c975ab7855f0baaedff33d5fcc6 | [] | no_license | kieranmcgregor/Python | 519b183d0e3a306cc515dc04f53325ae9a880922 | 4750e46316910492b3763369d0b0d85d0ead812b | refs/heads/master | 2020-07-07T00:19:51.597429 | 2018-06-17T05:29:45 | 2018-06-17T05:29:45 | 74,042,634 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | celsius_empty = True
# Prompt until the user supplies a parseable number, then convert C -> F.
print("This program converts Celsius to Fahrenheit for all you yanks.")
while celsius_empty:
    celsius = input("Please enter a temperature in Celsius.\n")
    try:
        celsius = float(celsius)
        celsius_empty = False
    except ValueError:
        # fix: only bad numeric input should retry; the previous bare
        # "except:" also swallowed KeyboardInterrupt/SystemExit.
        print("Invalid entry, please enter a number.")
# Standard Celsius -> Fahrenheit formula.
fahrenheit = celsius * (9 / 5) + 32
print("{:.2f} C is equivalent to {:.2f} F.".format(celsius, fahrenheit))
| [
"kieranmcgregor@gmail.com"
] | kieranmcgregor@gmail.com |
48b211d3ffc2fe351f125460bfa2de347c5ad89c | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/frenetic-lang_pyretic/pyretic-master/pyretic/tests/test_mac_learner.py | 19c0e4482c157a77029346d963681440c374e52d | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 2,387 | py | #!/usr/bin/python
from mininet.net import Mininet
from mininet.node import RemoteController
import os, shlex, subprocess, utils, time
from utils import init
### Module Parameters
def get_controller():
    """Dotted path of the pyretic controller module under test."""
    module_name = 'pyretic.modules.mac_learner'
    return module_name
def run_mininet():
    """Launch a 3-switch cycle topology via mininet.sh and run pingall.

    Uses the command-line wrapper with an absolute path because sudo
    sanitises the environment.  (A dead, commented-out copy of the same
    setup done through the mininet Python API was removed.)
    """
    mn = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../mininet.sh'))
    cmd = '%s --topo cycle,3,4 --mac --test=pingall' % mn
    subprocess.call(shlex.split(cmd))
def process_controller_output(oldf, newf):
    """Copy the sorted lines of ``oldf`` into ``newf``, keeping only lines
    that mention at least one interesting keyword.

    Fix: a line containing several keywords used to be written once per
    matching keyword (duplicated output); each line is now written once.
    """
    lines = oldf.readlines()
    lines.sort()
    keywords = ['TEST', 'ERROR', 'error']
    # keep a line iff it contains any keyword
    for line in lines:
        if any(line.find(kw) >= 0 for kw in keywords):
            newf.write(line)
def process_mininet_output(oldf, newf):
    """Copy the sorted lines of ``oldf`` into ``newf``, keeping only lines
    that mention at least one interesting keyword (incl. ping summaries).

    Fix: a line containing several keywords used to be written once per
    matching keyword (duplicated output); each line is now written once.
    """
    lines = oldf.readlines()
    lines.sort()
    keywords = ['TEST', 'ERROR', 'error', 'received']
    # keep a line iff it contains any keyword
    for line in lines:
        if any(line.find(kw) >= 0 for kw in keywords):
            newf.write(line)
### Tests
# Harness wiring: bundles the controller module, the mininet runner and the
# two output filters into one utils.TestModule shared by the tests below.
test_mac_learner = utils.TestModule( __name__, __file__, get_controller, run_mininet, process_controller_output, process_mininet_output)
def test_mac_learner_i(init):
    """Exercise mac_learner in interpreted (-m i) mode."""
    mode = '-m i'
    utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, mode)
def test_mac_learner_r0(init):
    """Exercise mac_learner in reactive (-m r0) mode."""
    mode = '-m r0'
    utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, mode)
def test_mac_learner_p0(init):
    """Exercise mac_learner in proactive (-m p0) mode."""
    mode = '-m p0'
    utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, mode)
# def test_mac_learner_p0_nx(init):
# utils.run_test(test_mac_learner, init.test_dir, init.benchmark_dir, '-m p0 --nx')
### Executing this file starts the mininet instance for this test.
if __name__ == "__main__":
    run_mininet()  # manual invocation: just bring up the topology and ping
| [
"659338505@qq.com"
] | 659338505@qq.com |
89e2e5baa7853e3655458c8e7b859fabd2958057 | f10c419e99854949856536322f7e0f229fa54731 | /usagov-midterm-1/1-6.py | b038bc04456cea540299f21665495a2f0f3bdc32 | [] | no_license | ljforman/compjour-hw | 1f0fc598724cef7dbd121d5cb6097dcc15ba789b | 265b0206bb6890b0124a6aa10b4caf66f032a890 | refs/heads/master | 2016-09-05T11:21:09.297356 | 2015-05-07T21:11:00 | 2015-05-07T21:11:00 | 33,689,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,460 | py | import requests
BASE_USAJOBS_URL = "https://data.usajobs.gov/api/jobs"
names = ['California', 'Florida', 'Maryland', 'New York']
# Header row first; one [state, count] row per queried state follows.
thelist = [["State", "Job Count"]]
for n in names:
    # NumberOfJobs=1 keeps the payload small; we only need TotalJobs.
    atts = {'CountrySubdivision': n, 'NumberOfJobs': 1}
    resp = requests.get(BASE_USAJOBS_URL, params=atts)
    jobcount = int(resp.json()['TotalJobs'])
    thelist.append([n, jobcount])
# Google-Charts page template; %s is replaced with the data table below.
chartcode = """
<!DOCTYPE html>
<html>
  <head>
    <title>Sample Chart</title>
    <script type="text/javascript" src="https://www.google.com/jsapi"></script>
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css">
  </head>
  <body>
    <script type="text/javascript">
      google.load("visualization", '1.1', {packages:['corechart']});
      google.setOnLoadCallback(drawChart);
      function drawChart() {
        var data = %s
        var datatable = google.visualization.arrayToDataTable(data);
        var options = {
          width: 600,
          height: 400,
          legend: { position: 'none' },
        };
        var chart = new google.visualization.BarChart(document.getElementById('mychart'));
        chart.draw(datatable, options);
      }
    </script>
    <div class="container">
      <h1 style="text-align:center">Hello chart</h1>
      <div id="mychart"></div>
    </div>
  </body>
</html>
"""
# fix: use a context manager so the file is closed even if the write fails.
with open("1-6.html", "w") as htmlfile:
    htmlfile.write(chartcode % thelist)
| [
"ljlandry@gmail.com"
] | ljlandry@gmail.com |
7a36dd518dea63f2ff45446ae44a9e43136e79d1 | 10d23d6a193f1298513c0b472fae975c5e620a46 | /educational_centre/urls.py | 6ef7f712e325a321133ce887ca4267d5faf5ed88 | [] | no_license | RahimjonPc/centre | 7bc1a57dd49212a5dafb90d203a4234b29bd4336 | d90b43f417c522b21a0f8e92eb09add44764d85a | refs/heads/main | 2023-04-12T12:43:44.767769 | 2021-05-15T11:14:03 | 2021-05-15T11:14:03 | 339,373,487 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,858 | py | """educational_centre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
from . import views
from django.views.static import serve
from django.conf.urls import url
# URL routes.  Order matters: Django resolves with the first matching pattern.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api-auth/', include('rest_framework.urls')),
    # djoser auth endpoints (base, token auth, JWT) share the auth/ prefix
    path('auth/', include('djoser.urls')),
    path('auth/', include('djoser.urls.authtoken')),
    path('auth/', include('djoser.urls.jwt')),
    path('', views.AuthericationView.as_view(), name='home'),
    path('user', include('users.urls')),
    path('cource', include('cources.urls')),
    path('event', include('events.urls')),
    path('mark', include('marks.urls')),
    path('api/', include('api.urls')),
    # fix: removed the duplicate unnamed api/token/ entry and the second
    # admin/ entry — both were dead (shadowed) routes pointing at the same
    # views; the named token route is kept so reverse('token_obtain_pair')
    # keeps working.
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    # catch-all for remaining paths under the site root
    path('', include("Home.urls")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"abduganievrahimjon@gmail.com"
] | abduganievrahimjon@gmail.com |
d920d5b1e072cf4e51a188331916ae577e8a0295 | 2fa75564dd61bd3f66d424064b985285ffb9bcb6 | /tests/openmetrics/test_exposition.py | 564627224d2c6d41bf3d5485683a668fe5863941 | [
"Apache-2.0",
"BSD-2-Clause"
] | permissive | leecalcote/client_python | dbbe858d5e21f3ada39a7f3fdb934afeb17b91ae | c0cee96e3cbd6f9c2a768502dcc2be3a7330abb1 | refs/heads/master | 2020-03-29T20:57:13.305625 | 2018-09-24T14:54:56 | 2018-09-24T14:54:56 | 149,500,043 | 0 | 0 | Apache-2.0 | 2018-09-19T19:14:04 | 2018-09-19T19:14:03 | null | UTF-8 | Python | false | false | 8,001 | py | from __future__ import unicode_literals
import sys
import time
if sys.version_info < (2, 7):
# We need the skip decorators from unittest2 on Python 2.6.
import unittest2 as unittest
else:
import unittest
from prometheus_client import Gauge, Counter, Summary, Histogram, Info, Enum, Metric
from prometheus_client import CollectorRegistry
from prometheus_client.core import GaugeHistogramMetricFamily, Timestamp, Exemplar
from prometheus_client.openmetrics.exposition import (
generate_latest,
)
class TestGenerateText(unittest.TestCase):
def setUp(self):
self.registry = CollectorRegistry()
# Mock time so _created values are fixed.
self.old_time = time.time
time.time = lambda: 123.456
def tearDown(self):
time.time = self.old_time
def custom_collector(self, metric_family):
class CustomCollector(object):
def collect(self):
return [metric_family]
self.registry.register(CustomCollector())
def test_counter(self):
c = Counter('cc', 'A counter', registry=self.registry)
c.inc()
self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n', generate_latest(self.registry))
def test_counter_total(self):
c = Counter('cc_total', 'A counter', registry=self.registry)
c.inc()
self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n', generate_latest(self.registry))
def test_gauge(self):
g = Gauge('gg', 'A gauge', registry=self.registry)
g.set(17)
self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n# EOF\n', generate_latest(self.registry))
def test_summary(self):
s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry)
s.labels('c', 'd').observe(17)
self.assertEqual(b'''# HELP ss A summary
# TYPE ss summary
ss_count{a="c",b="d"} 1.0
ss_sum{a="c",b="d"} 17.0
ss_created{a="c",b="d"} 123.456
# EOF
''', generate_latest(self.registry))
@unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
def test_histogram(self):
s = Histogram('hh', 'A histogram', registry=self.registry)
s.observe(0.05)
self.assertEqual(b'''# HELP hh A histogram
# TYPE hh histogram
hh_bucket{le="0.005"} 0.0
hh_bucket{le="0.01"} 0.0
hh_bucket{le="0.025"} 0.0
hh_bucket{le="0.05"} 1.0
hh_bucket{le="0.075"} 1.0
hh_bucket{le="0.1"} 1.0
hh_bucket{le="0.25"} 1.0
hh_bucket{le="0.5"} 1.0
hh_bucket{le="0.75"} 1.0
hh_bucket{le="1.0"} 1.0
hh_bucket{le="2.5"} 1.0
hh_bucket{le="5.0"} 1.0
hh_bucket{le="7.5"} 1.0
hh_bucket{le="10.0"} 1.0
hh_bucket{le="+Inf"} 1.0
hh_count 1.0
hh_sum 0.05
hh_created 123.456
# EOF
''', generate_latest(self.registry))
def test_histogram_exemplar(self):
class MyCollector(object):
def collect(self):
metric = Metric("hh", "help", 'histogram')
# This is not sane, but it covers all the cases.
metric.add_sample("hh_bucket", {"le": "1"}, 0, None, Exemplar({'a': 'b'}, 0.5))
metric.add_sample("hh_bucket", {"le": "2"}, 0, None, Exemplar({'le': '7'}, 0.5, 12))
metric.add_sample("hh_bucket", {"le": "3"}, 0, 123, Exemplar({'a': 'b'}, 2.5, 12))
metric.add_sample("hh_bucket", {"le": "4"}, 0, None, Exemplar({'a': '\n"\\'}, 3.5))
metric.add_sample("hh_bucket", {"le": "+Inf"}, 0, None, None)
yield metric
self.registry.register(MyCollector())
self.assertEqual(b'''# HELP hh help
# TYPE hh histogram
hh_bucket{le="1"} 0.0 # {a="b"} 0.5
hh_bucket{le="2"} 0.0 # {le="7"} 0.5 12
hh_bucket{le="3"} 0.0 123 # {a="b"} 2.5 12
hh_bucket{le="4"} 0.0 # {a="\\n\\"\\\\"} 3.5
hh_bucket{le="+Inf"} 0.0
# EOF
''', generate_latest(self.registry))
    def test_nonhistogram_exemplar(self):
        # Exemplars on a non-histogram metric type must be rejected.
        class MyCollector(object):
            def collect(self):
                metric = Metric("hh", "help", 'untyped')
                # This is not sane, but it covers all the cases.
                metric.add_sample("hh_bucket", {}, 0, None, Exemplar({'a': 'b'}, 0.5))
                yield metric
        self.registry.register(MyCollector())
        with self.assertRaises(ValueError):
            generate_latest(self.registry)
    def test_nonhistogram_bucket_exemplar(self):
        # Even on a histogram, exemplars are only valid on *_bucket samples
        # (here it is attached to hh_count, so export must fail).
        class MyCollector(object):
            def collect(self):
                metric = Metric("hh", "help", 'histogram')
                # This is not sane, but it covers all the cases.
                metric.add_sample("hh_count", {}, 0, None, Exemplar({'a': 'b'}, 0.5))
                yield metric
        self.registry.register(MyCollector())
        with self.assertRaises(ValueError):
            generate_latest(self.registry)
    def test_gaugehistogram(self):
        # Gauge-histogram buckets render as gh_bucket samples with le labels.
        self.custom_collector(GaugeHistogramMetricFamily('gh', 'help', buckets=[('1.0', 4), ('+Inf', (5))]))
        self.assertEqual(b'''# HELP gh help
# TYPE gh gaugehistogram
gh_bucket{le="1.0"} 4.0
gh_bucket{le="+Inf"} 5.0
# EOF
''', generate_latest(self.registry))
    def test_info(self):
        # Info samples merge the metric labels with the info dict's keys;
        # the value is always 1.0.
        i = Info('ii', 'A info', ['a', 'b'], registry=self.registry)
        i.labels('c', 'd').info({'foo': 'bar'})
        self.assertEqual(b'''# HELP ii A info
# TYPE ii info
ii_info{a="c",b="d",foo="bar"} 1.0
# EOF
''', generate_latest(self.registry))
    def test_enum(self):
        # Statesets emit one sample per declared state; 1.0 marks the
        # currently active state, 0.0 the rest.
        i = Enum('ee', 'An enum', ['a', 'b'], registry=self.registry, states=['foo', 'bar'])
        i.labels('c', 'd').state('bar')
        self.assertEqual(b'''# HELP ee An enum
# TYPE ee stateset
ee{a="c",b="d",ee="foo"} 0.0
ee{a="c",b="d",ee="bar"} 1.0
# EOF
''', generate_latest(self.registry))
    def test_unicode(self):
        # Non-ASCII help text and label values are emitted UTF-8 encoded.
        c = Counter('cc', '\u4500', ['l'], registry=self.registry)
        c.labels('\u4500').inc()
        self.assertEqual(b'''# HELP cc \xe4\x94\x80
# TYPE cc counter
cc_total{l="\xe4\x94\x80"} 1.0
cc_created{l="\xe4\x94\x80"} 123.456
# EOF
''', generate_latest(self.registry))
    def test_escaping(self):
        # Backslash, newline and double quote are escaped in both help text
        # and label values.
        c = Counter('cc', 'A\ncount\\er\"', ['a'], registry=self.registry)
        c.labels('\\x\n"').inc(1)
        self.assertEqual(b'''# HELP cc A\\ncount\\\\er\\"
# TYPE cc counter
cc_total{a="\\\\x\\n\\""} 1.0
cc_created{a="\\\\x\\n\\""} 123.456
# EOF
''', generate_latest(self.registry))
    def test_nonnumber(self):
        # Sample values are coerced via float(); any object with __float__
        # is accepted.
        class MyNumber(object):
            def __repr__(self):
                return "MyNumber(123)"
            def __float__(self):
                return 123.0
        class MyCollector(object):
            def collect(self):
                metric = Metric("nonnumber", "Non number", 'untyped')
                metric.add_sample("nonnumber", {}, MyNumber())
                yield metric
        self.registry.register(MyCollector())
        self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber unknown\nnonnumber 123.0\n# EOF\n', generate_latest(self.registry))
    def test_timestamp(self):
        # Float/int timestamps render as given; Timestamp(sec, nsec) objects
        # render with full nanosecond precision.
        class MyCollector(object):
            def collect(self):
                metric = Metric("ts", "help", 'unknown')
                metric.add_sample("ts", {"foo": "a"}, 0, 123.456)
                metric.add_sample("ts", {"foo": "b"}, 0, -123.456)
                metric.add_sample("ts", {"foo": "c"}, 0, 123)
                metric.add_sample("ts", {"foo": "d"}, 0, Timestamp(123, 456000000))
                metric.add_sample("ts", {"foo": "e"}, 0, Timestamp(123, 456000))
                metric.add_sample("ts", {"foo": "f"}, 0, Timestamp(123, 456))
                yield metric
        self.registry.register(MyCollector())
        self.assertEqual(b'''# HELP ts help
# TYPE ts unknown
ts{foo="a"} 0.0 123.456
ts{foo="b"} 0.0 -123.456
ts{foo="c"} 0.0 123
ts{foo="d"} 0.0 123.456000000
ts{foo="e"} 0.0 123.000456000
ts{foo="f"} 0.0 123.000000456
# EOF
''', generate_latest(self.registry))
if __name__ == '__main__':
    unittest.main()  # run this module's tests when executed directly
| [
"brian.brazil@robustperception.io"
] | brian.brazil@robustperception.io |
28e1256708ac01d0ef1439aa6f1a4581420b3bb9 | 0498b81e07dd428fe6b1e405b7589e636aece8cb | /DonneesFormulaire/venv/Scripts/pip3.7-script.py | f4c7d2468e286929af7ce8285f323aa1de21ed93 | [] | no_license | YasmineSAYAD/FormationPython | 03edcc1c2588d9371f75e268333a12e4afb6e362 | b22cca087c29bdf20cab794340c4b510d55bf543 | refs/heads/main | 2023-01-20T23:02:15.575437 | 2020-11-30T17:37:32 | 2020-11-30T17:37:32 | 317,310,445 | 0 | 0 | null | null | null | null | ISO-8859-1 | Python | false | false | 428 | py | #!C:\Users\hp\PycharmProjects\DonnéesFormulaire\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.7'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing "-script.py"/".pyw"/".exe" suffix so pip sees its
    # canonical program name, then delegate to the pip console entry point
    # and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.7')()
    )
| [
"yasminesayad123@gmail.com"
] | yasminesayad123@gmail.com |
c9df41b736180146356179434bb8859b8b1e2619 | 4278102a326700c8020356864724b510062a93a3 | /venv/bin/rstpep2html.py | 84316fec067be6b188aef009426cacf25bdabff3 | [] | no_license | kuda1992/CloudComputingCapstoneStreaming | 6ca77e657a695857067a4db5fceb1525b4fd7608 | 0415fbbf0b49d690db08193ac0f30257eba72600 | refs/heads/master | 2020-06-09T20:17:18.610304 | 2019-07-07T22:01:43 | 2019-07-07T22:01:43 | 193,499,197 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/Users/kuda/PycharmProjects/CloudComputingCapstone/venv/bin/python
# $Id: rstpep2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML from PEP
(Python Enhancement Proposal) documents.
"""
try:
    import locale
    # Honour the user's default locale for date/number formatting.
    locale.setlocale(locale.LC_ALL, '')
except:
    # NOTE(review): bare except is upstream docutils boilerplate; locale
    # setup is best-effort and failures are deliberately ignored.
    pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML from reStructuredText-format PEP files. '
               + default_description)
# Command-line front end: parse argv, read the PEP source, emit HTML.
publish_cmdline(reader_name='pep', writer_name='pep_html',
                description=description)
| [
"kuda.nyamainashe@ext.soprasteria.com"
] | kuda.nyamainashe@ext.soprasteria.com |
4486200e73e58d84f2c643939596b3276a0507e6 | 91a39a4644200fc6c4ed5d8cc98c81411152767d | /app/v0/resources/Status.py | 7c4b782309566bf505f93a17df0229508e57c29f | [] | no_license | supratik94/algoshelf_backend_engineer | d652b8324ab673252c534c6389683528a2cd7234 | e50fb7842b11ec98b5c292f01e644ad5a08d2745 | refs/heads/master | 2020-08-22T11:47:46.576926 | 2019-10-21T15:56:30 | 2019-10-21T15:56:30 | 216,387,334 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | __author__ = "Supratik Majumdar"
__status__ = "Development"
from .Resource import API_Resource_v0
from ...models import Status as StatusSchema
class Status(API_Resource_v0):
    """Read-only resource exposing every status string in the Status table."""

    def get(self):
        """Return the list of all ``status`` values from the database."""
        rows = self.session.query(StatusSchema).all()
        return [row.status for row in rows]
| [
"supratikmajumdar94@gmail.com"
] | supratikmajumdar94@gmail.com |
872c78f5a5158413f4da1dcfb0918b692309b540 | 97af030d489f0bcc2d6d6a6cd2790e2e870d1a81 | /socks5-node-docker/server.py | 6665937ebea1948cdaf0950151aab7e84bd5ef0a | [
"MIT"
] | permissive | sentinel-official/sentinel | 42b44484795fbf41974868f3d88d5e4be298f0a6 | d0238f4ff21d0a7f2d684f645e47cff85550f0d9 | refs/heads/master | 2021-06-02T01:48:27.452187 | 2021-04-13T16:50:00 | 2021-04-13T16:50:00 | 102,737,071 | 359 | 78 | MIT | 2021-04-13T16:50:01 | 2017-09-07T12:57:06 | JavaScript | UTF-8 | Python | false | false | 583 | py | # coding=utf-8
import json
import falcon
from sentinel.server import GetSockCreds
from sentinel.server import Token
from sentinel.utils import JSONTranslator
class Up(object):
    """Liveness-check resource.

    GET and POST behave identically: respond 200 with ``{"status": "UP"}``.
    The duplicated bodies are factored into one helper so the payload and
    status code cannot drift apart between the two verbs.
    """

    @staticmethod
    def _respond(resp):
        # Shared success response for both HTTP verbs.
        resp.status = falcon.HTTP_200
        resp.body = json.dumps({'status': 'UP'})

    def on_post(self, req, resp):
        self._respond(resp)

    def on_get(self, req, resp):
        self._respond(resp)
# Falcon WSGI application; JSONTranslator middleware handles JSON bodies.
server = falcon.API(middleware=[JSONTranslator()])
server.add_route('/', Up())            # liveness check
server.add_route('/token', Token())    # token issuance
server.add_route('/creds', GetSockCreds())  # SOCKS credentials
| [
"riotcse@gmail.com"
] | riotcse@gmail.com |
02023b78130a61af3e1d7572f7dbd62210699420 | b09a75d77f618a51e1aa01a95939b54835c73da5 | /Lab 2/Q2.py | 947141649db4c899ecbaf7a78a10c92958168bf3 | [] | no_license | itzzhammy/Data-Science | 1017a3bc8f4bd673b385d074e8f99583a1856ee5 | 9d9781523b45e26e927bb0827eb6c5e3d4ab5fe4 | refs/heads/master | 2021-01-25T11:55:17.931962 | 2018-03-01T13:51:24 | 2018-03-01T13:51:24 | 123,440,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from math import pi
r=int(input("enter r \n"))
# circumference of a circle: 2*pi*r
c=2*pi*r
print (c)
# area of a circle: pi*r**2
a=pi*r**2
print (a) | [
"humza.murtaza@gmail.com"
] | humza.murtaza@gmail.com |
1114a68d8b2e5c4fd05992b6c8ee4ca498cc92af | 755e4e6e966433fe887f0f28f14916696b1588d7 | /code/exceptions/exceptions.py | 7e62995995ecbc57b12ce62c9ad0de8d73a94b9e | [] | no_license | phildue/FingerspellingRecognition | f18518a6e2e29b769d131e5b54846f00213f3ff1 | 1b5236142734d7b50f0f4161ecc533b7d10347b8 | refs/heads/master | 2021-03-24T10:40:24.507766 | 2017-07-03T09:33:09 | 2017-07-03T09:33:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 167 | py | class NotTrained(Exception):
pass
class NoRoiFound(Exception):
    """Raised when no ROI (region of interest) could be found."""
class NoContoursFound(Exception):
    """Raised when contour detection produced no contours."""
class DescriptorFailed(Exception):
pass | [
"phild@protonmail.com"
] | phild@protonmail.com |
3b08996888e72dea09176f6ad60355a106ca9859 | c1fe92bcaaffe896bac77ed1f1b24afb94bcf7df | /turboquant/blueprints/billing/gateways/stripecom.py | f05f083442633e79c6a298c06734df101a06904f | [] | no_license | cg94301/turboquant | a7eff8a04e468a5b77dc893aff16e3709715ac51 | 501c5440eb76cf4d8225e3d83bb2795bd8b22945 | refs/heads/master | 2023-02-17T20:07:34.955772 | 2021-01-17T16:44:22 | 2021-01-17T16:44:22 | 312,879,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,872 | py | import stripe
class Event(object):
    @classmethod
    def retrieve(cls, event_id):
        """
        Retrieve an event, this is used to validate the event in attempt to
        protect us from potentially malicious events not sent from Stripe.

        API Documentation:
          https://stripe.com/docs/api#retrieve_event

        :param event_id: Stripe event id (Stripe ids are strings, e.g. "evt_...")
        :type event_id: str
        :return: Stripe event
        """
        return stripe.Event.retrieve(event_id)
class Customer(object):
    @classmethod
    def create(cls, token=None, email=None, coupon=None, plan=None):
        """
        Create a new Stripe customer.

        API Documentation:
          https://stripe.com/docs/api#create_customer

        :param token: Token returned by JavaScript
        :type token: str
        :param email: E-mail address of the customer
        :type email: str
        :param coupon: Coupon code (only sent when truthy)
        :type coupon: str
        :param plan: Plan identifier (only sent when truthy)
        :type plan: str
        :return: Stripe customer
        """
        # 'source' and 'email' are always sent; plan/coupon only when given.
        params = dict(source=token, email=email)
        for key, value in (('plan', plan), ('coupon', coupon)):
            if value:
                params[key] = value
        return stripe.Customer.create(**params)
class Charge(object):
    @classmethod
    def create(cls, customer_id=None, currency=None, amount=None):
        """
        Create a new charge against a customer's default payment source.

        API Documentation:
          https://stripe.com/docs/api#create_charge

        :param customer_id: Stripe customer id (Stripe ids are strings, e.g. "cus_...")
        :type customer_id: str
        :param currency: 3 digit currency abbreviation
        :type currency: str
        :param amount: Amount in cents
        :type amount: int
        :return: Stripe charge
        """
        # The fixed statement descriptor is what appears on the card statement.
        return stripe.Charge.create(
            amount=amount,
            currency=currency,
            customer=customer_id,
            statement_descriptor='SNAKEEYES COINS')
class Coupon(object):
    @classmethod
    def create(cls, code=None, duration=None, amount_off=None,
               percent_off=None, currency=None, duration_in_months=None,
               max_redemptions=None, redeem_by=None):
        """
        Create a new coupon.

        API Documentation:
          https://stripe.com/docs/api#create_coupon

        :param code: Coupon code (used as the Stripe coupon ``id``)
        :param duration: How long the coupon will be in effect
        :type duration: str
        :param amount_off: Discount in a fixed amount
        :type amount_off: int
        :param percent_off: Discount based on percent off
        :type percent_off: int
        :param currency: 3 digit currency abbreviation
        :type currency: str
        :param duration_in_months: Number of months in effect
        :type duration_in_months: int
        :param max_redemptions: Max number of times it can be redeemed
        :type max_redemptions: int
        :param redeem_by: Redeemable by this date
        :type redeem_by: date
        :return: Stripe coupon
        """
        return stripe.Coupon.create(id=code,
                                    duration=duration,
                                    amount_off=amount_off,
                                    percent_off=percent_off,
                                    currency=currency,
                                    duration_in_months=duration_in_months,
                                    max_redemptions=max_redemptions,
                                    redeem_by=redeem_by)
    @classmethod
    def delete(cls, id=None):
        """
        Delete an existing coupon.

        API Documentation:
          https://stripe.com/docs/api#delete_coupon

        :param id: Coupon code
        :return: Stripe coupon
        """
        # NOTE: parameter name ``id`` shadows the builtin; kept for API
        # compatibility with existing callers.
        # Two API round-trips: fetch the coupon, then delete it.
        coupon = stripe.Coupon.retrieve(id)
        return coupon.delete()
class Card(object):
    @classmethod
    def update(cls, customer_id, stripe_token=None):
        """
        Update an existing card through a customer.

        Replaces the customer's default payment source with the card that
        the given token represents.

        API Documentation:
          https://stripe.com/docs/api/python#update_card

        :param customer_id: Stripe customer id (Stripe ids are strings)
        :type customer_id: str
        :param stripe_token: Stripe token
        :type stripe_token: str
        :return: Stripe customer
        """
        customer = stripe.Customer.retrieve(customer_id)
        customer.source = stripe_token
        return customer.save()
class Invoice(object):
    @classmethod
    def upcoming(cls, customer_id):
        """
        Retrieve an upcoming invoice item for a user.

        API Documentation:
          https://stripe.com/docs/api#retrieve_customer_invoice

        :param customer_id: Stripe customer id (Stripe ids are strings)
        :type customer_id: str
        :return: Stripe invoice
        """
        return stripe.Invoice.upcoming(customer=customer_id)
class Subscription(object):
    @classmethod
    def update(cls, customer_id=None, coupon=None, plan=None):
        """
        Update an existing subscription.

        API Documentation:
          https://stripe.com/docs/api/python#update_subscription

        :param customer_id: Customer id
        :type customer_id: str
        :param coupon: Coupon code
        :type coupon: str
        :param plan: Plan identifier
        :type plan: str
        :return: Stripe subscription
        """
        customer = stripe.Customer.retrieve(customer_id)
        # NOTE(review): assumes the customer has at least one subscription;
        # data[0] raises IndexError otherwise — confirm callers guarantee it.
        subscription_id = customer.subscriptions.data[0].id
        subscription = customer.subscriptions.retrieve(subscription_id)
        subscription.plan = plan
        if coupon:
            subscription.coupon = coupon
        return subscription.save()
    @classmethod
    def cancel(cls, customer_id=None):
        """
        Cancel an existing subscription.

        API Documentation:
          https://stripe.com/docs/api#cancel_subscription

        :param customer_id: Stripe customer id
        :type customer_id: str
        :return: Stripe subscription object
        """
        customer = stripe.Customer.retrieve(customer_id)
        # NOTE(review): same first-subscription assumption as update() above.
        subscription_id = customer.subscriptions.data[0].id
        return customer.subscriptions.retrieve(subscription_id).delete()
class Product(object):
    @classmethod
    def retrieve(cls, plan):
        """
        Retrieve an existing product.

        On a Stripe error the exception is printed and swallowed, and the
        method implicitly returns None — callers must handle a None result.

        API Documentation:
          https://stripe.com/docs/api#retrieve_product

        :param plan: Product identifier
        :type plan: str
        :return: Stripe product, or None on error
        """
        try:
            return stripe.Product.retrieve(plan)
        except stripe.error.StripeError as e:
            print(e)
class Plan(object):
    """Wrapper around the Stripe Plan API (plans and their backing products).

    All methods print and swallow StripeError, implicitly returning None on
    failure — callers must handle a None result.
    """
    @classmethod
    def retrieve(cls, plan):
        """
        Retrieve an existing plan.

        API Documentation:
          https://stripe.com/docs/api#retrieve_plan

        :param plan: Plan identifier
        :type plan: str
        :return: Stripe plan, or None on error
        """
        try:
            return stripe.Plan.retrieve(plan)
        except stripe.error.StripeError as e:
            print(e)
    @classmethod
    def list(cls):
        """
        List all plans.

        API Documentation:
          https://stripe.com/docs/api#list_plans

        :return: Stripe plans, or None on error
        """
        try:
            return stripe.Plan.list()
        except stripe.error.StripeError as e:
            print(e)
    @classmethod
    def create(cls, id=None, name=None, amount=None, currency=None,
               interval=None, interval_count=None, trial_period_days=None,
               metadata=None, statement_descriptor=None):
        """
        Create a new plan together with its backing product.

        API Documentation:
          https://stripe.com/docs/api#create_plan

        :param id: Plan identifier
        :type id: str
        :param name: Plan name
        :type name: str
        :param amount: Amount in cents to charge or 0 for a free plan
        :type amount: int
        :param currency: 3 digit currency abbreviation
        :type currency: str
        :param interval: Billing frequency
        :type interval: str
        :param interval_count: Number of intervals between each bill
        :type interval_count: int
        :param trial_period_days: Number of days to run a free trial
        :type trial_period_days: int
        :param metadata: Additional data to save with the plan
        :type metadata: dict
        :param statement_descriptor: Arbitrary string to appear on CC statement
        :type statement_descriptor: str
        :return: Stripe plan, or None on error
        """
        try:
            # Passing a product dict makes Stripe create the product inline.
            product = {
                "name": name,
                "statement_descriptor": statement_descriptor
            }
            return stripe.Plan.create(id=id,
                                      amount=amount,
                                      currency=currency,
                                      interval=interval,
                                      interval_count=interval_count,
                                      trial_period_days=trial_period_days,
                                      nickname=name,
                                      metadata=metadata,
                                      product=product
                                      )
        except stripe.error.StripeError as e:
            print(e)
    @classmethod
    def update(cls, id=None, name=None, metadata=None,
               statement_descriptor=None):
        """
        Update an existing plan and keep its backing product in sync
        (the product's name and statement descriptor are updated too).

        API Documentation:
          https://stripe.com/docs/api#update_plan

        :param id: Plan identifier
        :type id: str
        :param name: Plan name
        :type name: str
        :param metadata: Additional data to save with the plan
        :type metadata: dict
        :param statement_descriptor: Arbitrary string to appear on CC statement
        :type statement_descriptor: str
        :return: Stripe plan, or None on error
        """
        try:
            plan = stripe.Plan.retrieve(id)
            plan.nickname = name
            plan.metadata = metadata
            product_id = plan.product
            updated_plan = plan.save()
            product = Product.retrieve(product_id)
            product.name = name
            product.statement_descriptor = statement_descriptor
            product.save()
            return updated_plan
        except stripe.error.StripeError as e:
            print(e)
    @classmethod
    def delete(cls, plan):
        """
        Delete an existing plan and its backing product.

        API Documentation:
          https://stripe.com/docs/api#delete_plan

        :param plan: Plan identifier
        :type plan: str
        :return: Stripe plan object, or None on error
        """
        try:
            plan = stripe.Plan.retrieve(plan)
            product_id = plan.product
            deleted_plan = plan.delete()
            # The product is removed as well so it cannot be reused.
            product = Product.retrieve(product_id)
            product.delete()
            return deleted_plan
        except stripe.error.StripeError as e:
            print(e)
| [
"cg94301@gmx.com"
] | cg94301@gmx.com |
c3b9ff5b8152f420b706905dcff70884f3058ed5 | 6daf7a3b7efa5b78c93dca8f672b2f84c6cdc680 | /test_app.py | 597613c907de2442b18e5d131200b35cb24fe7dc | [] | no_license | Jakejamesreid/Interview-For-Tines | 6411ced1dd6a6fa74def629d4e816a9bf7de926b | b8e163dd4d5c79992a357859de2cb2282fa0983c | refs/heads/master | 2022-10-13T23:51:50.735501 | 2020-06-13T10:04:07 | 2020-06-13T10:04:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,485 | py | import unittest
import os
import app
import datetime
"""
This test suite tests that the methods defined in app.py work as expected
"""
class TinesTestCase(unittest.TestCase):
    """
    Tests for the helpers in app.py: findEventParameters (template-parameter
    extraction) and updateTemplateValue (substitution from a nested dict).
    Positive cases first; negative cases below.
    """
    # Test findEventParameters method with 1 parameter
    def test_findEventParameters_single_parameter(self):
        #Arrange
        url = "https://www.domain.com?param={{agent.parameter1}}"
        # Act
        matches = app.findEventParameters(url)
        result = []
        for match in matches:
            result.append(match.group(1))
        #Assert
        self.assertEqual(result, ["agent.parameter1"])
    # Test findEventParameters method with 2 parameters
    def test_findEventParameters_two_parameters(self):
        # Arrange
        url = "https://www.domain.com?param={{agent.parameter1}}&param2={{agent.parameter2}}"
        # Act
        matches = app.findEventParameters(url)
        result = []
        for match in matches:
            result.append(match.group(1))
        # Assert
        self.assertEqual(result, ["agent.parameter1","agent.parameter2"])
    # Test findEventParameters method with 3 parameters
    def test_findEventParameters_five_parameters(self):
        # Arrange
        url = "https://www.domain.com?param={{agent.parameter1}}&param2={{agent.parameter2}}&param3={{agent.parameter3}}&param4={{agent.parameter4}}&param5={{agent.parameter5}}"
        # Act
        matches = app.findEventParameters(url)
        result = []
        for match in matches:
            result.append(match.group(1))
        # Assert
        self.assertEqual(result, ["agent.parameter1","agent.parameter2","agent.parameter3","agent.parameter4","agent.parameter5"])
    # Test updateTemplateValue method using a URL thjat contains only 1 parameter values are NOT nested in sub dictionaries, e.g., sunset.results.sunset
    def test_updateTemplateValue_single_parameter_no_nesting_in_variables(self):
        # Arrange
        url = "https://api.sunrise-sunset.org/json?lat={{location.latitude}}"
        httpReqObjects = {"location":{"latitude": "54.5"}}
        # Act
        matches = app.findEventParameters(url)
        url = app.updateTemplateValue(matches, url, httpReqObjects)
        # Assert
        self.assertEqual(url, "https://api.sunrise-sunset.org/json?lat=54.5")
    # Test updateTemplateValue method using a URL thjat contains only 1 parameter values ARE nested in sub dictionaries, e.g., sunset.results.sunset
    def test_updateTemplateValue_single_parameter_nesting_in_variables(self):
        # Arrange
        message = "Sunset at {{sunset.results.sunset}}."
        httpReqObjects = {"sunset":{"results": {"sunset": "8:00:00 PM"}}}
        # Act
        matches = app.findEventParameters(message)
        message = app.updateTemplateValue(matches, message, httpReqObjects)
        # Assert
        self.assertEqual(message, "Sunset at 8:00:00 PM.")
    # Test updateTemplateValue method using a URL thjat contains only multiple parameters, values are NOT nested in sub dictionaries, e.g., sunset.results.sunset
    def test_updateTemplateValue_multi_parameter_no_nesting_in_variables(self):
        # Arrange
        url = "https://api.sunrise-sunset.org/json?lat={{location.latitude}}&lng={{location.longitude}}"
        httpReqObjects = {"location":{"latitude": "54.5", "longitude": "35.4"}}
        # Act
        matches = app.findEventParameters(url)
        url = app.updateTemplateValue(matches, url, httpReqObjects)
        # Assert
        self.assertEqual(url, "https://api.sunrise-sunset.org/json?lat=54.5&lng=35.4")
    # Test updateTemplateValue method using a URL thjat contains multiple parameters, values ARE nested in sub dictionaries, e.g., sunset.results.sunset
    def test_updateTemplateValue_multi_parameter_nesting_in_variables(self):
        # Arrange
        message = "Sunrise at {{sunrise.results.sunrise}}, sunset at {{sunset.results.sunset}}."
        httpReqObjects = {
            "sunrise":{"results": {"sunrise": "7:00:00 AM"}},
            "sunset":{"results": {"sunset": "8:00:00 PM"}}
        }
        # Act
        matches = app.findEventParameters(message)
        message = app.updateTemplateValue(matches, message, httpReqObjects)
        # Assert
        self.assertEqual(message, "Sunrise at 7:00:00 AM, sunset at 8:00:00 PM.")
    # Test updateTemplateValue method using a a parameters whose variable is not a string
    def test_updateTemplateValue_single_parameter_with_non_string_variable(self):
        # Arrange
        message = "Sunrise at {{sunrise.results.sunrise}}."
        sunrise = datetime.time(7)
        httpReqObjects = {"sunrise":{"results": {"sunrise": sunrise}}}
        # Act
        matches = app.findEventParameters(message)
        message = app.updateTemplateValue(matches, message, httpReqObjects)
        # Assert
        self.assertEqual(message, "Sunrise at 07:00:00.")
    """
    Negative test cases
    """
    # Test findEventParameters method with an invalid parameter structure
    def test_findEventParameters_invalid_parameter_structure(self):
        # Arrange
        url = "https://www.domain.com?param={agent.parameter1}&param2={{}}&param3={{agent}}"
        # Act
        matches = app.findEventParameters(url)
        # Assert
        result = next(matches, None)
        self.assertFalse(result)
if __name__ == '__main__':
unittest.main() | [
"jakejamesreid@gmail.com"
] | jakejamesreid@gmail.com |
6a50f6dc840ad5ee463050db663639df9a8ea7dd | e8b12e314782bf68347838599c8168e4a8019373 | /CompareAlternatives.py | 0d80231eb7ed1c3ac5094ee2f446c2fa5eed2155 | [] | no_license | HPM573/Lab_ParallelProcessing | 0ce7e4b615afe9e2e2a281f79684e9067003aa1b | f2e6401f4a5dc057a150914653079c0284c92b4b | refs/heads/main | 2023-05-12T06:03:15.275404 | 2023-05-02T13:58:18 | 2023-05-02T13:58:18 | 180,822,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,908 | py | import EconEvalInputData as D
import ProbabilisticSupport as Support
import ProbilisticParamClasses as P
from ParallelClasses import ParallelMultiCohort
N_COHORTS = 200 # number of cohorts
# NOTE(review): the __main__ guard below suggests ParallelMultiCohort spawns
# worker processes (required for multiprocessing on Windows) — confirm.
if __name__ == '__main__': # this line is needed to avoid errors that occur on Windows computers
    # create a multi-cohort to simulate under mono therapy
    multiCohortMono = ParallelMultiCohort(
        ids=range(N_COHORTS),
        pop_size=D.POP_SIZE,
        therapy=P.Therapies.MONO
    )
    multiCohortMono.simulate(sim_length=D.SIM_LENGTH)
    # create a multi-cohort to simulate under combi therapy
    multiCohortCombo = ParallelMultiCohort(
        ids=range(N_COHORTS),
        pop_size=D.POP_SIZE,
        therapy=P.Therapies.COMBO
    )
    multiCohortCombo.simulate(sim_length=D.SIM_LENGTH)
    # print the estimates for the mean survival time and mean time to AIDS
    Support.print_outcomes(multi_cohort_outcomes=multiCohortMono.multiCohortOutcomes,
                           therapy_name=P.Therapies.MONO)
    Support.print_outcomes(multi_cohort_outcomes=multiCohortCombo.multiCohortOutcomes,
                           therapy_name=P.Therapies.COMBO)
    # draw survival curves and histograms
    Support.plot_survival_curves_and_histograms(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
                                                multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes)
    # print comparative outcomes
    Support.print_comparative_outcomes(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
                                       multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes)
    # report the CEA results
Support.report_CEA_CBA(multi_cohort_outcomes_mono=multiCohortMono.multiCohortOutcomes,
multi_cohort_outcomes_combo=multiCohortCombo.multiCohortOutcomes) | [
"reza.yaesoubi@yale.edu"
] | reza.yaesoubi@yale.edu |
88428e03fec38c48b75093bd1e9e491db1ccb30e | 274de9c53f1eb41be27433cd969211d741a3b640 | /Clean/migrate.py | aa60e1082b9432ed1cb6373382032666f1fcd3c8 | [] | no_license | cheddarek/cheddarekproject | 3a9428b310e55b782722652115ce2806bec464f0 | 648e374ec03f4d0d59fad23c6a54be3314a0f79d | refs/heads/master | 2021-05-26T23:33:40.448712 | 2020-04-13T20:28:38 | 2020-04-13T20:28:38 | 254,188,998 | 0 | 0 | null | 2020-04-10T03:11:54 | 2020-04-08T20:05:54 | HTML | UTF-8 | Python | false | false | 311 | py | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from Model import db
from run import create_app
# Flask application built by the factory, configured from the 'config' module.
app = create_app('config')
# Bind Flask-Migrate (Alembic) to the app and its SQLAlchemy db handle.
migrate = Migrate(app, db)
# Expose migrations as CLI commands: `python migrate.py db <command>`.
manager = Manager(app)
manager.add_command('db', MigrateCommand)
if __name__ == '__main__':
    manager.run()
| [
"noreply@github.com"
] | noreply@github.com |
1b8b4359bdbbabbe6d0256e83de7ef2f1abc8482 | 793de7bd510c0b6509f1413353d912bc8ef9bfb0 | /asset-v1_MITx+6.00.1x+2T2017+type@asset+block@Lecture13/dataExample.py | 36cd4a9ac941b27d8585c211845009333bf3a56b | [] | no_license | DamienOConnell/MIT-600.1x | eb24490bb5148348d4b092db5a776a41ec1c6819 | 319d45bbbea991b9342c99874d8aad1dd6dc5d38 | refs/heads/master | 2020-06-06T03:37:19.904231 | 2019-12-15T04:40:17 | 2019-12-15T04:40:17 | 192,628,180 | 0 | 0 | null | 2019-10-21T09:25:54 | 2019-06-19T00:05:07 | Python | UTF-8 | Python | false | false | 435 | py | #!/usr/bin/env python3
#
# -*- coding: utf-8 -*-
#
"""
Created on Sun Jun 12 13:33:39 2016
@author: ericgrimson
"""
import pylab as plt
def fib(n):
    """Return the n-th Fibonacci number (fib(1) == fib(2) == 1).

    Iterative O(n) accumulation replacing the original exponential-time
    double recursion; results are identical for every valid input.

    :param n: 1-based index into the Fibonacci sequence.
    :raises ValueError: if n < 1 (the original recursed without bound here).
    """
    if n < 1:
        raise ValueError("n must be a positive integer")
    prev, curr = 1, 1
    for _ in range(n - 2):
        prev, curr = curr, prev + curr
    return curr
# Accumulators kept for the plotting example below.
inputs = []
results = []
# Start at 1: fib() is only defined for n >= 1. The original range(5)
# began at 0, and fib(0) recursed without ever reaching a base case.
for i in range(1, 6):
    print(fib(i))
def displayFibs(n):
    # Plot the first n Fibonacci values on a pylab figure named "fibs".
    # NOTE(review): gatherFacts is not defined anywhere in this file as
    # shown; calling this raises NameError unless it exists elsewhere.
    (xvals, yvals) = gatherFacts(n)
    plt.figure("fibs")
    plt.plot(xvals, yvals, label="fibonacci")
| [
"oconnell.damien@gmail.com"
] | oconnell.damien@gmail.com |
dc4ee8e84412fbe9e26fa41aea2ba61f0a80d687 | 3b11dc40c7d772fffeb4d8683e5c9791c41f6454 | /custom/clients/ecobank/ecobank_inventory/models/inventory_account.py | 54cf03c640a891acbae5ed78bf433efd0cd027f2 | [] | no_license | Jacky-odoo/Ecobank | b986352abac9416ab00008a4abaec2b1f1a1f262 | 5c501bd03a22421f47c76380004bf3d62292f79d | refs/heads/main | 2023-03-09T18:10:45.058530 | 2021-02-25T14:11:12 | 2021-02-25T14:11:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | from odoo import api, fields, models
from odoo.exceptions import ValidationError
class InventoryUser(models.Model):
    # NOTE(review): class name says "User" but the model is an inventory
    # account (_name = 'inventory.account') — confirm intent before renaming.
    _name = 'inventory.account'
    _rec_name = 'name_and_code'
    name = fields.Char(string='Name', required=True)
    code = fields.Char(string='Code', required=True)
    # Display label, computed from name + code below (stored).
    name_and_code = fields.Char(compute='compute_name_code', store=True)
    @api.multi
    @api.depends('name', 'code')
    def compute_name_code(self):
        # Builds "Name (CODE)"; recomputed whenever name or code changes.
        for rec in self:
            if rec.code and rec.name:
                rec.name_and_code = str(rec.name + " (" + rec.code + ")")
    @api.multi
    def copy(self, default=None):
        # Duplicating accounts is deliberately forbidden.
        raise ValidationError("Sorry you are not allowed to perform this operation. Error Code BYT001")
    @api.constrains('name')
    def check_name(self):
        # Case-insensitive uniqueness check on the account name.
        # NOTE(review): loads every record and compares in Python; a domain
        # search would avoid the O(n) scan — confirm before changing.
        all_accounts = self.search([])
        for account in all_accounts:
            if self.name.lower() == account.name.lower() and self.id != account.id:
                raise ValidationError("Error! Account Name already exist. BYT005")
    # DB-level uniqueness for the account code.
    _sql_constraints = [
        ('unique_code', 'unique (code)', "Account Code Already Exist !"),
    ]
| [
"francisbnagura@gmail.com"
] | francisbnagura@gmail.com |
44e249876f0bd5aca94dc32337cd2e718f6e302a | 8f64409717f221474fab2ce4d53d0f37cdb2bed0 | /products/forms.py | 864f1084a91337e18a6638c227bc4006f0b4f59c | [] | no_license | teemoteemo0318/PBC_final | 5e7626dfe7b16c20247cee529e457e4b95ecd3b9 | f0d965d947aca84b3a9a2e42b6d24951e540e997 | refs/heads/main | 2023-02-14T15:44:46.302694 | 2021-01-05T16:56:15 | 2021-01-05T16:56:15 | 321,557,381 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | from django import forms
from django.core.exceptions import ValidationError
from datetime import date
import requests
import pandas as pd
class Ticker(forms.Form):
    """Form selecting a TWSE stock ticker and a date range.

    ``clean`` cross-validates the dates, then verifies the ticker exists by
    querying the FinMind ``TaiwanStockInfo`` dataset.
    """
    ticker = forms.CharField(label='股票代碼', initial='0050')
    start_date = forms.DateField(label='開始日期', initial='2020-01-01', widget=forms.DateInput(attrs={'type':'date'}))
    end_date = forms.DateField(label='結束日期', initial=date.today().strftime("%Y-%m-%d"), widget=forms.DateInput(attrs={'type':'date'}))
    def clean(self):
        cleaned_data = super().clean()
        # Use .get(): a field that failed its own validation is absent from
        # cleaned_data, and the original dict indexing raised KeyError
        # (a 500) instead of showing the form errors.
        start_date = cleaned_data.get("start_date")
        end_date = cleaned_data.get("end_date")
        ticker = cleaned_data.get('ticker')
        if start_date and end_date:
            if end_date < start_date:
                raise forms.ValidationError("開始日期需早於結束日期")
            if end_date > date.today():
                raise forms.ValidationError("結束日期不應大於今天日期")
        if ticker:
            # Only hit the remote API once the cheap local checks passed.
            url = "https://api.finmindtrade.com/api/v3/data"
            parameter = {
                "dataset": "TaiwanStockInfo",
            }
            # timeout: a slow upstream API must not hang the request thread.
            resp = requests.get(url, params=parameter, timeout=10)
            data = resp.json()
            stock_id = pd.DataFrame(data["data"])
            if ticker not in stock_id['stock_id'].values:
                raise forms.ValidationError("無此股票代碼")
        return cleaned_data
| [
"teemoteemo0318@gmail.com"
] | teemoteemo0318@gmail.com |
38eaeac29ebaa70dc88d888b36fe8d2e3156dd76 | 083b3f5b0d23c269c6a9ff1ea413e70fb799a497 | /Leetcode Challenge/09_September_2020/Python/Week 5/2_First Missing Positive.py | 5daf39e470ef89206f9440b17c1cc1717578a4f7 | [] | no_license | HectorIGH/Competitive-Programming | b2e02dff140d9ebb06c646f7be0b53ea0afe90c9 | 467058c63e8a7e76805feebe3020bac4d20516a6 | refs/heads/master | 2022-12-31T18:32:46.824626 | 2020-10-16T20:38:33 | 2020-10-16T20:38:33 | 279,733,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | #Given an unsorted integer array, find the smallest missing positive integer.
#
#Example 1:
#
#Input: [1,2,0]
#Output: 3
#Example 2:
#
#Input: [3,4,-1,1]
#Output: 2
#Example 3:
#
#Input: [7,8,9,11,12]
#Output: 1
#Follow up:
#
#Your algorithm should run in O(n) time and uses constant extra space.
#
# Hide Hint #1
#Think about how you would solve the problem in non-constant space. Can you apply that logic to the existing space?
# Hide Hint #2
#We don't care about duplicates or non-positive integers
# Hide Hint #3
#Remember that O(2n) = O(n)
class Solution:
    def firstMissingPositive(self, nums: List[int]) -> int:
        """Return the smallest positive integer absent from nums.

        O(n) time, O(1) extra space: the array itself is the presence table.
        The answer is always in [1, n + 1], so values outside [1, n] are
        irrelevant; once 1 is known to be present they are overwritten with
        1, and each value v then marks slot v - 1 by negating it.
        Note: nums is modified in place.
        """
        n = len(nums)
        # If 1 is missing it is the answer (also covers the empty list).
        if 1 not in nums:
            return 1
        # Replace out-of-range values with 1 (known present) so every entry
        # is a usable index + 1.
        for i in range(n):
            if nums[i] <= 0 or nums[i] > n:
                nums[i] = 1
        # Mark presence of value v by making nums[v - 1] negative; abs()
        # recovers the original value even after the slot itself was marked.
        for i in range(n):
            pos = abs(nums[i]) - 1
            if nums[pos] > 0:
                nums[pos] = -nums[pos]
        # The first slot left positive means that value was never seen.
        for i in range(n):
            if nums[i] > 0:
                return i + 1
        return n + 1
| [
"HectorIGH@users.noreply.github.com"
] | HectorIGH@users.noreply.github.com |
77c326d8b4be8828d4ff340158b1355fd541aecb | b97edfc765baa1432fcef82596e2a2d48310cce0 | /algorithms/say.py | d6b5ed481550797d2a4d690ecaf72ebaaa5c6346 | [] | no_license | NicholasPiano/scripts | 4dadaa97bb1bb18bd51c5526b0a57c7ddc554fce | 76b610af49128f0434419fcd75be606ef5efbb37 | refs/heads/master | 2020-05-31T01:29:43.284877 | 2015-01-05T02:00:28 | 2015-01-05T02:00:28 | 27,193,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | #!usr/bin/python3
import os
os.system('say vizontlatahshrah') | [
"nicholas.d.piano@gmail.com"
] | nicholas.d.piano@gmail.com |
101e6d98e6ea5327b9632183ef8eb52de0c552e9 | ff5eea95bb0827cb086c32f4ec1c174b28e5b82d | /gammapy/background/tests/test_ring.py | 047cad9a887193d1551fbd48446204c72bfc2e9e | [] | no_license | pflaumenmus/gammapy | 4830cc5506a4052658f30077fa4e11d8c685ede0 | 7b5caf832c9950c886528ca107203ce9b83c7ebf | refs/heads/master | 2021-01-15T23:27:46.521337 | 2013-09-25T14:23:35 | 2013-09-25T14:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import unittest
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from astropy.io import fits
from ..maps import Maps
from ..ring import RingBgMaker, outer_ring_radius
# Optional-dependency probe: some tests below need SciPy, so record its
# availability and let pytest skip those tests when it is missing.
try:
    import scipy
    HAS_SCIPY = True
except ImportError:
    HAS_SCIPY = False
@pytest.mark.skipif('not HAS_SCIPY')
class TestRingBgMaker(unittest.TestCase):
    """Smoke tests for RingBgMaker (skipped when SciPy is unavailable).

    These only verify that construction and the correlate calls run
    without raising; no numerical results are asserted.
    """

    def test_construction(self):
        # Construction with two positional arguments plus info() must
        # not raise.
        r = RingBgMaker(0.3, 0.5)
        r.info()

    def test_correlate(self):
        # Correlate a single hot pixel in an otherwise empty image.
        image = np.zeros((10, 10))
        image[5, 5] = 1
        r = RingBgMaker(3, 6, 1)
        image = r.correlate(image)

    def test_correlate_maps(self):
        # Run correlation on a Maps container with a square exclusion
        # region zeroed out.
        n_on = np.ones((200, 200))
        hdu = fits.ImageHDU(n_on, name='n_on')
        maps = Maps([hdu])
        maps['exclusion'].data[100:110, 100:110] = 0
        r = RingBgMaker(10, 13, 1)
        r.correlate_maps(maps)
class TestHelperFuntions(unittest.TestCase):
    """Tests for the module-level helper functions."""

    def test_compute_r_o(self):
        # With these arguments the outer ring radius equals the input.
        result = outer_ring_radius(1, 0, 1)
        assert_almost_equal(result, 1)
# Allow running this test module directly (python test_ring.py).
if __name__ == '__main__':
    unittest.main()
| [
"Deil.Christoph@gmail.com"
] | Deil.Christoph@gmail.com |
b383b5c92021bb5c8a33532a9403d85bb62c3711 | 1b426a96bade22b522e87f61ed2834c536bfa70a | /SRC/XieCheng/11.py | 579f5a919702e969647df48c505377edc1d5275f | [] | no_license | wyt843/MyPython | 4b82ab1fca909d631f51dd075390276b7c0fd9b2 | 10b8aec3480e800f8009771794a65033249d9c01 | refs/heads/master | 2020-04-14T14:53:55.733807 | 2019-01-12T04:39:55 | 2019-01-12T04:39:55 | 163,910,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | #! -3
# coding:utf-8
import threading
# import the asynchronous I/O package
import asyncio
# define a coroutine
# Legacy generator-style decorator kept for reference; the async def
# below replaces the old @asyncio.coroutine / yield-from syntax.
#@asyncio.coroutine
async def hello():
    """Print the current thread, suspend for 10 s, then print again.

    Running two of these concurrently shows that coroutines interleave
    on a single thread (every print reports the same thread object).
    """
    print("hello world!{}".format(threading.currentThread()))
    print("start.....{}".format(threading.currentThread()))
    # yield from asyncio.sleep(10)  -- pre-3.5 equivalent of the await below
    await asyncio.sleep(10)
    print("Done......{}".format(threading.currentThread()))
    print("Hello again!{}".format(threading.currentThread()))
# Start the message loop: obtain the (pre-3.10 style) default event loop.
loop = asyncio.get_event_loop()
# Define the tasks: two copies of the coroutine to run concurrently.
tasks = [hello(),hello()]
# asyncio.wait drives both coroutines to completion; because the sleeps
# overlap, the whole run takes ~10 s rather than 20 s.
loop.run_until_complete(asyncio.wait(tasks))
# Close the message loop.
loop.close()
"wenyitao880901@126.com"
] | wenyitao880901@126.com |
6825136be63fc66500e4203d076fa8e265b566b4 | 70500d9a4f67970f0a92b2d5815357c064d972fd | /proj/settings.py | ba3d5cf2a77dd4d8ddf1aaceed7dd153adbcb473 | [] | no_license | 668Jerry/javascripttraining | e715a8a2553c91bf9d0b1d2ccc104fa516fe61cd | 50a325694e4f8959a77ff700b9ed2cb8f395bdbd | refs/heads/master | 2021-01-21T23:03:52.421167 | 2014-07-08T16:27:41 | 2014-07-08T16:27:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,470 | py | """
Django settings for proj project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f(g%-wkyc_&flw-3@fsqn(gt$lq-uj_)_ly*cl(%__m_-+!cab'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# NOTE(review): TEMPLATE_DEBUG only has an effect on old Django versions
# (this file was generated for 1.6); later releases ignore it.
TEMPLATE_DEBUG = True

ALLOWED_HOSTS = []

# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'proj.urls'

WSGI_APPLICATION = 'proj.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/

STATIC_URL = '/static/'

# --- Heroku-style deployment overrides appended below ---

# Parse database configuration from $DATABASE_URL
# NOTE(review): dj_database_url.config() returns {} when DATABASE_URL is
# unset, silently clobbering the sqlite default configured above --
# confirm this module is only used where DATABASE_URL is defined.
import dj_database_url
DATABASES['default'] = dj_database_url.config()

# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

# Allow all host headers
ALLOWED_HOSTS = ['*']

# Static asset configuration
# NOTE(review): re-imports os and rebinds BASE_DIR to a *different*
# directory (this settings package rather than the repo root); the
# static path below is therefore relative to the new value.
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
os.path.join(BASE_DIR, 'static'),
) | [
"668Jerry@gmail.com"
] | 668Jerry@gmail.com |
aed3798cdd0145f3e20c5e1702297ae9239874bb | 4641ae98ca768ec76787f18f2e5ceb764dcee9da | /mongo.py | cc9c2d5d0220b802557def2eb074127f43233c31 | [] | no_license | cashblxxd/ozon_site | 4663c2963a51e44a149c8f344d51836f2093c3d3 | 50097fc93546241c0edd8496a7f09d5fcbc4d5dc | refs/heads/master | 2023-02-05T07:19:06.995993 | 2020-09-24T12:54:13 | 2020-09-24T12:54:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,672 | py | import pymongo
from pprint import pprint
import secrets
from mongo_queue.queue import Queue
import string
import random
import gridfs
import datetime
from bson.objectid import ObjectId
def user_exist(email, password, client):
    """Look up a user by (email, password) pair.

    Returns (True, {"email", "accounts_token"}) on a match, otherwise
    (False, {}).  A leftover debug ``pprint(data)`` that dumped the raw
    user document (including the password) to stdout was removed.
    NOTE(review): passwords are compared in plain text here, so they are
    presumably stored unhashed -- worth revisiting.
    """
    data = client.userdata.users.find_one({
        "email": email,
        "password": password
    })
    if data is None:
        return False, {}
    return True, {
        "email": data["email"],
        "accounts_token": data["accounts_token"]
    }
def user_create(email, password, client):
    """Create a user record plus an empty linked accounts document.

    A fresh URL-safe token ties the two collections together.  Always
    returns (True, {"email", "accounts_token"}).
    """
    token = secrets.token_urlsafe()
    user_doc = {
        "email": email,
        "password": password,
        "accounts_token": token,
    }
    accounts_doc = {"token": token, "order": [], "data": {}}
    client.userdata.users.insert_one(user_doc)
    client.userdata.accounts.insert_one(accounts_doc)
    return True, {"email": email, "accounts_token": token}
def change_password(email, old_password, new_password, client):
    """Replace the password of the user matching (email, old_password)."""
    selector = {"email": email, "password": old_password}
    update = {"$set": {"password": new_password}}
    client.userdata.users.update_one(selector, update)
def reset_password(email, new_password, client):
    """Overwrite the password for ``email`` without checking the old one.

    Used by the reset-token flow, where possession of the emailed token
    is the authorization.
    """
    client.userdata.users.update_one(
        {"email": email},
        {"$set": {"password": new_password}},
    )
def email_taken(email, client):
    """Return True when a user document with this email already exists."""
    return client.userdata.users.find_one({'email': email}) is not None
def put_confirmation_token(email, password, client):
    """Store a one-shot signup confirmation token and return it.

    The pending credentials travel with the token so the user record is
    only created once the token is redeemed.
    """
    token = secrets.token_urlsafe()
    client.userdata.confirmation_tokens.insert_one(
        {"token": token, "email": email, "password": password}
    )
    return token
def get_confirmation_token(token, client):
    """Redeem a signup confirmation token.

    Returns (True, (email, password)) and deletes the token document, or
    (False, "Not found") for an unknown token.
    """
    doc = client.userdata.confirmation_tokens.find_one({"token": token})
    if doc is None:
        return False, "Not found"
    client.userdata.confirmation_tokens.delete_one(doc)
    return True, (doc["email"], doc["password"])
def put_reset_token(email, client):
    """Store a one-shot password-reset token for ``email`` and return it.

    Shares the confirmation_tokens collection with the signup flow.
    """
    token = secrets.token_urlsafe()
    client.userdata.confirmation_tokens.insert_one(
        {"token": token, "email": email}
    )
    return token
def get_reset_token(token, client):
    """Redeem a password-reset token.

    Returns (True, email) and deletes the token document, or
    (False, "Not found") for an unknown token.
    """
    doc = client.userdata.confirmation_tokens.find_one({"token": token})
    if doc is None:
        return False, "Not found"
    client.userdata.confirmation_tokens.delete_one(doc)
    return True, doc["email"]
def clear_queue(client):
    """Wipe all queued jobs, pending-job markers and queue-side session docs."""
    client.update_queue_db.update_queue.delete_many({})
    client.update_queue_db.job_ids.delete_many({})
    client.update_queue_db.sessions_active.delete_many({})
def get_accounts_order_data(accounts_token, client):
    """Return (order, data) for the accounts document with this token.

    ``order`` is the list of account names in display order; ``data``
    maps each name to its credentials dict.  Previously the not-found
    branch returned a bare [] while the success branch returned a
    2-tuple, so callers unpacking ``order, data = ...`` crashed on the
    miss path; both branches now return a 2-tuple.
    """
    data = client.userdata.accounts.find_one({
        "token": accounts_token
    })
    if data is None:
        return [], {}
    return data["order"], data["data"]
'''
def clear(client):
client = pymongo.MongoClient("mongodb+srv://dbUser:qwep-]123p=]@cluster0-ifgr4.mongodb.net/Cluster0?retryWrites=true&w=majority")
client.ozon_data.items_pool.delete_many({})
clear(0)
'''
#clear_queue()
def account_exist_name_apikey_client_id(name, apikey, client_id, token, client):
    """Check whether name/apikey/client_id are all unused for this user.

    Returns (True, "") when nothing collides; otherwise (False, field),
    where field names the first colliding attribute ("name", "apikey"
    or "client_id"), or (False, "Not found") for an unknown token.
    """
    accounts = client.userdata.accounts.find_one({"token": token})
    if accounts is None:
        return False, "Not found"
    if name in accounts["order"]:
        return False, "name"
    for existing in accounts["data"]:
        creds = accounts["data"][existing]
        if creds["apikey"] == apikey:
            return False, "apikey"
        if creds["client_id"] == client_id:
            return False, "client_id"
    return True, ""
def add_account(name, apikey, client_id, token, client):
    """Append a named account to the user's accounts document.

    Returns (True, "") on success -- the success path previously fell
    off the end and returned None while the failure path returned a
    tuple -- or (False, "Not found") for an unknown token.  Uniqueness
    is expected to have been validated beforehand via
    account_exist_name_apikey_client_id().
    """
    data = client.userdata.accounts.find_one({
        "token": token
    })
    if data is None:
        return False, "Not found"
    data["order"].append(name)
    data["data"][name] = {
        "apikey": apikey,
        "client_id": client_id
    }
    client.userdata.accounts.update_one({
        "token": token
    }, {"$set": data})
    return True, ""
def delete_account_from_db(token, pos, client):
    """Remove the account at position ``pos`` from the user's ordering.

    Returns (True, "") on success or (False, "Not found") when the
    accounts document does not exist.
    """
    accounts = client.userdata.accounts.find_one({"token": token})
    if accounts is None:
        return False, "Not found"
    removed_name = accounts["order"].pop(pos)
    del accounts["data"][removed_name]
    client.userdata.accounts.update_one({"token": token}, {"$set": accounts})
    return True, ""
def init_session(uid, email, accounts_token, client):
    """Create a fresh UI session document for ``uid``.

    Any previous session for the uid is removed first.  If no accounts
    document exists for the token yet, an empty one is created so later
    lookups succeed.  The session starts on the dashboard panel with
    the "postings_all" tab selected.
    """
    delete_session(uid, client)
    accounts = client.userdata.accounts.find_one({
        "token": accounts_token
    })
    if not accounts:
        # First login with this token: bootstrap an empty accounts doc.
        client.userdata.accounts.insert_one({
            "token": accounts_token,
            "order": [],
            "data": {}
        })
        accounts = {
            "order": [],
            "data": {}
        }
    client.sessions_data.sessions_active.insert_one({
        "uid": uid,
        # Display name shown in the UI: the local part of the email.
        "email_show": email.split("@")[0],
        "email": email,
        "accounts_token": accounts_token,
        "cur_pos": 0,
        "panel": "dashboard",
        "tab": "postings_all",
        "done": ""
    })
def get_session(uid, client):
    """Return the active session document for ``uid`` (or None)."""
    return client.sessions_data.sessions_active.find_one({
        "uid": uid
    })
def modify_session(uid, data, client):
    """Apply a $set of the fields in ``data`` to the session for ``uid``."""
    client.sessions_data.sessions_active.update_one({
        "uid": uid
    }, {"$set": data})
def delete_session(uid, client):
    """Remove the active session document for ``uid`` (no-op if absent)."""
    client.sessions_data.sessions_active.delete_one({
        "uid": uid
    })
def mark_pending(job_id, client):
    """Record ``job_id`` as in-flight so check_job() reports it not done.

    A leftover debug ``print("inserted")`` was removed.
    """
    client.update_queue_db.job_ids.insert_one({
        "job_id": job_id
    })
def mark_done(job_id, client):
    """Drop the in-flight marker for ``job_id`` (counterpart of mark_pending)."""
    selector = {"job_id": job_id}
    client.update_queue_db.job_ids.delete_one(selector)
def check_job(job_id, client):
    """Return True when ``job_id`` is no longer marked as pending."""
    pending = client.update_queue_db.job_ids.find_one({"job_id": job_id})
    return pending is None
def get_items(api_key, client_id, client, type="items_all"):
    """Fetch this seller's cached items, newest first.

    Any ``type`` other than "items_all" is upper-cased and applied as a
    status filter (item statuses are stored upper-case, unlike posting
    statuses -- compare get_postings).
    """
    query = {"creds": f"{api_key}:{client_id}"}
    if type != "items_all":
        query["status"] = type.upper()
    cursor = client.ozon_data.items_pool.find(query)
    return cursor.sort('date', -1)
def get_postings(api_key, client_id, client, type="postings_all"):
    """Fetch this seller's cached postings, newest first.

    Any ``type`` other than "postings_all" is applied verbatim as a
    status filter (posting statuses are stored as-is; item statuses in
    get_items are upper-cased instead).
    """
    query = {"creds": f"{api_key}:{client_id}"}
    if type != "postings_all":
        query["status"] = type
    return client.ozon_data.postings_pool.find(query).sort('date', -1)
def save_file(api_key, client_id, name, content, client):
    """Store ``content`` in GridFS and index it in the per-user file list.

    The listing document carries the owner's "apikey:client_id" creds
    string, the GridFS file id, the upload timestamp and the original
    filename.
    """
    fs = gridfs.GridFS(client.files)
    file_id = fs.put(content, filename=name)
    client.user_files_list.user_files_list.insert_one({
        "creds": f"{api_key}:{client_id}",
        "file_id": file_id,
        "date": datetime.datetime.now(),
        "name": name
    })
def get_files_list(api_key, client_id, client):
    """Return this seller's stored-file listing documents, newest first."""
    owner = {"creds": f"{api_key}:{client_id}"}
    return client.user_files_list.user_files_list.find(owner).sort("date", -1)
def get_file(f_id, client):
    """Return the raw bytes of the GridFS chunk for file ``f_id``, or None.

    NOTE(review): find_one fetches a single chunk document, so this
    presumably only returns the full content for files that fit in one
    GridFS chunk.
    """
    chunk = client.files.fs.chunks.find_one({"files_id": ObjectId(f_id)})
    return None if chunk is None else chunk["data"]
def delete_file(api_key, client_id, f_id, client):
    """Delete a stored file and its listing entry, if owned by this seller.

    The ownership check (creds + file id must match a listing document)
    prevents one seller from deleting another seller's files; a
    non-matching request is silently ignored.
    """
    data = client.user_files_list.user_files_list.find_one({
        "creds": f"{api_key}:{client_id}",
        "file_id": ObjectId(f_id)
    })
    if data is None:
        return
    delete_file_gridfs(f_id, client)
    client.user_files_list.user_files_list.delete_one(data)
def delete_file_gridfs(f_id, client):
    """Delete the GridFS chunk documents belonging to file ``f_id``.

    Fixes two defects: the chunks collection keys its documents by
    "files_id" (the original "file_id" filter matched nothing -- the
    sibling get_file() already queries "files_id"), and a file larger
    than one chunk has several chunk documents, so delete_many is
    required.  NOTE(review): the matching fs.files metadata document is
    still not removed here.
    """
    client.files.fs.chunks.delete_many({
        "files_id": ObjectId(f_id)
    })
def check_job_not_exist(api_key, client_id, channel, client):
    """Return True when no live job for this seller sits on ``channel``.

    A queued job whose attempts counter exceeded 1 is treated as dead,
    so a replacement may be enqueued.  (A disabled variant additionally
    filtered by a job ``type`` field.)
    """
    selector = {
        "api_key": api_key,
        "client_id": client_id,
        "channel": channel,
    }
    job = client.update_queue_db.update_queue.find_one(selector)
    return job is None or job["attempts"] > 1
def insert_deliver_job(api_key, client_id, posting_numbers, job_id, client):
    """Enqueue a 'mark postings delivered' job, unless one is already live.

    Each put uses a throwaway random consumer id; the job is marked
    pending so the UI can poll check_job(job_id).
    """
    if check_job_not_exist(api_key, client_id, "deliver_queue", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "posting_numbers": posting_numbers, "job_id": job_id}, channel="deliver_queue")
        mark_pending(job_id, client)
def insert_items_update_job(api_key, client_id, job_id, client):
    """Enqueue a priority items refresh, unless one is already live."""
    if check_job_not_exist(api_key, client_id, "items_priority", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id}, channel="items_priority")
        mark_pending(job_id, client)
def insert_items_regular_update(api_key, client_id, job_id, client):
    """Enqueue a background items refresh.

    Skipped when either a priority or a regular items job is already
    live for this seller, so background refreshes never pile up behind
    user-triggered ones.
    """
    if check_job_not_exist(api_key, client_id, "items_priority", client) and check_job_not_exist(api_key, client_id, "items_queue", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id}, channel="items_queue")
        mark_pending(job_id, client)
def insert_postings_new_update_job(api_key, client_id, job_id, client):
    """Enqueue a priority refresh of NEW postings only ("type": "new")."""
    if check_job_not_exist(api_key, client_id, "postings_priority", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id, "type": "new"}, channel="postings_priority")
        mark_pending(job_id, client)
def insert_postings_status_update_job(api_key, client_id, job_id, client):
    """Enqueue a priority refresh of posting statuses ("type": "status")."""
    if check_job_not_exist(api_key, client_id, "postings_priority", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id, "type": "status"}, channel="postings_priority")
        mark_pending(job_id, client)
def insert_postings_update_job(api_key, client_id, job_id, client):
    """Enqueue a priority refresh of ALL postings ("type": "all").

    Skipped when a live job already occupies the postings_priority
    channel.  Leftover debug output (print/pprint of the queued payload)
    was removed.
    """
    if check_job_not_exist(api_key, client_id, "postings_priority", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id, "type": "all"}, channel="postings_priority")
        mark_pending(job_id, client)
def insert_postings_regular_update(api_key, client_id, job_id, client):
    """Enqueue a background postings refresh.

    Skipped when either a priority or a regular postings job is already
    live for this seller.
    """
    if check_job_not_exist(api_key, client_id, "postings_priority", client) and check_job_not_exist(api_key, client_id, "postings_queue", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id}, channel="postings_queue")
        mark_pending(job_id, client)
def insert_act_job(api_key, client_id, job_id, client):
    """Enqueue an act-generation job, unless one is already live."""
    if check_job_not_exist(api_key, client_id, "act_queue", client):
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id}, channel="act_queue")
        mark_pending(job_id, client)
def insert_labels_upload_job(api_key, client_id, posting_numbers, job_id, client):
    """Enqueue a label-upload job for a specific set of postings.

    Unlike the other enqueuers, duplicate detection here also matches on
    ``posting_numbers`` (done inline rather than via
    check_job_not_exist), so different posting batches may be queued
    concurrently on the same channel.
    """
    data = client.update_queue_db.update_queue.find_one({
        "api_key": api_key,
        "client_id": client_id,
        "posting_numbers": posting_numbers,
        "channel": "labels_queue"
    })
    # Same liveness rule as check_job_not_exist: absent or dead (>1
    # attempts) means a fresh job may be queued.
    if data is None or data["attempts"] > 1:
        queue = Queue(client.update_queue_db.update_queue, consumer_id=''.join(random.choice(string.ascii_lowercase) for i in range(10)), timeout=300, max_attempts=3)
        queue.put({"api_key": api_key, "client_id": client_id, "job_id": job_id, "posting_numbers": posting_numbers}, channel="labels_queue")
        mark_pending(job_id, client)
| [
"gcc0xff@gmail.com"
] | gcc0xff@gmail.com |
30c0de57c10ed526a1bc89beaa8aed33b6c0e89f | cb40fda810fa7889e0324b254320168ccde84bc5 | /scope.py | 098cd2085216ac03a0c41ae6208ef28c83a25f35 | [] | no_license | gsmith98/Compilers-Interpreters | 434621e204c23002195d016adc5e49af02823f7f | 0d1c4a75f954bcfe13cf1adffc981fed9b416776 | refs/heads/master | 2020-03-06T20:57:01.274889 | 2018-03-28T01:38:16 | 2018-03-28T01:38:16 | 127,066,631 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | # Graham Smith gsmith98@jhu.edu
from customexception import CustException
from entry import *
class Scope(object):
    """A lexically nested symbol table mapping names to entry objects.

    Lookup walks outward through enclosing scopes; insertion is always
    into this scope and rejects duplicates.
    """

    def __init__(self, out):
        self.__outer = out # pointer to outer scope (None for the outermost)
        self.__table = {} # dictionary: the symbol table

    def insert(self, name, entry):
        """Bind ``name`` in this scope; raise if it is already bound here."""
        if name in self.__table: # could call local, but I find this clearer
            raise CustException(name + " declared a second time in same scope")
        self.__table[name] = entry

    def find(self, name):
        """Return the entry for ``name``, searching outward; None if unbound."""
        if name in self.__table:
            return self.__table[name]
        elif self.__outer is not None:
            return self.__outer.find(name)
        else:
            return None

    def local(self, name):
        """Return True when ``name`` is bound in THIS scope (no outer search)."""
        return name in self.__table

    def to_string(self):
        """Human-readable dump of this scope's table (debugging aid)."""
        return "Scope " + str(self.__table)

    def sever(self):
        """Detach this scope from its enclosing scope."""
        self.__outer = None

    def getTable(self):
        """Expose the underlying name->entry dict (callers must not abuse)."""
        return self.__table

    def accept(self, visitor):
        """Visitor-pattern hook: dispatch to visitor.visit(self)."""
        return visitor.visit(self)

    def get_size(self):
        """Total byte size of all non-Type entries declared in this scope.

        Also validates that Constant values fit a signed 32-bit register
        and that the scope's total size stays within the 16-bit offset
        range addressable by the target's load/store encoding.
        """
        summ = 0
        for name in self.__table:
            ent = self.__table[name]
            if not isinstance(ent, Type):
                summ += ent.getType().get_size()
                if isinstance(ent, Constant) and (
                        ent.getValue() < -(2**31) or ent.getValue() > (2**31) - 1):
                    raise CustException("Value " + str(ent.getValue()) +
                                        " won't fit in a 32 bit register")
        if summ > 2**16: # maximum offset that can be used
            raise CustException("Space needed surpassed addressable bounds")
        return summ

    # give offset from the beginning of the containing scope
    # a record may be at offset 8, its first member at
    # offset 0 (from that 8)
    def give_offsets(self):
        """Assign sequential addresses to entries; recurse into records."""
        off = 0
        for name in self.__table:
            if not isinstance(self.__table[name], Type):
                self.__table[name].set_address(off)
                if isinstance(self.__table[name].getType(), Record):
                    self.__table[name].getType().getScope().give_offsets()
                off += self.__table[name].getType().get_size()
"smitgw11@gmail.com"
] | smitgw11@gmail.com |
9d1bf787ebd696de47a90e2ffeb0890f15f54314 | e58340c84c35922ddac12c3eb4d7e4534ec62bd3 | /blockchain/lib/data/historical/__init__.py | 6b236f5d1542ed4898e21c5c2d89fb93259f5614 | [] | no_license | niki-rohani/blockchainanalysis | 106d1b10fd582fcc9a643a72efe8cf12be315e86 | fa43fe0e116ea951c0f0bc9f9de971a8d55148d1 | refs/heads/master | 2021-06-29T22:17:33.169284 | 2017-09-17T16:36:40 | 2017-09-17T16:36:40 | 102,991,795 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | from .getter import Getter
from .quandl_getter import *
from .poliniex_getter import * | [
"niki@clustaar.com"
] | niki@clustaar.com |
ff31f03d357f8dd02d1fef1e8193bb092e608bea | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02708/s465285057.py | b9e8c08a651992f50a225309892a6784c1a1572f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | from itertools import accumulate
# Counts the distinct sums obtainable by choosing i of the integers
# 0..N, summed over every choice size i in [K, N+1], modulo 1e9+7.
N,K = map(int,input().split())
# acc[i] = 0 + 1 + ... + i (prefix sums of 0..N); the lambda is just
# the default addition behaviour of accumulate.
acc = list(accumulate(range(N+1), lambda x,y:x+y))
ans = 0
mod = 10**9+7
for i in range(K, N+1):
    # r/l are the largest/smallest sums reachable with i picks; every
    # integer in between is also reachable, hence r - l + 1 new sums.
    r = acc[N] - acc[N-i]
    l = acc[i-1]
    ans = (ans+r-l+1) % mod
# Choosing all N+1 numbers contributes exactly one more sum.
ans += 1
print(ans % mod)
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
261e311327b98944fe31f1d14e3811f0bd482d99 | 499b635838407840f350d1dd5eebb9e999537ef3 | /leetcode/841.py | 1de28de431307bb12b4f24a251b1303701ab309b | [] | no_license | DzwS/python | f57f1d671c523b8b10196f5e7e830998dae8056d | 69145c23fe5d43346e17cd6c88cd987ebbd32dc5 | refs/heads/master | 2021-06-12T12:25:22.098692 | 2020-06-18T05:30:54 | 2020-06-18T05:30:54 | 98,389,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,183 | py | # -*- coding:utf-8 -*-
import json
class Solution(object):
    def canVisitAllRooms(self, rooms):
        """Return True when every room is reachable starting from room 0.

        rooms[i] lists the keys found in room i; key k opens room k.
        Performs a depth-first walk collecting keys.  Ported to valid
        Python 3: the file's Python 2 ``print`` debug statements were
        removed (they were leftovers and a syntax error under 3.x), and
        index-based iteration was replaced with direct iteration.

        :type rooms: List[List[int]]
        :rtype: bool
        """
        pass_room = [0]
        if len(pass_room) == len(rooms):
            return True
        for key in rooms[0]:
            if key not in pass_room:
                pass_room.append(key)
                pass_room = self.recursion(pass_room, rooms, key)
                if len(pass_room) == len(rooms):
                    return True
        return False

    def recursion(self, pass_room, rooms, key):
        """Visit room ``key``, appending every newly reachable room.

        Returns the (mutated) list of visited rooms; stops early once
        all rooms have been seen.
        """
        if len(pass_room) == len(rooms):
            return pass_room
        for next_key in rooms[key]:
            # Only descend into rooms we have not opened yet.
            if next_key not in pass_room:
                pass_room.append(next_key)
                pass_room = self.recursion(pass_room, rooms, next_key)
        return pass_room
"zhiwei.deng@quantil.com"
] | zhiwei.deng@quantil.com |
5b2e346f5c8b6a03ce20a7f271cc29b680d72174 | 9d177a8d997545a2be5e2e6d1cc18fd274bdd07c | /python/algorithm/dfs_graph.py | 5f74387f452e5bf2831c2377f1d5144e9b4b701f | [] | no_license | hamidb/interview-questions | e33ff1a8a54e5eb161c6dcc8d32dd6faf031545c | 54c7a037dcc4f2a62e0193875049338cdbcf5e78 | refs/heads/master | 2021-11-27T20:03:50.350437 | 2021-11-15T17:41:03 | 2021-11-15T17:41:03 | 238,605,239 | 18 | 7 | null | null | null | null | UTF-8 | Python | false | false | 886 | py | """Graph dfs"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from adt.graph import Graph
def dfs_recursive(graph):
    """Count the vertices reachable from vertex 0 via recursive DFS."""
    assert isinstance(graph, Graph)
    assert graph.size > 0
    def dfs(graph, i, visited):
        # Returns the number of previously-unvisited vertices reached
        # from vertex i (0 when i was already visited).
        if visited[i]:
            return 0
        visited[i] = 1
        count = 1
        # get_edges may return None for an isolated vertex, hence `or []`.
        for edge in (graph.get_edges(i) or []):
            count += dfs(graph, edge.to, visited)
        return count
    visited = graph.size*[0]
    return dfs(graph, 0, visited)
def dfs_iterative(graph):
    """Count the vertices reachable from vertex 0 using an explicit stack."""
    assert isinstance(graph, Graph)
    assert graph.size > 0
    seen = graph.size * [0]
    seen[0] = 1
    pending = [0]
    reached = 0
    while pending:
        vertex = pending.pop()
        reached += 1
        # get_edges may return None for an isolated vertex, hence `or []`.
        for edge in (graph.get_edges(vertex) or []):
            if not seen[edge.to]:
                seen[edge.to] = 1
                pending.append(edge.to)
    return reached
| [
"hamidbazargani@gmail.com"
] | hamidbazargani@gmail.com |
9908a56a061eff21e4ceac0b1022f1a7490368bd | 52070bb0c7801e927c137ef0c3c1280e35e8ff62 | /setup.py | 1abef2de6614eb9052f7a6abaaa0f5904ac17d61 | [
"MIT"
] | permissive | frommie/rasahub-humhub | 0dd24fcfc73a7496672087465427add14ce26a9a | fe8b2f64caa37f05fcc977d331285668a7fcbb84 | refs/heads/master | 2020-03-14T12:40:52.803384 | 2018-05-29T11:45:43 | 2018-05-29T11:45:43 | 131,617,272 | 0 | 1 | MIT | 2018-05-29T11:04:52 | 2018-04-30T16:06:24 | Python | UTF-8 | Python | false | false | 1,009 | py | from setuptools import setup, find_packages
# Runtime dependencies: the rasahub core, MySQL access, NLP tooling and
# the Google Calendar connector package.
install_requires = [
    'rasahub',
    'mysql-connector',
    'nltk',
    'rasahub-google-calendar',
]

# Extra dependencies needed only by the test suite (throwaway MySQL
# instances via testing.mysqld).
tests_requires = [
    'testing.common.database',
    'testing.mysqld'
]

# Exposed as ``pip install rasahub-humhub[test]``.
extras_requires = {
    'test': tests_requires
}

setup(name='rasahub-humhub',
      version='0.3.1',
      description='Humhub connector for Rasahub',
      url='http://github.com/frommie/rasahub-humhub',
      author='Christian Frommert',
      author_email='christian.frommert@gmail.com',
      license='MIT',
      classifiers=[
          'Development Status :: 3 - Alpha',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          # NOTE(review): only Python 2.7 is declared -- confirm whether
          # the package actually supports Python 3 as well.
          'Programming Language :: Python :: 2.7',
          'Topic :: Software Development',
      ],
      keywords='rasahub humhub',
      packages=find_packages(exclude=['docs', 'tests']),
      install_requires=install_requires,
      tests_require=tests_requires,
      extras_require=extras_requires,
      )
| [
"christian.frommert@gmail.com"
] | christian.frommert@gmail.com |
28dd7802ca94809fb9c83ef1872020eeb3df4970 | 43078a93c7c0b1f0ff9d4038ddb70b091a019851 | /backend/api/main.py | ef9b8f8c2597d6d24a46bcc2f5617e5a2e8aa370 | [] | no_license | danherre/fakebook-marketplace | 3d9dbcbe7413629c819bbcf693c2ae240abad78c | e62d83f2ac074f20d6efe88316881aa0fb66af69 | refs/heads/master | 2023-01-23T15:48:42.266358 | 2020-12-08T01:11:44 | 2020-12-08T01:11:44 | 319,466,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from flask import Flask, request, session, abort
app = Flask(__name__)

@app.route("/")
def hello():
    """Root endpoint; placeholder response."""
    return "Hello World!"

if __name__ == "__main__":
    app.run()  # Flask development server; not intended for production.
"amanzhan@umich.edu"
] | amanzhan@umich.edu |
dd05a3660265b3bb6cf0d8000feff2f9889e8c5a | ee4b296cc4a76aef70a619a72388646e8a375966 | /main_app/migrations/0006_dog_user.py | a2532b67574add8e8e764a2b62176e7916a4d78d | [] | no_license | sweetvanloan/dogcollector | 2c4868ce6ea09b1d4c090c2713e4c7490e5fb73d | f55ac06d50413f570ea648daf2b215001bc80407 | refs/heads/master | 2022-12-22T07:58:42.545424 | 2020-09-22T00:32:39 | 2020-09-22T00:32:39 | 297,463,294 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 592 | py | # Generated by Django 3.1 on 2020-09-21 23:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add a required ``user`` foreign key to the Dog model."""

    dependencies = [
        # Make sure the user model's migrations run first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main_app', '0005_photo'),
    ]

    operations = [
        migrations.AddField(
            model_name='dog',
            name='user',
            # default=1 backfills existing rows with the first user;
            # preserve_default=False drops that default after migrating.
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='auth.user'),
            preserve_default=False,
        ),
    ]
| [
"Sweet@SweetVanLoan.com"
] | Sweet@SweetVanLoan.com |
14d9b13c4985317a5d54ac63e32834cff9f3ed42 | 6531330231a8ea568534cf8c73bdab9422288a9e | /authentication/settings.py | 7fcef2268dad295ce816c2690db028241483fd0b | [
"MIT"
] | permissive | mateusvictor/Authentication-System | 7bbb1cc45f237b22cfa2f88ba93ac9f19507d302 | c94d97f38bfaf5d6861de633a4f7e2c375874d7e | refs/heads/main | 2023-04-10T08:34:10.871198 | 2021-04-25T19:41:45 | 2021-04-25T19:41:45 | 361,516,250 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,266 | py | """
Django settings for authentication project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2j!=pf*8t^2k=j@-1r3r3&18+&ou2(e5icf!hl-il&v1a#5lco'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'crispy_forms',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'authentication.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'authentication.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
# --- Project-specific settings appended after generation ---
AUTH_USER_MODEL = 'users.User'          # custom user model from the users app
LOGIN_URL = 'login'                     # URL name Django redirects to for auth
LOGIN_REDIRECT_URL = 'home'             # destination after a successful login
LOGOUT_REDIRECT_URL = 'home'            # destination after logout
CRISPY_TEMPLATE_PACK = 'bootstrap4'     # django-crispy-forms rendering style
| [
"mateus_victors@outlook.com"
] | mateus_victors@outlook.com |
41dc432df23214ff9f97ee36dd737200733c996b | c7792f3ef178fcc36ceea9c54903c18c32a562a1 | /problems/258. Add Digits.py | 60d891c576d4d60c5f22bc2f0ba26cc124cffafd | [] | no_license | yukiii-zhong/Leetcode | ed69cc9a96aac5ed7ce7fa4acb048cadf50dc568 | 0fc972e5cd2baf1b5ddf8b192962629f40bc3bf4 | refs/heads/master | 2020-03-10T12:17:23.211124 | 2018-07-17T13:43:57 | 2018-07-17T13:43:57 | 129,374,261 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | class Solution:
def addDigits(self, num):
"""
:type num: int
:rtype: int
"""
while num >=10:
temp = 0
while num > 0:
temp += num % 10
num = num // 10
num = temp
return num | [
"30614747+yukiii-zhong@users.noreply.github.com"
] | 30614747+yukiii-zhong@users.noreply.github.com |
3784c3442149346ad2ad5f0b39e5d50c865b2abc | d019baf5ec7911a72b335d2a3dc5165294cf87ad | /practice 2/Built_in_function_1.py | c725fde604b06d82a2d9e3019f52b15f430ee197 | [] | no_license | EunSe-o/Python | aaf56ec210d60dd48c3983d5e597da44ec49b3e6 | f062f40b7051e888fda6976cee6fbf9634244a73 | refs/heads/master | 2023-02-16T20:50:54.557663 | 2021-01-09T07:13:36 | 2021-01-09T07:13:36 | 287,592,069 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | print(abs(-5)) # abs : 절댓값
print(pow(2,3)) # pow(a,b) : a^b의 값
print(max(5,22)) # max : 최댓값
print(min(4,11)) # min : 최솟값
print(round(3.14)) # 3 / # round : 반올림
print(round(5.99)) # 6
from math import * # math library 안에 있는 모든 것을 사용
print(floor(5.99)) # 5 / # floor : 내림 , 소숫점 이하를 모두 삭제
print(ceil(3.14)) # 4 / # ceil : 올림 , 소숫점과 관계없이 정수자리 1 올림
print(sqrt(16)) # 4 / # sqrt : 제곱근 , 16의 제곱근을 구함
| [
"dkan601@gmail.com"
] | dkan601@gmail.com |
1ba19442b8c3c6088b2d7f91c116de4870e58ec3 | 8f8ac99fd3ed9ceb36778b404f6fdd0b6899d3f4 | /pyobjc-framework-SpriteKit/Lib/SpriteKit/_metadata.py | 133d826139cade678d14d84de54d9b4985ef7ec4 | [
"MIT"
] | permissive | strogo/pyobjc | ac4201c7742eb75348328eeecb7eedf4e3458de3 | 2579c5eaf44b0c5af77ee195c417d2c65e72dfda | refs/heads/master | 2023-07-13T00:41:56.448005 | 2021-08-24T06:42:53 | 2021-08-24T06:42:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,549 | py | # This file is generated by objective.metadata
#
# Last update: Sun Jul 11 21:54:16 2021
#
# flake8: noqa
import objc, sys
# Width- and architecture-dependent value selectors used by the
# generated metadata strings below.  Do not hand-edit: this file is
# produced by objective.metadata.
if sys.maxsize > 2 ** 32:
    def sel32or64(a, b):
        # 64-bit process: pick the second (64-bit) variant.
        return b
else:
    def sel32or64(a, b):
        # 32-bit process: pick the first (32-bit) variant.
        return a
if objc.arch == "arm64":
    def selAorI(a, b):
        # Name suggests "ARM or Intel": pick the arm64 variant here.
        return a
    def selAorI(a, b):
        # Non-arm64 (Intel) build: pick the second variant.
        return b
misc = {}
constants = """$$"""
enums = """$PHYSICSKIT_MINUS_GL_IMPORTS@1$SKActionTimingEaseIn@1$SKActionTimingEaseInEaseOut@3$SKActionTimingEaseOut@2$SKActionTimingLinear@0$SKAttributeTypeFloat@1$SKAttributeTypeHalfFloat@5$SKAttributeTypeNone@0$SKAttributeTypeVectorFloat2@2$SKAttributeTypeVectorFloat3@3$SKAttributeTypeVectorFloat4@4$SKAttributeTypeVectorHalfFloat2@6$SKAttributeTypeVectorHalfFloat3@7$SKAttributeTypeVectorHalfFloat4@8$SKBlendModeAdd@1$SKBlendModeAlpha@0$SKBlendModeMultiply@3$SKBlendModeMultiplyAlpha@7$SKBlendModeMultiplyX2@4$SKBlendModeReplace@6$SKBlendModeScreen@5$SKBlendModeSubtract@2$SKInterpolationModeLinear@1$SKInterpolationModeSpline@2$SKInterpolationModeStep@3$SKLabelHorizontalAlignmentModeCenter@0$SKLabelHorizontalAlignmentModeLeft@1$SKLabelHorizontalAlignmentModeRight@2$SKLabelVerticalAlignmentModeBaseline@0$SKLabelVerticalAlignmentModeBottom@3$SKLabelVerticalAlignmentModeCenter@1$SKLabelVerticalAlignmentModeTop@2$SKNodeFocusBehaviorFocusable@2$SKNodeFocusBehaviorNone@0$SKNodeFocusBehaviorOccluding@1$SKParticleRenderOrderDontCare@2$SKParticleRenderOrderOldestFirst@1$SKParticleRenderOrderOldestLast@0$SKRepeatModeClamp@1$SKRepeatModeLoop@2$SKSceneScaleModeAspectFill@1$SKSceneScaleModeAspectFit@2$SKSceneScaleModeFill@0$SKSceneScaleModeResizeFill@3$SKTextureFilteringLinear@1$SKTextureFilteringNearest@0$SKTileAdjacencyAll@255$SKTileAdjacencyDown@16$SKTileAdjacencyDownEdge@199$SKTileAdjacencyLeft@64$SKTileAdjacencyLeftEdge@31$SKTileAdjacencyLowerLeft@32$SKTileAdjacencyLowerLeftCorner@253$SKTileAdjacencyLowerLeftEdge@7$SKTileAdjacencyLowerRight@8$SKTileAdjacencyLowerRightCorner@127$SKTileAdjacencyLowerRightEdge@193$SKTileAdjacencyRight@4$SKTileAdjacencyRightEdge@241$SKTileAdjacencyUp@1$SKTileAdjacencyUpEdge@124$SKTileAdjacencyUpperLeft@128$SKTileAdjacencyUpperLeftCorner@247$SKTileAdjacencyUpperLeftEdge@28$SKTileAdjacencyUpperRight@2$SKTileAdjacencyUpperRightCorner@223$SKTileAdjacencyUpperRightEdge@112$SKTileDefinitionRotation0@0$SKTileDefinitionRotation180@2$SKTileDefinitionRotat
ion270@3$SKTileDefinitionRotation90@1$SKTileHexFlatAdjacencyAll@63$SKTileHexFlatAdjacencyDown@8$SKTileHexFlatAdjacencyLowerLeft@16$SKTileHexFlatAdjacencyLowerRight@4$SKTileHexFlatAdjacencyUp@1$SKTileHexFlatAdjacencyUpperLeft@32$SKTileHexFlatAdjacencyUpperRight@2$SKTileHexPointyAdjacencyAdd@63$SKTileHexPointyAdjacencyLeft@32$SKTileHexPointyAdjacencyLowerLeft@16$SKTileHexPointyAdjacencyLowerRight@8$SKTileHexPointyAdjacencyRight@4$SKTileHexPointyAdjacencyUpperLeft@1$SKTileHexPointyAdjacencyUpperRight@2$SKTileSetTypeGrid@0$SKTileSetTypeHexagonalFlat@2$SKTileSetTypeHexagonalPointy@3$SKTileSetTypeIsometric@1$SKTransitionDirectionDown@1$SKTransitionDirectionLeft@3$SKTransitionDirectionRight@2$SKTransitionDirectionUp@0$SKUniformTypeFloat@1$SKUniformTypeFloatMatrix2@5$SKUniformTypeFloatMatrix3@6$SKUniformTypeFloatMatrix4@7$SKUniformTypeFloatVector2@2$SKUniformTypeFloatVector3@3$SKUniformTypeFloatVector4@4$SKUniformTypeNone@0$SKUniformTypeTexture@8$SKVIEW_AVAILABLE@1$"""
misc.update({})
aliases = {"SK_AVAILABLE": "__OSX_AVAILABLE_STARTING", "SKColor": "NSColor"}
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(
b"NSObject",
b"didApplyConstraintsForScene:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"didBeginContact:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"didEndContact:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"didEvaluateActionsForScene:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"didFinishUpdateForScene:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"didSimulatePhysicsForScene:",
{"required": False, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"setSubdivisionLevels:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"q"}}},
)
r(
b"NSObject",
b"setWarpGeometry:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(b"NSObject", b"subdivisionLevels", {"required": True, "retval": {"type": b"q"}})
r(
b"NSObject",
b"update:forScene:",
{
"required": False,
"retval": {"type": b"v"},
"arguments": {2: {"type": b"d"}, 3: {"type": b"@"}},
},
)
r(
b"NSObject",
b"view:shouldRenderAtTime:",
{
"required": False,
"retval": {"type": "Z"},
"arguments": {2: {"type": b"@"}, 3: {"type": "d"}},
},
)
r(b"NSObject", b"warpGeometry", {"required": True, "retval": {"type": b"@"}})
r(b"SK3DNode", b"autoenablesDefaultLighting", {"retval": {"type": b"Z"}})
r(b"SK3DNode", b"isPlaying", {"retval": {"type": b"Z"}})
r(b"SK3DNode", b"loops", {"retval": {"type": b"Z"}})
r(
b"SK3DNode",
b"projectPoint:",
{"retval": {"type": b"%"}, "arguments": {2: {"type": b"%"}}},
)
r(
b"SK3DNode",
b"setAutoenablesDefaultLighting:",
{"arguments": {2: {"type": b"Z"}}},
)
r(b"SK3DNode", b"setLoops:", {"arguments": {2: {"type": b"Z"}}})
r(b"SK3DNode", b"setPlaying:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SK3DNode",
b"unprojectPoint:",
{"retval": {"type": b"%"}, "arguments": {2: {"type": b"%"}}},
)
r(
b"SKAction",
b"animateWithNormalTextures:timePerFrame:resize:restore:",
{"arguments": {4: {"type": b"Z"}, 5: {"type": b"Z"}}},
)
r(
b"SKAction",
b"animateWithTextures:timePerFrame:resize:restore:",
{"arguments": {4: {"type": b"Z"}, 5: {"type": b"Z"}}},
)
r(
b"SKAction",
b"animateWithWarps:times:restore:",
{"arguments": {4: {"type": b"Z"}}},
)
r(
b"SKAction",
b"customActionWithDuration:actionBlock:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": sel32or64(b"f", b"d")},
},
}
}
}
},
)
r(
b"SKAction",
b"followPath:asOffset:orientToPath:duration:",
{"arguments": {3: {"type": b"Z"}, 4: {"type": b"Z"}}},
)
r(
b"SKAction",
b"followPath:asOffset:orientToPath:speed:",
{"arguments": {3: {"type": b"Z"}, 4: {"type": b"Z"}}},
)
r(
b"SKAction",
b"performSelector:onTarget:",
{"arguments": {2: {"sel_of_type": b"v@:"}}},
)
r(
b"SKAction",
b"playSoundFileNamed:waitForCompletion:",
{"arguments": {3: {"type": b"Z"}}},
)
r(
b"SKAction",
b"rotateToAngle:duration:shortestUnitArc:",
{"arguments": {4: {"type": b"Z"}}},
)
r(
b"SKAction",
b"runBlock:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"@?"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(
b"SKAction",
b"runBlock:queue:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"@?"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(b"SKAction", b"setNormalTexture:resize:", {"arguments": {3: {"type": b"Z"}}})
r(b"SKAction", b"setTexture:resize:", {"arguments": {3: {"type": b"Z"}}})
r(
b"SKAction",
b"setTimingFunc:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"f"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"f"}},
}
}
}
},
)
r(
b"SKAction",
b"setTimingFunction:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"f"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"f"}},
}
}
}
},
)
r(
b"SKAction",
b"timingFunc",
{
"retval": {
"callable": {
"retval": {"type": b"f"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"f"}},
}
}
},
)
r(
b"SKAction",
b"timingFunction",
{
"retval": {
"callable": {
"retval": {"type": b"f"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"f"}},
}
}
},
)
r(b"SKAudioNode", b"autoplayLooped", {"retval": {"type": b"Z"}})
r(b"SKAudioNode", b"isPositional", {"retval": {"type": b"Z"}})
r(b"SKAudioNode", b"setAutoplayLooped:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKAudioNode", b"setPositional:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKCameraNode", b"containsNode:", {"retval": {"type": b"Z"}})
r(b"SKConstraint", b"enabled", {"retval": {"type": b"Z"}})
r(b"SKConstraint", b"setEnabled:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKEffectNode", b"setShouldCenterFilter:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKEffectNode", b"setShouldEnableEffects:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKEffectNode", b"setShouldRasterize:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKEffectNode", b"shouldCenterFilter", {"retval": {"type": b"Z"}})
r(b"SKEffectNode", b"shouldEnableEffects", {"retval": {"type": b"Z"}})
r(b"SKEffectNode", b"shouldRasterize", {"retval": {"type": b"Z"}})
r(
b"SKFieldNode",
b"customFieldWithEvaluationBlock:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"@?"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(b"SKFieldNode", b"direction", {"retval": {"type": b"%"}})
r(b"SKFieldNode", b"isEnabled", {"retval": {"type": b"Z"}})
r(b"SKFieldNode", b"isExclusive", {"retval": {"type": b"Z"}})
r(
b"SKFieldNode",
b"linearGravityFieldWithVector:",
{"arguments": {2: {"type": b"%"}}},
)
r(b"SKFieldNode", b"setDirection:", {"arguments": {2: {"type": b"%"}}})
r(b"SKFieldNode", b"setEnabled:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKFieldNode", b"setExclusive:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKFieldNode", b"velocityFieldWithVector:", {"arguments": {2: {"type": b"%"}}})
r(b"SKLightNode", b"isEnabled", {"retval": {"type": b"Z"}})
r(b"SKLightNode", b"setEnabled:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SKMutableTexture",
b"modifyPixelDataWithBlock:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {
"type": b"^v",
"type_modifier": "N",
"c_array_length_in_arg": 2,
},
2: {"type": b"Q"},
},
}
}
}
},
)
r(b"SKNode", b"containsPoint:", {"retval": {"type": b"Z"}})
r(
b"SKNode",
b"enumerateChildNodesWithName:usingBlock:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"^Z", "type_modifier": "o"},
},
}
}
}
},
)
r(b"SKNode", b"hasActions", {"retval": {"type": b"Z"}})
r(b"SKNode", b"inParentHierarchy:", {"retval": {"type": b"Z"}})
r(b"SKNode", b"intersectsNode:", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isAccessibilityElement", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isAccessibilityEnabled", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isEqualToNode:", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isHidden", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isPaused", {"retval": {"type": b"Z"}})
r(b"SKNode", b"isUserInteractionEnabled", {"retval": {"type": b"Z"}})
r(
b"SKNode",
b"nodeWithFileNamed:securelyWithClasses:andError:",
{"arguments": {4: {"type_modifier": b"o"}}},
)
r(
b"SKNode",
b"runAction:completion:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(b"SKNode", b"setAccessibilityElement:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKNode", b"setAccessibilityEnabled:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKNode", b"setHidden:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKNode", b"setPaused:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKNode", b"setUserInteractionEnabled:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKPhysicsBody", b"affectedByGravity", {"retval": {"type": b"Z"}})
r(b"SKPhysicsBody", b"allowsRotation", {"retval": {"type": b"Z"}})
r(b"SKPhysicsBody", b"isDynamic", {"retval": {"type": b"Z"}})
r(b"SKPhysicsBody", b"isResting", {"retval": {"type": b"Z"}})
r(b"SKPhysicsBody", b"pinned", {"retval": {"type": b"Z"}})
r(b"SKPhysicsBody", b"setAffectedByGravity:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKPhysicsBody", b"setAllowsRotation:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKPhysicsBody", b"setDynamic:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKPhysicsBody", b"setPinned:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKPhysicsBody", b"setResting:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SKPhysicsBody",
b"setUsesPreciseCollisionDetection:",
{"arguments": {2: {"type": b"Z"}}},
)
r(b"SKPhysicsBody", b"usesPreciseCollisionDetection", {"retval": {"type": b"Z"}})
r(
b"SKPhysicsJointPin",
b"setShouldEnableLimits:",
{"arguments": {2: {"type": b"Z"}}},
)
r(b"SKPhysicsJointPin", b"shouldEnableLimits", {"retval": {"type": b"Z"}})
r(
b"SKPhysicsJointSliding",
b"setShouldEnableLimits:",
{"arguments": {2: {"type": b"Z"}}},
)
r(b"SKPhysicsJointSliding", b"shouldEnableLimits", {"retval": {"type": b"Z"}})
r(
b"SKPhysicsWorld",
b"enumerateBodiesAlongRayStart:end:usingBlock:",
{
"arguments": {
4: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"{CGPoint=dd}"},
3: {"type": b"{CGVector=dd}"},
4: {"type": b"o^Z"},
},
}
}
}
},
)
r(
b"SKPhysicsWorld",
b"enumerateBodiesAtPoint:usingBlock:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"^Z", "type_modifier": "o"},
},
}
}
}
},
)
r(
b"SKPhysicsWorld",
b"enumerateBodiesInRect:usingBlock:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"^Z", "type_modifier": "o"},
},
}
}
}
},
)
r(
b"SKPhysicsWorld",
b"sampleFieldsAt:",
{"retval": {"type": b"%"}, "arguments": {2: {"type": b"%"}}},
)
r(b"SKRegion", b"containsPoint:", {"retval": {"type": b"Z"}})
r(b"SKRenderer", b"ignoresSiblingOrder", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"setIgnoresSiblingOrder:", {"arguments": {2: {"type": "Z"}}})
r(
b"SKRenderer",
b"setShouldCullNonVisibleNodes:",
{"arguments": {2: {"type": "Z"}}},
)
r(b"SKRenderer", b"setShowsDrawCount:", {"arguments": {2: {"type": "Z"}}})
r(b"SKRenderer", b"setShowsFields:", {"arguments": {2: {"type": "Z"}}})
r(b"SKRenderer", b"setShowsNodeCount:", {"arguments": {2: {"type": "Z"}}})
r(b"SKRenderer", b"setShowsPhysics:", {"arguments": {2: {"type": "Z"}}})
r(b"SKRenderer", b"setShowsQuadCount:", {"arguments": {2: {"type": "Z"}}})
r(b"SKRenderer", b"shouldCullNonVisibleNodes", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"showsDrawCount", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"showsFields", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"showsNodeCount", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"showsPhysics", {"retval": {"type": "Z"}})
r(b"SKRenderer", b"showsQuadCount", {"retval": {"type": "Z"}})
r(b"SKShapeNode", b"isAntialiased", {"retval": {"type": b"Z"}})
r(b"SKShapeNode", b"setAntialiased:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SKShapeNode",
b"shapeNodeWithPath:centered:",
{"arguments": {3: {"type": b"Z"}}},
)
r(
b"SKShapeNode",
b"shapeNodeWithPoints:count:",
{"arguments": {2: {"type_modifier": b"n", "c_array_length_in_arg": 3}}},
)
r(
b"SKShapeNode",
b"shapeNodeWithSplinePoints:count:",
{"arguments": {2: {"type_modifier": b"n", "c_array_length_in_arg": 3}}},
)
r(
b"SKSpriteNode",
b"spriteNodeWithImageNamed:normalMapped:",
{"arguments": {3: {"type": b"Z"}}},
)
r(
b"SKTexture",
b"preloadTextures:withCompletionHandler:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(
b"SKTexture",
b"preloadWithCompletionHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(b"SKTexture", b"setUsesMipmaps:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SKTexture",
b"textureNoiseWithSmoothness:size:grayscale:",
{"arguments": {4: {"type": b"Z"}}},
)
r(
b"SKTexture",
b"textureWithData:size:flipped:",
{"arguments": {4: {"type": b"Z"}}},
)
r(b"SKTexture", b"usesMipmaps", {"retval": {"type": b"Z"}})
r(
b"SKTextureAtlas",
b"preloadTextureAtlases:withCompletionHandler:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(
b"SKTextureAtlas",
b"preloadTextureAtlasesNamed:withCompletionHandler:",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": b"^v"},
1: {"type": b"@"},
2: {"type": b"@"},
},
}
}
}
},
)
r(
b"SKTextureAtlas",
b"preloadWithCompletionHandler:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(b"SKTileDefinition", b"flipHorizontally", {"retval": {"type": "Z"}})
r(b"SKTileDefinition", b"flipVertically", {"retval": {"type": "Z"}})
r(b"SKTileDefinition", b"setFlipHorizontally:", {"arguments": {2: {"type": "Z"}}})
r(b"SKTileDefinition", b"setFlipVertically:", {"arguments": {2: {"type": "Z"}}})
r(b"SKTileMapNode", b"enableAutomapping", {"retval": {"type": b"Z"}})
r(b"SKTileMapNode", b"setEnableAutomapping:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKTransformNode", b"quaternion", {"retval": {"type": b"{_simd_quatf=}"}})
r(
b"SKTransformNode",
b"rotationMatrix",
{"retval": {"type": b"{_matrix_float3x3=?}"}},
)
r(
b"SKTransformNode",
b"setQuaternion:",
{"arguments": {2: {"type": b"{_simd_quatf=}"}}},
)
r(
b"SKTransformNode",
b"setRotationMatrix:",
{"arguments": {2: {"type": b"{_matrix_float3x3=?}"}}},
)
r(b"SKTransition", b"pausesIncomingScene", {"retval": {"type": b"Z"}})
r(b"SKTransition", b"pausesOutgoingScene", {"retval": {"type": b"Z"}})
r(b"SKTransition", b"setPausesIncomingScene:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKTransition", b"setPausesOutgoingScene:", {"arguments": {2: {"type": b"Z"}}})
r(
b"SKUniform",
b"initWithName:matrixFloat2x2:",
{"arguments": {3: {"type": b"{_matrix_float2x2=?}"}}},
)
r(
b"SKUniform",
b"initWithName:matrixFloat3x3:",
{"arguments": {3: {"type": b"{_matrix_float3x3=?}"}}},
)
r(
b"SKUniform",
b"initWithName:matrixFloat4x4:",
{"arguments": {3: {"type": b"{_matrix_float4x4=?}"}}},
)
r(
b"SKUniform",
b"matrixFloat2x2Value",
{"retval": {"type": b"{_matrix_float2x2=?}"}},
)
r(
b"SKUniform",
b"matrixFloat3x3Value",
{"retval": {"type": b"{_matrix_float3x3=?}"}},
)
r(
b"SKUniform",
b"matrixFloat4x4Value",
{"retval": {"type": b"{_matrix_float4x4=?}"}},
)
r(
b"SKUniform",
b"setMatrixFloat2x2Value:",
{"arguments": {2: {"type": b"{_matrix_float2x2=?}"}}},
)
r(
b"SKUniform",
b"setMatrixFloat3x3Value:",
{"arguments": {2: {"type": b"{_matrix_float3x3=?}"}}},
)
r(
b"SKUniform",
b"setMatrixFloat4x4Value:",
{"arguments": {2: {"type": b"{_matrix_float4x4=?}"}}},
)
r(
b"SKUniform",
b"uniformWithName:matrixFloat2x2:",
{"arguments": {3: {"type": b"{_matrix_float2x2=?}"}}},
)
r(
b"SKUniform",
b"uniformWithName:matrixFloat3x3:",
{"arguments": {3: {"type": b"{_matrix_float3x3=?}"}}},
)
r(
b"SKUniform",
b"uniformWithName:matrixFloat4x4:",
{"arguments": {3: {"type": b"{_matrix_float4x4=?}"}}},
)
r(b"SKView", b"allowsTransparency", {"retval": {"type": b"Z"}})
r(b"SKView", b"disableDepthStencilBuffer", {"retval": {"type": "Z"}})
r(b"SKView", b"ignoresSiblingOrder", {"retval": {"type": b"Z"}})
r(b"SKView", b"isAsynchronous", {"retval": {"type": b"Z"}})
r(b"SKView", b"isPaused", {"retval": {"type": b"Z"}})
r(b"SKView", b"setAllowsTransparency:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setAsynchronous:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setDisableDepthStencilBuffer:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setIgnoresSiblingOrder:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setPaused:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShouldCullNonVisibleNodes:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsDrawCount:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsFPS:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsFields:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsNodeCount:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsPhysics:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"setShowsQuadCount:", {"arguments": {2: {"type": b"Z"}}})
r(b"SKView", b"shouldCullNonVisibleNodes", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsDrawCount", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsFPS", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsFields", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsNodeCount", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsPhysics", {"retval": {"type": b"Z"}})
r(b"SKView", b"showsQuadCount", {"retval": {"type": b"Z"}})
finally:
objc._updatingMetadata(False)
expressions = {
"SKTileAdjacencyRightEdge": "SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft | SKTileAdjacencyUp",
"SKTileAdjacencyUpperRightCorner": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown | SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft",
"SKTileAdjacencyUpperRightEdge": "SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft",
"SKTileAdjacencyLowerRightCorner": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft",
"SKTileAdjacencyLowerRightEdge": "SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft | SKTileAdjacencyUp",
"SKTileAdjacencyDownEdge": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight | SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft",
"SKTileAdjacencyLeftEdge": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown",
"SKTileAdjacencyUpEdge": "SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft",
"SKTileAdjacencyLowerLeftEdge": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight",
"SKTileAdjacencyUpperLeftCorner": "SKTileAdjacencyUp | SKTileAdjacencyUpperRight | SKTileAdjacencyRight | SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft",
"SKTileAdjacencyUpperLeftEdge": "SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown",
"SKTileAdjacencyLowerLeftCorner": "SKTileAdjacencyUp | SKTileAdjacencyRight | SKTileAdjacencyLowerRight | SKTileAdjacencyDown | SKTileAdjacencyLowerLeft | SKTileAdjacencyLeft | SKTileAdjacencyUpperLeft",
}
# END OF FILE
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
33a6c66ed1fcda77e23c93fbeb6b2dd5e4301723 | 3168f262d0e9ae151b04a0deed4642081679e319 | /scripts/datadownload.py | cfbd1e7b313e07bd8866f28530fef00b6fd0a74c | [] | no_license | mariamsm1/ShortProject2020 | ef71106b6ce905b41b38dd05f6067f748a63c746 | c3ef79668613589e42719e73fdd5fef9c8e1b5f0 | refs/heads/master | 2023-04-28T17:20:15.281680 | 2021-05-17T09:25:50 | 2021-05-17T09:25:50 | 257,231,231 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 8,196 | py | #MARIAM MIARI
#
#2020-2021
#
#SCRIPT TO DOWNLOAD ALL DATA USED IN THIS SHORT PROJECT.
#
#---------------------------------
#Downloading .tar.gz files from "The Autophagy database"(release version January. 18, 2017). It requires wget and tarfile libraries.
#save the url of the file to a variable
url = "http://www.tanpaku.org/autophagy/download/autophagyDB.tar.gz"
#download with wget and use the name of the file with its extension
wget.download(url,'autophagyDB.tar.gz')
#Tarfile by default does not treat the file as gzipped so give it the r:gz mode. then open the folder.Here I do not specify 'rb' because it's a folder.
AutophagyDB= tarfile.open('autophagyDB.tar.gz', "r:gz")
#extract the content
AutophagyDB.extractall()
AutophagyDB.close()
#Downloading go.owl. This requires owlready2 library(release vesrion 2020-04)
#specify the directory you want to append the file to
onto_path.append('/Volumes/LaCie/MasterThesis2020/jupTest')
go_onto = get_ontology("http://purl.obolibrary.org/obo/go.owl").load()
#save the file
go_onto.save()
#Downloading go-basic-obo. This requires goatools library and need to import GODag(release version 2020-04)
url = 'http://purl.obolibrary.org/obo/go/go-basic.obo'
wget.download(url,'go-basic.obo')
go_obo = goatools.obo_parser.GODag('go-basic.obo')
#Downloading proteins with basic information from Human Autophagy Modulator database(Version 2.2(2018-06-14))
url = 'http://hamdb.scbdd.com/static/home/download/protein-basic-csv.zip'
wget.download(url, 'protein-basic-csv.zip')
zip = zipfile.ZipFile('protein-basic-csv.zip')
zip.printdir()
zip.extractall()
#when parsing the file specify the endcoding = 'latin1' and low_memory= False
#Downloading Deathbase-public list of proteins (data collection 2020-04-08)
url = 'http://www.deathbase.org/docs/protein_list.txt'
wget.download(url, 'protein_list.txt')
#Downloading yeast cellDeath database-yeast apoptosis database (data collection 2020-04-08):
url = 'http://ycelldeath.com/yapoptosis/download/yApoptosis.csv'
wget.download(url, 'yApoptosis.csv')
#Downloading goa_uniprot. This file contains all GO annotations and information for proteins in the UniProt KnowledgeBase (UniProtKB) and for entities other than proteins.(uploaded 04-Mar-2020, collected 2020-04-09)
url = 'ftp://ftp.ebi.ac.uk/pub/databases/GO/goa/UNIPROT/goa_uniprot_all.gaf.gz'
wget.download(url, 'goa_uniprot_all.gaf.gz')
GO = gzip.open('goa_uniprot_all.gaf.gz', 'rb')
Go_annotation_uniprot = GO.read()
GO.close()
output = open('goa_uniprot_all.gaf', 'wb')
output.write(Go_annotation_uniprot)
output.close()
#Download BCL2 database(release 42.0, last updated 2020-02-18)
url = 'https://bcl2db.lyon.inserm.fr/BCL2DB/BCL2DBCellular'
wget.download(url, 'BCL2DBCellular')
#Downloading the classical proteins from BH3 motif from BCL2 database
url = 'https://bcl2db.lyon.inserm.fr/BCL2DB/BCL2DBBH3only'
wget.download(url, 'BCL2DBBH3only')
#Downloading other proteins with BH3 motif from BCL2 database
url = 'https://bcl2db.lyon.inserm.fr/BCL2DB/BCL2DBOtherBH3'
wget.download(url, 'BCL2DBOtherBH3')
#Download The Human Protein Atlas database (xml)/lysosome (Version: 19.3. Atlas, updated: 2020-03-06)
url = 'https://www.proteinatlas.org/search/Lysosome?format=xml'
wget.download(url, 'proteinAtlasLysosome.xml')
#Download The The Human Protein Atlas database (tsv)/lysosome (P.S. this file wasn't used, the above xml was used for the 'lysosome' search instead)
url = 'https://www.proteinatlas.org/search/lysosome?format=tsv'
wget.download(url, 'proteinAtlasLysosome.tsv')
#Download The The Human Protein Atlas database (tsv)/lysosomes
url = 'https://www.proteinatlas.org/search/lysosomes?format=tsv'
wget.download(url, 'proteinAtlasLysosomeS.tsv')
#Download The The Human Protein Atlas database (tsv)/lysosomal
url = 'https://www.proteinatlas.org/search/lysosomal?format=tsv'
wget.download(url, 'proteinAtlasLysosomAL.tsv')
#Download The The Human Protein Atlas database (tsv)/vesicle
import wget
url = 'https://www.proteinatlas.org/search/vesicle?format=tsv'
wget.download(url, 'proteinAtlasLysosomeVesicle.tsv')
#Downloading casbah database(data collected 2020-04-10)
url = 'http://bioinf.gen.tcd.ie/cgi-bin/casbah/casbah.pl'
wget.download(url, 'casbah.pl')
#Downloading Hela Spatial Proteome(data collected 2020-04-13)
url = 'http://mapofthecell.biochem.mpg.de/HeLa_Subcell_Localization_Summary.xlsx'
wget.download(url, 'Hela_Subcell_localization.xlsx')
#P.S. more data were downloaded from "https://www.cell.com/cell-reports/fulltext/S2211-1247(17)31188-9?_returnURL=https%3A%2F%2Flinkinghub.elsevier.com%2Fretrieve%2Fpii%2FS2211124717311889%3Fshowall%3Dtrue".
#These were in Table S1 from the supplemental material.
#Start by downloading Amigo Autophagy GO:0006914(release 2020-04)
url = 'http://golr-aux.geneontology.io/solr/select?defType=edismax&qt=standard&indent=on&wt=csv&rows=100000&start=0&fl=bioentity,bioentity_label,synonym,taxon_label,annotation_class_list,source&facet=true&facet.mincount=1&facet.sort=count&json.nl=arrarr&facet.limit=25&hl=true&hl.simple.pre=%3Cem%20class=%22hilite%22%3E&hl.snippets=1000&csv.encapsulator=&csv.separator=%09&csv.header=false&csv.mv.separator=%7C&fq=document_category:%22bioentity%22&facet.field=source&facet.field=taxon_subset_closure_label&facet.field=type&facet.field=panther_family_label&facet.field=annotation_class_list_label&facet.field=regulates_closure_label&q=GO:0006914&qf=bioentity%5E2&qf=bioentity_label_searchable%5E2&qf=bioentity_name_searchable%5E1&qf=bioentity_internal_id%5E1&qf=synonym_searchable%5E1&qf=isa_partof_closure_label_searchable%5E1&qf=regulates_closure%5E1&qf=regulates_closure_label_searchable%5E1&qf=panther_family_searchable%5E1&qf=panther_family_label_searchable%5E1&qf=taxon_label_searchable%5E1'
wget.download(url, 'AmiGo_Autophagy_geneproduct')
#Download Amigo lysosome GO:0005764(release 2020-04)
url = 'http://golr-aux.geneontology.io/solr/select?defType=edismax&qt=standard&indent=on&wt=csv&rows=100000&start=0&fl=bioentity,bioentity_label,synonym,taxon_label,annotation_class_list,source&facet=true&facet.mincount=1&facet.sort=count&json.nl=arrarr&facet.limit=25&hl=true&hl.simple.pre=%3Cem%20class=%22hilite%22%3E&hl.snippets=1000&csv.encapsulator=&csv.separator=%09&csv.header=false&csv.mv.separator=%7C&fq=document_category:%22bioentity%22&facet.field=source&facet.field=taxon_subset_closure_label&facet.field=type&facet.field=panther_family_label&facet.field=annotation_class_list_label&facet.field=regulates_closure_label&q=GO:0005764&qf=bioentity%5E2&qf=bioentity_label_searchable%5E2&qf=bioentity_name_searchable%5E1&qf=bioentity_internal_id%5E1&qf=synonym_searchable%5E1&qf=isa_partof_closure_label_searchable%5E1&qf=regulates_closure%5E1&qf=regulates_closure_label_searchable%5E1&qf=panther_family_searchable%5E1&qf=panther_family_label_searchable%5E1&qf=taxon_label_searchable%5E1'
wget.download(url, 'AmiGo_lysosome_geneproduct')
#Download Amigo cellDeath GO:0008219(release 2020-04)
url = 'http://golr-aux.geneontology.io/solr/select?defType=edismax&qt=standard&indent=on&wt=csv&rows=100000&start=0&fl=bioentity,bioentity_label,synonym,taxon_label,annotation_class_list,source&facet=true&facet.mincount=1&facet.sort=count&json.nl=arrarr&facet.limit=25&hl=true&hl.simple.pre=%3Cem%20class=%22hilite%22%3E&hl.snippets=1000&csv.encapsulator=&csv.separator=%09&csv.header=false&csv.mv.separator=%7C&fq=document_category:%22bioentity%22&facet.field=source&facet.field=taxon_subset_closure_label&facet.field=type&facet.field=panther_family_label&facet.field=annotation_class_list_label&facet.field=regulates_closure_label&q=GO:0008219&qf=bioentity%5E2&qf=bioentity_label_searchable%5E2&qf=bioentity_name_searchable%5E1&qf=bioentity_internal_id%5E1&qf=synonym_searchable%5E1&qf=isa_partof_closure_label_searchable%5E1&qf=regulates_closure%5E1&qf=regulates_closure_label_searchable%5E1&qf=panther_family_searchable%5E1&qf=panther_family_label_searchable%5E1&qf=taxon_label_searchable%5E1'
wget.download(url, 'AmiGo_cellDeath_geneproduct')
#Downloading The Human Autophagy Database(data collected 2020-04-15)
url = 'http://autophagy.lu/clustering/index.html'
wget.download(url, 'HumanAutophagydatabase.html')
| [
"noreply@github.com"
] | noreply@github.com |
b4ad40bd61d3ddb2d873ab757843b5d2b03823d7 | 9ce6a0eaba9f82d536ca4348a1594f90f5d67638 | /PrG14.py | 4f6cebbd15b25ed6a45025222b4684b470f0b358 | [] | no_license | Jeevankv/LearnPython | 028d57ac7b6b68d129e9769541ae509df8ef204d | 504b96795b6ccd107f0b176adc142246f9a26094 | refs/heads/master | 2022-12-21T15:53:27.669206 | 2020-09-01T10:36:38 | 2020-09-01T10:36:38 | 279,399,567 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # actor1.py
#
# Simple attempt at actors
_registry = { }
def send(name, msg):
_registry[name].send(msg)
def actor(func):
def wrapper(*args, **kwargs):
gen = func(*args, **kwargs)
next(gen)
_registry[func.__name__] = gen
return wrapper
if __name__ == '__main__':
@actor
def printer():
while True:
msg = yield
print('printer:', msg)
printer()
n = 10
while n > 0:
send('printer', n)
n -= 1 | [
"noreply@github.com"
] | noreply@github.com |
5a5c3fab36ff919f45dc99298d251d4dee6b9026 | 5706ba3be1f7e11fee1cdef9a70e20e6c4e624c8 | /src/utils/ImageResizer.py | 94be85853d11a1aa251f94bc1ead17021029bf33 | [] | no_license | KoenduBuf/CVbyDL-Object-Property-Inference | 157269e41c0277aa6eb1ce5703a2c6e1089ffbec | 56711dc3092fb93ccd0dd4dbc04a1ac2ea0a4272 | refs/heads/main | 2023-05-10T11:19:36.908014 | 2021-06-18T19:25:46 | 2021-06-18T19:25:46 | 362,487,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | #!/usr/bin/env python3
import os
from PIL import Image
def img_file_extension(filename):
ext = os.path.splitext(filename)[1]
if not isinstance(ext, str):
ext = ext.decode('utf-8')
return ext.lower() in { '.jpg', '.jpeg', '.png' }
class Resizer:
def __init__(self, folder, tofolder):
self.folder = folder
self.tofolder = tofolder
def todo_images(self):
foldere = os.fsencode(self.folder)
for file in os.listdir(foldere):
filename = os.fsdecode(file)
if not img_file_extension(filename): continue
fromfile = os.path.join(self.folder, filename)
if os.path.isdir(fromfile): continue
yield (fromfile, filename)
def sizes_summary(self):
minw = minh = float('inf')
maxw = maxh = float('-inf')
for img_from, _ in self.todo_images():
img = Image.open(img_from)
minw = min(img.width, minw)
minh = min(img.height, minh)
maxw = max(img.width, maxw)
maxh = max(img.height, maxh)
return (minw, minh, maxw, maxh)
def autoresize(self, tosize, only_if_not_exists=True):
if isinstance(tosize, int):
tosize = (tosize, tosize)
fname = f"auto{tosize[0]}x{tosize[1]}"
tof = os.path.join(self.tofolder, fname)
if only_if_not_exists and os.path.isdir(tof):
return tof
os.makedirs(tof, exist_ok=True)
print(f"Converting images into {fname}")
for img_from, filename in self.todo_images():
img = Image.open(img_from)
if img.width != img.height:
raise Exception("Images should have aspect ratio 1:1")
resized = img.resize(tosize, Image.ANTIALIAS)
tofile = os.path.join(tof, filename)
resized.save(tofile)
return tof
if __name__ == '__main__':
resizer = Resizer('../images', '../images')
minw, minh, maxw, maxh = resizer.sizes_summary()
print(f"Width: {minw}-{maxw} | Height: {minh}-{maxh}")
resizer.autoresize(256, False)
| [
"koen.du.buf@gmail.com"
] | koen.du.buf@gmail.com |
21cc1ba23778a7ba76d8b97034ae2a2236266abf | 864acf7235e330123c3d68ed14cdd8bf8eed800b | /crm/accounts/models.py | be98a7f2e0926b1e0b0ec5e7fd8a599dfe9597b2 | [] | no_license | wahid999/djangostuff | 83f0ae53df5c53d192603d7aaf7ee72f8665c240 | c102edfb13b8ba39930e44069122c5e545ef00ee | refs/heads/main | 2023-07-04T20:02:32.550831 | 2021-08-13T16:07:20 | 2021-08-13T16:07:20 | 399,344,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Customer(models.Model):
user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
name = models.CharField(max_length=200, null=True)
phone = models.CharField(max_length=200, null=True)
email = models.CharField(max_length=200, null=True)
profile_pic = models.ImageField(default="IMG_3940.JPG", null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=200, null=True)
def __str__(self):
return self.name
class Product(models.Model):
CATEGORY = (
('Indoor', 'Indoor'),
('Out Door', 'Out Door'),
)
name = models.CharField(max_length=200, null=True)
price = models.FloatField(null=True)
category = models.CharField(max_length=200, null=True, choices=CATEGORY)
description = models.CharField(max_length=200, null=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True, null=True)
tags = models.ManyToManyField(Tag)
def __str__(self):
return self.name
class Order(models.Model):
STATUS = (
('Pending', 'Pending'),
('Out for delivery', 'Out for delivery'),
('Delivered', 'Delivered'),
)
customer = models.ForeignKey(Customer, null=True, on_delete= models.SET_NULL)
product = models.ForeignKey(Product, null=True, on_delete= models.SET_NULL)
date_created = models.DateTimeField(auto_now_add=True, null=True)
status = models.CharField(max_length=200, null=True, choices=STATUS)
note = models.CharField(max_length=1000, null=True)
def __str__(self):
return self.product.name | [
"wahidhussainturi@gmail.com"
] | wahidhussainturi@gmail.com |
30c7dc196abffb5095f86a1a8f6e193b52fd75a4 | 80f9605422d4c219f87341446c46a38610b47b79 | /code/calculation.py | 0530b390cc18eaece1041143f933561640cb5ec7 | [] | no_license | shashwatpritish/magnemite | e2d778103fcbcba3bde09bf1613d6c4447e1220c | 717d7129c4d9d32d2d3279df2a193fdaf8de8559 | refs/heads/master | 2023-04-30T04:11:38.735038 | 2016-12-18T22:33:48 | 2016-12-18T22:33:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | import math
f_c = 5200
f_s = 11700
delta = 21
F_s = 3 * f_s
lambda_c = 2 * math.pi * f_c / F_s
print("Digital Frequency: ", lambda_c) # цифровая частота
lambda_s = 2 * math.pi * f_s / F_s
print("Sampling Frequency: ", lambda_s) # частота выборки
omega_c = math.tan(lambda_s/2) / math.tan(lambda_c/2)
print("Band Edge: ", omega_c) # граница полосы затухания
# 20 * log10((( 2.914 ^ (2 * n)) ^ 1/2)) >= 21
c = 1 / (math.tan(lambda_c / 2))
print('C: ', c)
| [
"noreply@github.com"
] | noreply@github.com |
79a0777522365b672aa322c73f3064e169938708 | 36a2a49f367346f1907487749c0eb107cb16bc7b | /filters.py | 1eaf841eeadc223a6fbb03251ee726d9d0a72e87 | [] | no_license | lukatmyshu/qrassassin | 78c505f67d83d8fb6b1d766a54fd6c986fb33b13 | 8435b6556f731a2429f3628a7169934a77b21320 | refs/heads/master | 2021-01-20T23:26:31.028930 | 2011-01-07T21:46:39 | 2011-01-07T21:47:29 | 1,231,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py | from google.appengine.ext import webapp
from util import *
register = webapp.template.create_template_register()
@register.filter
def make_user_url(userid):
digest = make_digest(userid)
return "/user/%s/%s" % (userid, digest)
| [
"vijay@vcr.corp.meebo.com"
] | vijay@vcr.corp.meebo.com |
7e06dd17c6c8f3382921b07d5a29bfd3f67c4817 | 846e642fd9b01d3b500d3efba4790761039eec24 | /code/smtp.py | 978b10738307ac891f4680f1e0a033f0d1ac1892 | [] | no_license | sachinyadav3496/Machine_Learning_Workshop | ffea23799c0f8477d9b5cc19b98e7d33a6364390 | 37f433631d1ae4e4db37c4baae6cdc3a7619423e | refs/heads/master | 2020-11-24T11:49:45.936367 | 2020-05-01T08:38:10 | 2020-05-01T08:38:10 | 228,130,385 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,080 | py |
import smtplib
import getpass
def Main():
print("\n\n*************************welcome************************\n")
print("\nWelcom to Email Service \n")
print("Enter your login details - \n")
gmail_user = input("\n\nUserName : ")
gmail_password = getpass.getpass("Password : ")
try:
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
print("\n\nConnection established ")
server.ehlo()
server.login(gmail_user, gmail_password)
print("\n\nYou have Successfully logged in your account ",gmail_user)
except Exception as e:
print("\n\nError!!! in Connection ")
print(e)
exit(0)
sent_from = gmail_user
i = int(input("\n\nEnter no. of recipients - "))
print("\n\nEnter Recipients Email Addressess - \n")
to = []
for k in range(i):
to.append(input())
print()
subject = input("\n\nPlease Type in Subject of The Mail - ")
print("\n\nType in Your Message (Type in EOF to FINISH)\n\n")
message=[]
while True:
msg = input()
if msg.upper() == 'EOF' :
break
else :
message.append(msg)
print("\n\nMessege is Ready for Delivery\n\n ")
body = '\n'.join(message)
email_text = """From:%s
To:%s
Subject:%s
%s
"""%(sent_from, ", ".join(to), subject, body)
try:
print("\n\nEmail sending is in process - \n ")
server.sendmail(sent_from, to, email_text)
server.close()
except Exception as e:
print('\nSomething went wrong...',e)
else:
print("\nMessage Delivered to - \n")
for i in to:
print(i)
print()
print("\n\n**********************Exiting********************\n\n")
print("\n\nThanks For using Mail Service \n\n")
if __name__ == "__main__":
Main()
| [
"sachinyadav3496@gmail.com"
] | sachinyadav3496@gmail.com |
7c5b1c7af5084e6f3d0c140bf2c27010d19889f6 | 42cfca63174c61b759fb853708acf4c0ff25cda2 | /mayeye/WebSocket.py | df28b95b0616ebe64dd6fd5837981b3241896b2c | [] | no_license | Ericgoodboy/mayeye | 4a4e84413ccdd07c6b446abefaf34e6533d68902 | d27c50302c5ca694d04b9784bfaec53f735687e9 | refs/heads/master | 2020-03-18T03:14:03.376295 | 2019-09-18T13:20:10 | 2019-09-18T13:20:10 | 134,229,726 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | from socket import *
import _thread
def log(*args):
print(*args)
ip_port = ("127.0.0.1",1923)
class WebSocket(object):
def __init__(self):
self.back_log = 5 # 连接池
self.server = socket(AF_INET, SOCK_STREAM)
self.server.bind(ip_port)
self.handleMap = {}
#self.loadMiddleWare()
log("开始监听")
def listen(self):
self.server.listen(self.back_log)
while True:
conn, addr = self.server.accept()
log("接收到来自:" + str(addr) + "的消息:")
def f(conn):
data = conn.recv(10000)
log(data.decode("utf-8"))
conn.send("hello world".encode("utf-8"))
_thread.start_new_thread(f, (conn,))
if __name__ == '__main__':
ws = WebSocket()
ws.listen()
| [
"776938336@qq.com"
] | 776938336@qq.com |
61f0e4b2c9282b83977912f198e755183d4531c0 | 33753890e0af2afa1bf0a1ccae74d12088cbdfad | /inventory/admin.py | c31657e3618e7fc734e32c51b9caef33a9e16fc5 | [] | no_license | MihailShishow/inventory | 4530fa5a6e299509464cfd228c771a8535244b66 | 627f83d1bc64114bc408d122710eaa14ab4b2c54 | refs/heads/master | 2022-12-13T00:45:12.062871 | 2020-01-29T13:16:40 | 2020-01-29T13:16:40 | 224,256,608 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.contrib import admin
from .models import *
# Register your models here.
admin.site.register(ProductCategory)
admin.site.register(Product)
admin.site.register(Counterparty)
admin.site.register(ProductIncome)
admin.site.register(ProductOutcome)
admin.site.register(IncomeWarrant)
admin.site.register(OutcomeWarrant)
admin.site.register(IncomeInvoice)
admin.site.register(OutcomeInvoice) | [
"fanoffm@gmail.com"
] | fanoffm@gmail.com |
6ad749b446664d9299143f36496269a2281f3e3d | 8cb7e95552d9b55282a26b39eef8223ad5a998a5 | /tests.py | 93e1e420b7f8dedbe4386bb28761d13079a3eb66 | [
"MIT"
] | permissive | coleifer/ucache | c3f8949a710edd2e19ae18ae32eeccc1e1100837 | 1b2ac7ca8c67dd895b45d0fddcc7b5542b0b8fd9 | refs/heads/master | 2022-12-28T00:49:59.736055 | 2022-12-14T02:41:29 | 2022-12-14T02:41:29 | 165,595,140 | 72 | 8 | null | 2019-05-12T12:37:44 | 2019-01-14T04:26:22 | Python | UTF-8 | Python | false | false | 10,273 | py | #!/usr/bin/env python
import glob
import os
import sys
import time
import unittest
from ucache import *
class BaseTestCache(object):
cache_files = []
def get_cache(self, **kwargs):
raise NotImplementedError
def cleanup(self):
for filename in self.cache_files:
if os.path.exists(filename):
os.unlink(filename)
def setUp(self):
self.cache = self.get_cache()
super(BaseTestCache, self).setUp()
def tearDown(self):
self.cache.set_prefix()
self.cache.close()
self.cleanup()
super(BaseTestCache, self).tearDown()
def test_operations(self):
test_data = (
('k1', 'v1'),
('k2', 2),
('k3', None),
('k4', [0, '1', [2]]),
('k5', {'6': ['7', 8, {'9': '10', '11': 12}]}),
)
test_data_dict = dict(test_data)
for key, value in test_data:
self.cache.set(key, value, 60)
for key, value in test_data:
self.assertEqual(self.cache.get(key), value)
self.cache.delete('k1')
self.cache.delete('k3')
self.cache.delete('k5')
for key in ('k1', 'k3', 'k5'):
self.assertIsNone(self.cache.get(key))
for key in ('k2', 'k4'):
self.assertEqual(self.cache.get(key), test_data_dict[key])
self.cache.flush()
self.assertIsNone(self.cache.get('k2'))
self.assertIsNone(self.cache.get('k4'))
def test_bulk_operations(self):
test_data = {
'k1': 'v1',
'k2': 2,
'k3': [0, '1', [2]]}
# Do simple bulk-set.
self.cache.set_many(test_data, timeout=60)
# Do single-set to ensure compatible with bulk-get.
self.cache.set('k4', 'v4')
# Compare results of bulk-get.
self.assertEqual(self.cache.get_many(['k1', 'k2', 'k3', 'k4']), {
'k1': 'v1',
'k2': 2,
'k3': [0, '1', [2]],
'k4': 'v4'})
# Do individual gets to ensure methods are compatible.
self.assertEqual(self.cache.get('k1'), test_data['k1'])
self.assertEqual(self.cache.get('k3'), test_data['k3'])
# Do bulk-delete.
self.cache.delete_many(['k1', 'k3', 'kx'])
self.assertTrue(self.cache['k1'] is None)
self.assertTrue(self.cache['k2'] is not None)
self.assertTrue(self.cache['k3'] is None)
self.assertEqual(self.cache.get_many(['k1', 'k2', 'k3']), {'k2': 2})
# Do single-delete to ensure compatibility.
self.cache.delete('k2')
self.assertTrue(self.cache['k2'] is None)
def test_preload(self):
self.cache.set_many({'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}, timeout=60)
self.assertEqual(self.cache.get('k1'), 'v1')
self.assertTrue(self.cache.get('kx') is None)
with self.cache.preload(['k1', 'k3']):
self.assertEqual(self.cache.get('k1'), 'v1')
self.assertEqual(self.cache.get('k3'), 'v3')
self.assertTrue(self.cache.get('kx') is None)
self.cache._preload['kx'] = 'preloaded'
self.assertEqual(self.cache.get('kx'), 'preloaded')
self.assertEqual(self.cache.get('k1'), 'v1')
self.assertEqual(self.cache.get('k2'), 'v2')
self.assertEqual(self.cache.get('k3'), 'v3')
self.assertTrue(self.cache.get('kx') is None)
def assertWrites(self, n):
self.assertEqual(self.cache.stats['writes'], n)
def assertHits(self, n):
self.assertEqual(self.cache.stats['hits'], n)
def assertPLHits(self, n):
self.assertEqual(self.cache.stats['preload_hits'], n)
def test_preload_re_set(self):
self.cache.set_many({'k1': 'v1', 'k2': 'v2', 'k3': 'v3'}, timeout=60)
self.assertWrites(3)
with self.cache.preload(['k1', 'k2']):
self.assertHits(2)
with self.cache.preload(['k3']):
self.assertHits(3)
self.assertPLHits(0)
self.assertEqual(self.cache.get('k1'), 'v1')
self.assertEqual(self.cache.get('k2'), 'v2')
self.assertEqual(self.cache.get('k3'), 'v3')
# No more actual trips to the backend - we are pulling from the
# preload cache.
self.assertHits(3)
self.assertPLHits(3)
self.cache.set('k2', 'v2-x')
self.assertWrites(4)
self.assertEqual(self.cache.get('k2'), 'v2-x')
self.assertHits(3)
self.assertPLHits(4)
# We lost the scope that k2 was set in, and get a stale value back.
self.assertEqual(self.cache.get('k2'), 'v2')
self.assertHits(3)
self.assertPLHits(5)
# Lost scope for k3, make trip to the cache.
self.assertEqual(self.cache.get('k3'), 'v3')
self.assertHits(4)
self.assertPLHits(5)
def test_decorator(self):
@self.cache.cached(10)
def fn(seed=None):
return time.time()
value = fn()
time.sleep(0.001)
self.assertEqual(fn(), value)
fn.bust()
self.assertFalse(fn() == value)
self.assertEqual(fn(), fn())
self.assertFalse(fn(1) == fn(2))
self.assertEqual(fn(2), fn(2))
def test_property(self):
class Dummy(object):
@self.cache.cached_property
def fn(self):
return time.time()
d = Dummy()
value = d.fn
time.sleep(0.001)
self.assertEqual(d.fn, value)
def test_compression(self):
self.cache.close()
self.cleanup()
cache = self.get_cache(compression=True)
data = {'k1': 'a' * 1024, 'k2': 'b' * 512, 'k3': 'c' * 200}
cache.set_many(data, timeout=60)
cache.set('k4', 'd' * 1024, timeout=60)
self.assertEqual(cache.get('k4'), 'd' * 1024)
res = cache.get_many(['k1', 'k2', 'k3'])
self.assertEqual(res, data)
cache.delete_many(['k1', 'k2', 'k3', 'k4'])
def test_read_expired(self):
self.cache.set('k1', 'v1', -1)
self.assertTrue(self.cache.get('k1') is None)
def test_clean_expired(self):
if not self.cache.manual_expire:
return
day = 86400
for i in range(1, 7):
self.cache.set('k%s' % i, 'v%s' % i, (-i * day) - 1)
self.cache.set('ka', 'va', -5)
self.cache.set('kb', 'vb', 60)
self.cache.set('kc', 'vc', day)
# k1, -1 days ... k6, -6 days.
self.assertTrue(self.cache.get('k4') is None) # k4 is also deleted.
self.assertEqual(self.cache.clean_expired(3), 3) # k3, k5, k6.
self.assertEqual(self.cache.clean_expired(3), 0)
self.assertEqual(self.cache.clean_expired(1), 2) # k1, k2.
self.assertEqual(self.cache.clean_expired(), 1) # ka.
self.assertEqual(self.cache.clean_expired(), 0)
# Cannot retrieve any of the expired data.
for i in range(1, 7):
self.assertTrue(self.cache.get('k%s' % i) is None)
# Set some new expired keys and values.
for i in range(3):
self.cache.set('k%s' % i, 'v%s' % i, -3)
self.assertTrue(self.cache.get('k1') is None)
self.assertEqual(self.cache.clean_expired(), 2)
self.assertEqual(self.cache.clean_expired(), 0)
# Set expired key to a valid time.
self.cache.set('k1', 'v1', 60)
self.assertEqual(self.cache.get('k1'), 'v1')
# Our original keys are still present.
self.assertEqual(self.cache.get('kb'), 'vb')
self.assertEqual(self.cache.get('kc'), 'vc')
def test_prefix_and_flush(self):
self.cache.set_prefix('a')
self.cache.set('k0', 'v0-1')
self.cache.set('k1', 'v1-1')
self.cache.set_prefix('b')
self.cache.set('k0', 'v0-2')
# Check that keys and values are isolated properly by prefix.
self.cache.set_prefix('a')
self.assertEqual(self.cache.get('k0'), 'v0-1')
self.cache.set_prefix('b')
self.assertEqual(self.cache.get('k0'), 'v0-2')
self.cache.set_prefix('a')
try:
self.cache.flush()
except NotImplementedError:
# Memcached does not support prefix match, so we skip.
return
self.assertTrue(self.cache.get('k0') is None)
self.assertTrue(self.cache.get('k1') is None)
self.cache.set_prefix('b')
self.assertEqual(self.cache.get('k0'), 'v0-2')
self.assertTrue(self.cache.get('k1') is None)
class TestKTCache(BaseTestCache, unittest.TestCase):
def cleanup(self):
self.cache.close(close_all=True)
def get_cache(self, **kwargs):
return KTCache(connection_pool=False, **kwargs)
class TestSqliteCache(BaseTestCache, unittest.TestCase):
cache_files = ['sqlite_cache.db']
def get_cache(self, **kwargs):
return SqliteCache('sqlite_cache.db', **kwargs)
class TestRedisCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return RedisCache(**kwargs)
def test_read_expired(self):
# Redis doesn't support setting a negative timeout.
pass
class TestKCCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return KCCache(filename='*', **kwargs)
class TestMemcacheCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return MemcacheCache(**kwargs)
class TestPyMemcacheCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return PyMemcacheCache(**kwargs)
class TestMemoryCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return MemoryCache(**kwargs)
class TestDbmCache(BaseTestCache, unittest.TestCase):
@property
def cache_files(self):
return glob.glob('dbmcache.*')
def get_cache(self, **kwargs):
return DbmCache('dbmcache.db', **kwargs)
class TestGreenDBCache(BaseTestCache, unittest.TestCase):
def get_cache(self, **kwargs):
return GreenDBCache(**kwargs)
if __name__ == '__main__':
unittest.main(argv=sys.argv)
| [
"coleifer@gmail.com"
] | coleifer@gmail.com |
186763da45df57b945fb4e25891451e063e0e17b | 897a8d338f42a0f76cc415d3366d6032f32f65b0 | /homework/3주차숙제_최종용.py | db2a326986ebe580205e99ba318a9a5d2fd32874 | [] | no_license | dreambear00/sparta00 | 351ab9695176a68f7ca19bb1babd0b3edc194765 | 190621bc5044602265730c10717fa46c8bc662d6 | refs/heads/master | 2022-09-27T11:39:43.124220 | 2020-06-05T18:31:40 | 2020-06-05T18:31:40 | 264,386,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,445 | py | import requests
from bs4 import BeautifulSoup
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&rtm=Y',headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
chart = soup.select('#body-content > div.newest-list > div > table > tbody > tr.list')
for song in chart:
number = song.select_one('td.number').text[0:3].strip()
title = song.select_one('td.info > a.title.ellipsis').text.strip()
artist = song.select_one('td.info > a.artist.ellipsis').text.strip('.')
result = artist.split('.')
print(number, title, result)
import requests
from bs4 import BeautifulSoup
headers = {'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}
data = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&ymd=20200530&hh=01&rtm=Y&pg=2',headers=headers)
soup = BeautifulSoup(data.text, 'html.parser')
chart = soup.select('#body-content > div.newest-list > div > table > tbody > tr.list')
for song in chart:
number = song.select_one('td.number').text[0:3].strip()
title = song.select_one('td.info > a.title.ellipsis').text.strip()
artist = song.select_one('td.info > a.artist.ellipsis').text.strip('.')
result = artist.split('.')
print(number, title, result) | [
"dreambear00@users.noreply.github.com"
] | dreambear00@users.noreply.github.com |
577b5b8dfaff572f4dc8c07ec7bfae8a445d82d5 | 4ba1235ec618513df3f13c243f4430fe657f5610 | /ex35.py | 10d3e51a84c50b037e8e290a607ba86aead44ddd | [] | no_license | AuraIV/lpthw | 414e79e4be182b98da9cde2f9b7134903783b47d | 573d3e17684791f25fd34b0289d4a622db6e5312 | refs/heads/master | 2021-01-13T00:55:55.173380 | 2015-12-31T20:24:31 | 2015-12-31T20:24:31 | 48,452,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,747 | py | from sys import exit
def gold_room():
print "This room is full of gold. How much do you take?"
choice = raw_input("> ")
#if "0" in choice or "1" in choice:
try:
how_much = int(choice)
except ValueError:
dead("Man, learn to type a number.")
if how_much < 50:
print "Nice, you're not greedy, you win!"
exit(0)
else:
dead("you greedy bastard!")
def bear_room():
print "There is a bear here."
print "The bear has a bunch of honey."
print "The fat bear is in front of another door."
print "How are you going to move the bear?"
bear_moved = False
while True:
choice = raw_input("> ")
if choice == "take honey":
dead("The bear looks at you and then slaps your face off.")
elif choice == "taunt bear" and not bear_moved:
print "The bear has moved from the door. You can go through it now."
bear_moved = True
elif choice == "taunt bear" and bear_moved:
dead("The bear gets pissed off and chews your leg off.")
elif choice == "open door" and bear_moved:
gold_room()
else:
print "I got no idea what that means."
def cthulhu_room():
print "Here you see the great evil Cthulhu"
print "He, it, whatever stares at you and you go insane."
print "Do you flee for your life or eat your head?"
choice = raw_input("> ")
if "flee" in choice:
start()
elif "head" in choice:
dead("Well that was tasty!")
else:
cthulhu_room()
def dead(why):
print why, "Good job!"
exit()
def start():
print "You are in a dark room."
print "There is a door to your right and left."
print "Which one do you take?"
choice = raw_input("> ")
if choice == "left":
bear_room()
elif choice == "right":
cthulhu_room()
else:
dead("You stumble around the room until you starve.")
start()
| [
"aura.velarde@gmail.com"
] | aura.velarde@gmail.com |
93e0d1af53bc2b9efd06b47d2a1c4276bdb0b0bd | 5390d79dad71ad0d9ff9d0777435dcaf4aad16b3 | /chapter_06/favorite_number.py | 124b9763eeac8593df0e93e0c0e845aa9bc3e5dd | [] | no_license | JasperMi/python_learning | 19770d79cce900d968cec76dac11e45a3df9c34c | 8111d0d12e4608484864dddb597522c6c60b54e8 | refs/heads/master | 2020-11-26T08:57:02.983869 | 2020-03-11T10:14:55 | 2020-03-11T10:14:55 | 218,935,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | favorite_numbers = {
'bob': 2,
'sarah': 6,
'martin': 8,
'katy': 9,
'tom': 10
}
print('bob' + "'s favorite number is " + str(favorite_numbers['bob']) + ".")
print('sarah' + "'s favorite number is " + str(favorite_numbers['sarah']) + ".")
print('martin' + "'s favorite number is " + str(favorite_numbers['martin']) + ".")
print('katy' + "'s favorite number is " + str(favorite_numbers['katy']) + ".")
print('tom' + "'s favorite number is " + str(favorite_numbers['tom']) + ".")
| [
"darmi19@163.com"
] | darmi19@163.com |
5581a91ff324a9d01492f9aa470ca5b410c748e3 | f2053594612d1b3e56a93653892941875d305288 | /ordenamiento.py | 1c6005d0834e18925702ca9b925622a83e7de323 | [] | no_license | jesuslz/Automatization | 3b3c87c38005c533c2d08a1c8eda97cf7c7f8f79 | 082353be1bd094afe17dda405c17248089d2ef98 | refs/heads/master | 2020-03-25T19:09:05.467184 | 2018-11-21T17:28:43 | 2018-11-21T17:28:43 | 144,067,061 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,756 | py |
import openpyxl
import numpy as np
def alignment(a, b):
'''El primer parametro de esta funcion siempre debe ser
el vector base'''
a.sort()
b.sort()
#hacerlos del mismo tamaño
for i in range(len(a)-len(b)):
b.append(None)
for i in range(len(a)):
if not a[i] == b[i]:
b.insert(i, None)
b.pop(-1)
return a, b
file_xlsx = openpyxl.load_workbook(
'FLOW Operation and support report ASAP 7.0.2 JAMU66, WASS133 2018-08-13.xlsx', data_only=True)
sheet = file_xlsx['week jamu']
#print(y)
wos_host = np.array(list(sheet['A678':'A735']))
wos_host_data = [[wos_host[i][j].value for j in range(
wos_host.shape[1])]for i in range(wos_host.shape[0])]
wos_host_x = np.array([wos_host_data[i][0].split()
for i in range(len(wos_host_data))])
failed = np.array(list(sheet['A632':'A672']))
failed = [[failed[i][j].value for j in range(failed.shape[1])] for i in range(failed.shape[0])]
failed_x = np.array([failed[i][0].split() for i in range(len(failed))])
wos_host_list = list(wos_host_x[:,1])
failed_list = list(failed_x[:,1])
wos_host_list, failed_list = alignment(wos_host_list, failed_list)
print('Wos' + '\t' + 'failed' + '\n')
for i in range(len(wos_host_list)):
print(str(wos_host_list[i]) + '\t' + str(failed_list[i]) + '\n')
import dictionary as dic
wos_hostDatos = list(wos_host_x[:, 0])
failedDatos = list(failed_x[:,0])
d2 = [list((int(wos_hostDatos[i]), wos_host_list[i])) for i in range(len(wos_host_list))]
#d1 = list(np.column_stack((wos_hostDatos, wos_host_list)))
#d2 = [list(d1[i][:]) for i in range(len(d1))]
print(d2)
#print(d2)
import escritura
#print(list(failed_x))
escritura.write_xlsx_document(d2, 'failed.xlsx')
| [
"jlopez@integssoft.com"
] | jlopez@integssoft.com |
d0a3294bde5578af9e31e1a16451cde36d4dccd7 | 326e58cc44ebba305b37b7cb74387d9ac1e72703 | /keras_wide_deep_98_table_15GB/python/lib/read_conf.py | b08fb022b1d7ce9d91175da3e1f90f4ad80076fb | [
"MIT"
] | permissive | WenqiJiang/FPGA-Accelerator-for-Recommender-Systems | ef0d02abf75d522b511ee1b51bd5c2f591b6f79a | 6c3031487cd1447b7f5362483c14b108177387bb | refs/heads/master | 2021-07-07T22:23:13.270004 | 2021-06-09T21:46:22 | 2021-06-09T21:46:22 | 244,604,800 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,581 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: lapis-hong
# @Date : 2018/1/24
"""Read All Configuration from wide_deep/conf/*.yaml"""
import os
import yaml
from os.path import dirname, abspath
# Conf directory: three levels above this file, i.e. <project_root>/conf.
BASE_DIR = os.path.join(dirname(dirname(dirname(abspath(__file__)))), 'conf')
# Default conf file names; Config.__init__ resolves each one against BASE_DIR.
SCHEMA_CONF_FILE = 'schema.yaml'
DATA_PROCESS_CONF_FILE = 'data_process.yaml'
FEATURE_CONF_FILE = 'feature.yaml'
CROSS_FEATURE_CONF_FILE = 'cross_feature.yaml'
MODEL_CONF_FILE = 'model.yaml'
TRAIN_CONF_FILE = 'train.yaml'
SERVING_CONF_FILE = 'serving.yaml'
class Config(object):
"""Config class
Class attributes: config, train, distribution, model, runconfig, serving
"""
def __init__(self,
schema_conf_file=SCHEMA_CONF_FILE,
data_process_conf_file=DATA_PROCESS_CONF_FILE,
feature_conf_file=FEATURE_CONF_FILE,
cross_feature_conf_file=CROSS_FEATURE_CONF_FILE,
model_conf_file=MODEL_CONF_FILE,
train_conf_file=TRAIN_CONF_FILE,
serving_conf_file=SERVING_CONF_FILE):
self._schema_conf_file = os.path.join(BASE_DIR, schema_conf_file)
self._data_process_conf_file = os.path.join(BASE_DIR, data_process_conf_file)
self._feature_conf_file = os.path.join(BASE_DIR, feature_conf_file)
self._cross_feature_conf_file = os.path.join(BASE_DIR, cross_feature_conf_file)
self._model_conf_file = os.path.join(BASE_DIR, model_conf_file)
self._train_conf_file = os.path.join(BASE_DIR, train_conf_file)
self._serving_conf_file = os.path.join(BASE_DIR, serving_conf_file)
def read_schema(self):
with open(self._schema_conf_file) as f:
return {k: v.lower() for k, v in yaml.load(f).items()}
def read_data_process_conf(self):
with open(self._data_process_conf_file) as f:
return yaml.load(f)
@staticmethod
def _check_feature_conf(feature, valid_feature_name, **kwargs):
type_ = kwargs["type"]
trans = kwargs["transform"]
param = kwargs["parameter"]
if type_ is None:
raise ValueError("Type are required in feature conf, "
"found empty value for feature `{}`".format(feature))
if feature not in valid_feature_name:
raise ValueError("Invalid feature name `{}` in feature conf, "
"must be consistent with schema conf".format(feature))
assert type_ in {'category', 'continuous'}, (
"Invalid type `{}` for feature `{}` in feature conf, "
"must be 'category' or 'continuous'".format(type_, feature))
# check transform and parameter
if type_ == 'category':
assert trans in {'hash_bucket', 'identity', 'vocab'}, (
"Invalid transform `{}` for feature `{}` in feature conf, "
"must be one of `hash_bucket`, `vocab`, `identity`.".format(trans, feature))
if trans == 'hash_bucket' or trans == 'identity':
if not isinstance(param, int):
raise TypeError('Invalid parameter `{}` for feature `{}` in feature conf, '
'{} parameter must be an integer.'.format(param, feature, trans))
elif trans == 'vocab':
if not isinstance(param, (tuple, list)):
raise TypeError('Invalid parameter `{}` for feature `{}` in feature conf, '
'vocab parameter must be a list.'.format(param, feature))
else:
normalization, boundaries = param['normalization'], param['boundaries']
if trans:
assert trans in {'min_max', 'log', 'standard'}, \
"Invalid transform `{}` for feature `{}` in feature conf, " \
"continuous feature transform must be `min_max` or `log` or `standard`.".format(trans, feature)
if trans == 'min_max' or 'standard':
if not isinstance(normalization, (list, tuple)) or len(normalization) != 2:
raise TypeError('Invalid normalization parameter `{}` for feature `{}` in feature conf, '
'must be 2 elements list for `min_max` or `standard` scaler.'.format(normalization, feature))
if trans == 'min_max':
min_, max_ = normalization
if not isinstance(min_, (float, int)) or not isinstance(max_, (float, int)):
raise TypeError('Invalid normalization parameter `{}` for feature `{}` in feature conf, '
'list elements must be int or float.'.format(normalization, feature))
assert min_ < max_, ('Invalid normalization parameter `{}` for feature `{}` in feature conf, '
'[min, max] list elements must be min<max'.format(normalization, feature))
elif trans == 'standard':
mean, std = normalization
if not isinstance(mean, (float, int)):
raise TypeError('Invalid normalization parameter `{}` for feature `{}` in feature conf, '
'parameter mean must be int or float.'.format(mean, feature))
if not isinstance(std, (float, int)) or std <= 0:
raise TypeError('Invalid normalization parameter `{}` for feature `{}` in feature conf, '
'parameter std must be a positive number.'.format(std, feature))
if boundaries:
if not isinstance(boundaries, (tuple, list)):
raise TypeError('Invalid parameter `{}` for feature `{}` in feature conf, '
'discretize parameter must be a list.'.format(boundaries, feature))
else:
for v in boundaries:
assert isinstance(v, (int, float)), \
"Invalid parameter `{}` for feature `{}` in feature conf, " \
"discretize parameter element must be integer or float.".format(boundaries, feature)
@staticmethod
def _check_cross_feature_conf(features, feature_conf, **kwargs):
features_list = [f.strip() for f in features.split('&')]
hash_bucket_size = kwargs["hash_bucket_size"]
is_deep = kwargs["is_deep"]
assert len(features_list) > 1, (
'Invalid cross feature name `{}` in cross feature conf,'
'at least 2 features'.format(features))
for f in features_list:
if f not in feature_conf:
print(f)
raise ValueError("Invalid cross feature name `{}` in cross feature conf, "
"must be consistent with feature conf".format(features))
if feature_conf[f]['type'] == 'continuous':
assert feature_conf[f]['parameter']['boundaries'] is not None, \
'Continuous feature must be set bounaries to be bucketized in feature conf as cross feature'
if hash_bucket_size:
assert isinstance(hash_bucket_size, (int, float)), (
'Invalid hash_bucket_size `{}` for features `{}` in cross feature conf, '
'expected int or float'.format(hash_bucket_size, features))
if is_deep:
assert is_deep in {0, 1}, (
'Invalid is_deep `{}` for features `{}`, '
'expected 0 or 1.'.format(is_deep, features))
    def read_feature_conf(self):
        """Parse the feature conf YAML, validating each entry against the schema.

        Returns:
            The raw {feature_name: conf_dict} mapping from the YAML file.
        """
        with open(self._feature_conf_file) as f:
            feature_conf = yaml.load(f)  # NOTE(review): no Loader= argument; consider yaml.safe_load (PyYAML>=5.1 warns)
        valid_feature_name = self.read_schema().values()
        for feature, conf in feature_conf.items():
            # _check_feature_conf raises on any malformed entry.
            self._check_feature_conf(feature.lower(), valid_feature_name, **conf)
        return feature_conf
    def read_cross_feature_conf(self):
        """Parse and validate the cross-feature conf YAML.

        Returns:
            A list of (feature_name_list, hash_bucket_size, is_deep) tuples.
        """
        with open(self._cross_feature_conf_file) as f:
            cross_feature_conf = yaml.load(f)  # NOTE(review): consider yaml.safe_load
        conf_list = []
        feature_conf = self.read_feature_conf() # used features
        # print(feature_conf)
        for features, conf in cross_feature_conf.items():
            self._check_cross_feature_conf(features, feature_conf, **conf)
            features = [f.strip() for f in features.split('&')]
            # Conf value is in units of 1000 buckets; 0/None falls back to 10k.
            hash_bucket_size = int(1000*conf["hash_bucket_size"]) or 10000 # defaults to 10k
            is_deep = conf["is_deep"] if conf["is_deep"] is not None else 1 # defaults to 1 (deep)
            conf_list.append((features, hash_bucket_size, is_deep))
        return conf_list
@staticmethod
def _check_numeric(key, value):
if not isinstance(value, (int, float)):
raise ValueError('Numeric type is required for key `{}`, found `{}`.'.format(key, value))
@staticmethod
def _check_string(key, value):
if not isinstance(value, (str)):
raise ValueError('String type is required for key `{}`, found `{}`.'.format(key, value))
@staticmethod
def _check_bool(key, value):
if value not in {True, False, 1, 0}:
raise ValueError('Bool type is required for key `{}`, found `{}`.'.format(key, value))
@staticmethod
def _check_list(key, value):
if not isinstance(value, (list, tuple)):
raise ValueError('List type is required for key `{}`, found `{}`.'.format(key, value))
@staticmethod
def _check_required(key, value):
if value is None:
raise ValueError('Required type for key `{}`, found None.'.format(key))
def _read_model_conf(self):
# required string params
req_str_keys = ['linear_optimizer', 'dnn_optimizer', 'dnn_connected_mode', 'dnn_activation_function'
'cnn_optimizer']
# optional int or float params
opt_num_keys = ['linear_initial_learning_rate', 'linear_decay_rate', 'dnn_initial_learning_rate',
'dnn_decay_rate', 'dnn_l1', 'dnn_l2']
# optional bool params
opt_bool_keys = ['dnn_batch_normalization', 'cnn_use_flag']
#
req_list_keys = ['dnn_hidden_units']
with open(self._model_conf_file) as f:
model_conf = yaml.load(f)
for k, v in model_conf.items():
if k in req_str_keys:
self._check_required(k, v)
self._check_string(k, v)
elif k in opt_num_keys:
if v:
self._check_numeric(k, v)
elif k in opt_bool_keys:
if v:
self._check_bool(k, v)
elif k in req_list_keys:
self._check_required(k, v)
self._check_list(k, v)
return model_conf
    def _read_train_conf(self):
        """Parse and type-check the 'train' section of the train conf YAML.

        Returns:
            The full conf dict (all sections); only the 'train' section
            is validated here.
        """
        req_str_keys = ['model_dir', 'model_type', 'train_data', 'test_data']
        req_num_keys = ['train_epochs', 'epochs_per_eval', 'batch_size', 'num_examples']
        opt_num_keys = ['pos_sample_loss_weight', 'neg_sample_loss_weight', 'num_parallel_calls']
        req_bool_key = ['keep_train', 'multivalue', 'dynamic_train']
        with open(self._train_conf_file) as f:
            train_conf = yaml.load(f)  # NOTE(review): consider yaml.safe_load
        for k, v in train_conf['train'].items():
            if k in req_str_keys:
                self._check_required(k, v)
                self._check_string(k, v)
            elif k in req_num_keys:
                self._check_required(k, v)
                self._check_numeric(k, v)
            elif k in opt_num_keys:
                # Optional: only checked when present and truthy.
                if v:
                    self._check_numeric(k, v)
            elif k in req_bool_key:
                self._check_required(k, v)
                self._check_bool(k, v)
        return train_conf
def _read_serving_conf(self):
with open(self._serving_conf_file) as f:
return yaml.load(f)
    @property
    def config(self):
        # Full train conf dict (all sections); re-read from disk each access.
        return self._read_train_conf()
    @property
    def train(self):
        # 'train' section of the train conf; re-read from disk each access.
        return self._read_train_conf()["train"]
    @property
    def distribution(self):
        # 'distribution' section of the train conf.
        return self._read_train_conf()["distribution"]
    @property
    def runconfig(self):
        # 'runconfig' section of the train conf.
        return self._read_train_conf()["runconfig"]
    @property
    def model(self):
        # Validated model conf dict.
        return self._read_model_conf()
    @property
    def serving(self):
        # Raw serving conf dict (unvalidated).
        return self._read_serving_conf()
def get_feature_name(self, feature_type='all'):
"""
Args:
feature_type: one of {'all', 'used', 'category', 'continuous'}
Return: feature name list
"""
feature_conf_dic = self.read_feature_conf()
feature_list = list(self.read_schema().values())
# feature_list.remove('clk0')
# feature_list.remove('clk1')
# feature_list.remove('clk2')
if feature_type == 'all':
return feature_list
elif feature_type == 'used':
return feature_conf_dic.keys()
elif feature_type == 'unused':
return set(feature_list) - set(feature_conf_dic.keys())
elif feature_type == 'category':
return [feature for feature, conf in feature_conf_dic.items() if conf['type'] == 'category']
elif feature_type == 'continuous':
return [feature for feature, conf in feature_conf_dic.items() if conf['type'] == 'continuous']
else:
raise ValueError("Invalid parameter, must be one of 'all', 'used', 'category, 'continuous")
def _test():
    """Smoke test: instantiate Config and print every conf section."""
    config = Config()
    """test for Config methods"""
    print('\nTrain config:')
    print(config.config)
    print(config.train)
    print(config.runconfig)
    print(config.train["model_dir"])
    print('\nModel conf:')
    for k, v in config.model.items():
        print(k, v)
    feature_conf_dic = config.read_feature_conf()
    print('\nFeature conf:')
    for k, v in feature_conf_dic.items():
        print(k, v)
    cross_feature_list = config.read_cross_feature_conf()
    print('\nCross feature conf:')
    for f in cross_feature_list:
        print(f)
    category_feature = config.get_feature_name('category')
    print('\nCategory feature:')
    print(category_feature)
    # Public (non-underscore) members of the Config class, for reference.
    members = [m for m in Config.__dict__ if not m.startswith('_')]
    print('\nConfig class members:')
    print(members)
if __name__ == '__main__':
_test()
| [
"wejiang@alveo0.ethz.ch"
] | wejiang@alveo0.ethz.ch |
d43b4ac02dac484f79232529a81775994b074d56 | 766b6f7029be1123451c772dcd30ec76303f0557 | /gxpbin/experimental/ucpw | 8cc03e919913c5ce9de2348ec12cafbf033fdfcc | [
"BSD-3-Clause"
] | permissive | qnu/gxp | 774cd582ef5c1237cb7e5ab95b79d849b5009fd4 | 8dd9d396102e254cb4712fe572b64e398a5f069b | refs/heads/master | 2020-12-14T08:53:19.583854 | 2018-08-03T17:25:45 | 2018-08-03T17:25:45 | 34,555,726 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 26,534 | #!/usr/bin/env python
# -*- python -*-
import sys,time,os,select,mmap,random
from ucp_common import *
# Please set options in ucp_common.py
############################################################
# from sock.py
############################################################
"""
'sock.py'
Wrapped socket implementation
- Socket
- ServerSocket
"""
class Socket:
    """A TCP socket wrapper with length-prefixed int/string helpers.

    Framing uses struct's native 'i' format, so both peers must agree
    on int size and endianness (same-architecture assumption).
    """
    def __init__(self, sock=None, peer=None):
        """Wrap an existing socket, or create a fresh TCP socket.

        peer: (hostname, port) of the remote end, if already known.
        """
        if sock is None:
            self.sock = socket.socket(\
                socket.AF_INET, socket.SOCK_STREAM)
        else:
            self.sock = sock
        self.peer = peer
    def connect(self, peer):
        """Connect to peer=(host, port) and tune the send/recv buffers."""
        assert peer is not None
        self.peer = peer
        self.sock.connect(self.peer)
        if self.sock is None:
            # Defensive check (connect() above raises on failure anyway).
            # BUG FIX: the original raised a plain string, which is a
            # TypeError in modern Python (string exceptions were removed).
            raise RuntimeError("Could not establish a connection to %s:%d"
                               % (self.peer[0], self.peer[1]))
        # SNDBUF_SIZE / RCVBUF_SIZE / debug / Es come from ucp_common.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, SNDBUF_SIZE)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, RCVBUF_SIZE)
        if debug >= 2:
            Es("New connection to %s:%d\n"%(self.peer[0], self.peer[1]))
    def close(self):
        """ Close the socket """
        self.sock.close()
    def send(self, msg, msglen = -1):
        """Send all of msg (msglen is kept for API compatibility, unused)."""
        self.sock.sendall(msg)
    def recv(self, msglen):
        """Receive exactly msglen bytes; raise socket.error on EOF."""
        A = []
        rcvd = 0
        while rcvd < msglen:
            chunk = self.sock.recv(msglen-rcvd)
            # BUG FIX: test emptiness rather than comparing to the text
            # string ''; recv() returns bytes on Python 3 and b'' == ''
            # is False, which made the original loop spin on a closed peer.
            if not chunk:
                raise socket.error("socket connection broken")
            A.append(chunk)
            rcvd += len(chunk)
        # b"".join is identical on Python 2 (b"" is "") and keeps the
        # payload as bytes on Python 3.
        return b"".join(A)
    def rawrecv(self, msglen):
        """Single recv() call; may return fewer than msglen bytes."""
        return self.sock.recv(msglen)
    def rawsend(self, msg):
        """Single send() call; may send fewer than len(msg) bytes."""
        return self.sock.send(msg)
    def sendInt(self, value):
        """Send one native 32-bit int."""
        self.send(struct.pack('i', value))
    def recvInt(self):
        """Receive one native 32-bit int."""
        msg = self.recv(struct.calcsize('i'))
        return struct.unpack('i', msg)[0]
    def sendString(self, msg, length = -1):
        """Send a length-prefixed byte string."""
        if length == -1: length = len(msg)
        self.sendInt(length)
        self.send(msg, length)
    def recvString(self):
        """Receive a length-prefixed byte string."""
        msglen = self.recvInt()
        return self.recv(msglen)
class ServerSocket:
    """A listening TCP socket; accept() returns wrapped Socket objects."""
    def __init__(self, initial_port = None):
        """ Create a socket """
        """ peer : (hostname, port) """
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # NOTE(review): `addr` is assigned but never used; my_gethostname
        # comes from ucp_common — confirm whether the call has side effects
        # before removing it.
        addr = my_gethostname()
        if initial_port is not None:
            self.port = initial_port
            self.sock.bind(("", self.port))
        else:
            # Bind to an ephemeral port chosen by the OS.
            self.sock.bind(("",0))
        self.sock.listen(5) # arg : number of backlog
        #M("Listening on %s\n"%(str(self.sock.getsockname())))
        self.port = self.sock.getsockname()[1]
    def accept(self):
        # Block until a peer connects; wrap the connection in Socket.
        conn,peer = self.sock.accept()
        if debug >= 2 :
            Es("Accepted from %s:%d\n"%(peer[0], peer[1]))
        return Socket(sock=conn, peer=peer)
    def close(self):
        # Close the listening socket itself (not accepted connections).
        self.sock.close()
############################################################
# from data_source.py
############################################################
def ensure_dir(path):
    """
    ensure directory PATH exists, creating its ancestors if necessary
    """
    # If PATH already exists, ensure_dir_1 merely validates that it is a
    # writable directory; otherwise recurse to create the ancestors first.
    if os.access(path, os.F_OK):
        ensure_dir_1(path)
    elif path != "/":
        ensure_dir(os.path.dirname(path))
        ensure_dir_1(path)
def ensure_dir_1(dire):
    """
    Ensure directory DIRE exists, assuming its direct parent exists
    (i.e., if dire == 'a/b/c', 'a/b' must already exist).

    Exits the process if DIRE cannot be created, is not a directory,
    or is not writable.  M() is the ucp_common message helper.
    """
    if not os.access(dire, os.F_OK):
        try:
            os.mkdir(dire)
        except OSError:
            # May have been created concurrently; re-checked just below.
            pass
        if not os.access(dire, os.F_OK):
            M("Output directory %s does not exist and could not create it\n" \
              % dire)
            os._exit(1)
    s = os.stat(dire)
    if not stat.S_ISDIR(s[0]):
        # BUG FIX: the original format string had two %s placeholders for
        # a single argument, raising TypeError instead of printing.
        M("%s is not a directory\n" % dire)
        sys.exit(1)
    if not os.access(dire, os.W_OK):
        M("Directory %s not writable\n" % dire)
        sys.exit(1)
class DataSource:
    """
    Open files and read/write data on them.
    It also manages chunk assignment to trees

    The file list is treated as one concatenated byte stream, split into
    fixed-size chunks; chunk_mappings tracks per-chunk transfer state.
    """
    # Access modes: R = source (read only), RW = destination (read/write).
    R,RW = 0,1
    def __init__(self, filepath, files, mode, chunksize= CHUNKSIZE):
        # filepath: root directory, or the file itself when an entry is ".".
        # files: [(url-quoted relative filename, size in bytes), ...]
        # chunksize: transfer unit (CHUNKSIZE from ucp_common).
        assert mode in [DataSource.R, DataSource.RW]
        self.chunksize = chunksize
        self.filepath = filepath
        self.files = files # [(rel_fn, size), ...]
        self.init_vars()
        self.open_all(mode)
    def init_vars(self):
        # Derive the total byte count and the rounded-up chunk count.
        self.total_size = 0
        for _,sz in self.files:
            self.total_size += sz
        self.n_chunks = int(math.ceil(float(self.total_size)/self.chunksize)) # Kiriage
        self.chunk_mappings = [LARGE_VALUE] * self.n_chunks
        # The last packet is possibly smaller than the others
        self.current_stage = LARGE_VALUE
        # LARGE_VALUE means 'Unsent' in source, 'Unreceived' in destinations
    def open_all(self, access):
        # Open every file; in RW mode, pre-create them at full size first
        # so chunks can be written at arbitrary offsets.
        if access == self.RW:
            self.prepare_files()
        self.fds = []
        for filename_quoted,filesize in self.files:
            if filename_quoted == ".":
                abs_filename = self.filepath
            else:
                filename = urllib.unquote_plus(filename_quoted)
                abs_filename = os.path.join(self.filepath, filename)
            if filesize != os.path.getsize(abs_filename):
                raise MyException("Filesize of (%s) has been modified (%d to %d)"%(abs_filename, filesize, os.path.getsize(abs_filename)))
            if access == DataSource.R:
                fd = open_for_read(abs_filename)
            else:
                fd = open_for_readwrite(abs_filename)
            self.fds.append(fd)
    def close_all(self):
        # Close every open file descriptor.
        for fp in self.fds:
            fp.close()
    def get_chunk_id(self,requested_pid):
        # Assign the next chunk still tagged with the current stage to
        # pipeline `requested_pid`; return None when none remain.
        assert requested_pid < self.current_stage
        for cid,pid in enumerate(self.chunk_mappings):
            if pid != self.current_stage: continue
            self.chunk_mappings[cid] = requested_pid
            return cid
        return None
    def has_chunk(self,chunk_id):
        # True once the chunk has been assigned (source) / written (dest).
        return self.chunk_mappings[chunk_id] != LARGE_VALUE
    def set_stage(self, stage):
        # Lower the stage; chunks tagged with the old stage value become
        # assignable again through get_chunk_id().
        self.current_stage = stage
    def prepare_files(self):
        """
        Create required directory and assure the freespace
        (for node that receives data)
        """
        def create_empty_file(filename, size):
            # Pre-allocate by seeking to the last byte and writing one NUL.
            fp = open_for_write(filename)
            fp.seek(size-1)
            fp.write('\0')
            fp.close()
        for filename_quoted,filesize in self.files:
            # Returned: like... /tmp/kay/hoge
            if filename_quoted == ".":
                abs_filename = self.filepath
                create_empty_file(abs_filename, filesize)
            else:
                filename = urllib.unquote_plus(filename_quoted)
                abs_filename = os.path.join(self.filepath, filename)
                basedir = os.path.dirname(abs_filename)
                ensure_dir(basedir)
                create_empty_file(abs_filename, filesize)
    def read_chunk(self, chunk_id):
        # Read one chunk; the final chunk may be shorter than chunksize.
        return self.read(chunk_id*self.chunksize, min((chunk_id+1)*self.chunksize, self.total_size))
    def write_chunk(self, chunk_id, chunk):
        # Write a chunk exactly once; duplicates are silently dropped.
        if self.has_chunk(chunk_id): return
        self.chunk_mappings[chunk_id] = 0
        if not PRETEND:
            self.write(chunk_id*self.chunksize, chunk)
    def read(self, start, end):
        # PRETEND (ucp_common) short-circuits disk I/O for benchmarking.
        if PRETEND:
            return [" " * (end-start)]
        """ Get an array of fractions """
        # Map the logical [start, end) range of the concatenated stream
        # onto per-file offsets, reading each overlapping file's slice.
        f_end = 0
        ret_list = []
        for file_id,(_,file_size) in enumerate(self.files):
            f_start = f_end
            f_end += file_size
            if end <= f_start: break
            read_start = max(start, f_start)
            read_end = min(end, f_end)
            if read_start >= read_end: continue
            self.fds[file_id].seek(read_start-f_start)
            A = self.fds[file_id].read(read_end-read_start)
            assert len(A) == read_end - read_start
            ret_list.append(A)
        return ret_list
    def receive_completed(self):
        """ return if receiving process has completed """
        #M("%s\n"%(self.chunk_mappings.count(LARGE_VALUE)))
        return LARGE_VALUE not in self.chunk_mappings
    def get_n_unreceived(self):
        """ return number of trees on which trnasfer is going on """
        return self.chunk_mappings.count(LARGE_VALUE)
    def write(self, start, A):
        # Scatter buffer A across the files covering [start, start+len(A)).
        if start + len(A) > self.total_size:
            M("Invalid write: start = %d, len = %d, total = %d\n"%(start, len(A), self.total_size))
            assert False
        end = start + len(A)
        f_end = 0
        for file_id,(_,file_size) in enumerate(self.files):
            f_start = f_end
            f_end += file_size
            if end <= f_start: break
            if f_end <= start: continue
            write_start = max(start, f_start)
            write_end = min(end, f_end)
            if write_start >= write_end: continue
            fd = self.fds[file_id]
            fd.seek(write_start - f_start)
            fd.write(A[write_start-start:write_end-start])
############################################################
# from ucpw
############################################################
PID_SHIFT = 1000
class UCPWorker:
"""
It receives message from the UCPMaster, fetch file and relay files as ordered.
"""
    def __init__(self):
        # Wire up gxp identity, open the master channel (fds 3/4), bind a
        # server socket, then enter the message loop (which never returns).
        self.t0 = time.time()
        self.ds = None
        self.debug = 1
        self.set_gxp_vars()
        self.comm = Comm(3,4)
        self.bind() # Bind and issue "BIND" command and start accepting
        self.init_vars() #
        self.msg_loop() # Msg loop
###########################
#--- Preparing Functions
###########################
    def set_gxp_vars(self):
        # Pull rank / world-size / host identity from the gxp environment.
        self.ipe = int(os.environ["GXP_EXEC_IDX"])
        self.npe = int(os.environ["GXP_NUM_EXECS"])
        self.hostname = os.environ["GXP_HOSTNAME"]
        self.addr = my_getaddr() # Get global addrd
    def bind(self):#OK
        """ Prepare a server socket """
        self.ss = ServerSocket()
        self.accepted = 0
        self.n_accepts = -1
        # Issue "BIND" command
        # BIND <ipe> <npe> <long_hostname> <port>
        # NOTE(review): the format below actually has five fields — the
        # global address is included between hostname and port.
        self.issue("BIND %d %d %s %s %d\n"%(self.ipe, self.npe, self.hostname, self.addr, self.ss.port))
    def init_vars(self):
        # pipes: raw socket -> list of forward pipeline ids carried on it.
        self.pipes = {} # rawsock -> [pipeline_id]
        # Descriptors select()ed in msg_loop; starts with the master fd.
        self.r_socks = [self.comm.from_fno]
        self.pipe_id_to_sock = {}
        self.recvsend_done_issued = False
        self.recv_started = False
        self.total_rcvd = 0
        self.total_sent = 0
self.total_sent = 0
########################
#--- Util Functions
########################
    def issue(self, cmd):
        """ Issue a command """
        # Commands are newline-terminated text lines sent to the master.
        if self.debug > 1:
            M("Worker issued %s\n"%cmd)
        self.comm.write(cmd)
########################
#--- Main Loop
########################
    def msg_loop(self):
        # Event loop: dispatch master messages, new peer connections, and
        # incoming data chunks until QUIT/ABORT or a socket error.
        try:
            while True:
                # rs: from_all, ss, socks
                rs,ws,es = select.select(self.r_socks, [], [], 1.0)
                for fd in rs:
                    if fd == self.comm.from_fno:
                        self.handle_mw()
                    elif fd == self.ss.sock.fileno():
                        self.accept()
                    else:
                        self.recv_data(fd)
        # NOTE: Python 2-only `except E, e` syntax; worker exits quietly
        # when a peer closes its connection.
        except socket.error,e:
            #M(str(e))
            sys.exit(0)
########################
#--- Handle MW Messages
########################
    def handle_mw(self):
        # Read one command line from the master and dispatch it.
        # One-token commands (ABORT/QUIT) are global; all others carry the
        # target ipe as the second token and are ignored unless addressed
        # to this worker.
        msg = self.comm.read()
        A = msg.split()
        if len(A) == 0:
            raise MWException()
        if len(A) == 1:
            if A[0] == "ABORT":
                """ terminate """
                if self.debug > 1:
                    Es("Received ABORT msg, terminates\n")
                sys.exit(1)
            elif A[0] == "QUIT":
                """ terminate """
                # Grace period so in-flight data can drain before exit.
                time.sleep(0.5)
                if self.ds is not None:
                    self.ds.close_all()
                os._exit(0)
            else:
                raise MWException("")
        ipe = atoi_ex(A[1], "ipe in mw msg")
        if ipe != self.ipe: return
        if A[0] == "FILE_MATCH": #OK
            self.handle_file_match_msg(A[2:])
        # --- Messages to Establish Connections
        elif A[0] == "SOCKET": #OK
            self.handle_socket_msg(A[2:])
        elif A[0] == "SSH":
            # NOTE(review): raising a plain string is a TypeError in
            # modern Python, and it makes the call below unreachable.
            raise "Not Implemented Yet"
            self.handle_ssh_msg(A[2:])
        elif A[0] == "ACCEPT":
            self.handle_accept_msg(A[2:])
        # --- Messages to Actually Transfer Data
        elif A[0] == "SEND": #OK
            self.handle_send_msg(A[2:])
        elif A[0] == "RECVSEND":
            self.handle_recvsend_msg(A[2:])
        # --- Error
        else:
            raise MWException("Command %s is not implemented yet\n"%(A[0]))
        # NOTE(review): dev-time leftover — at this indent level it fires
        # after every dispatched command unless Python runs with -O;
        # confirm against the original file whether it belongs inside the
        # else branch above.
        assert False # For now
#--------------------------------------
def handle_file_match_msg(self, A):
def filelist(ptn0):
# Get files under some directory
# ret = filelist("~/ucp")
ptn1 = os.path.expanduser(ptn0)
ptn = os.path.abspath(ptn1)
if os.path.isfile(ptn):
if not os.access(ptn, os.R_OK): return []
size = os.path.getsize(ptn)
return [(".", size)]
if os.path.isdir(ptn):
# ptn never ends with '/'
ret = []
for root, dirs, files in os.walk(ptn):
A = root.split(ptn, 1)
assert A[0] == ''
if A[1] == "":
rel_root = ""
else:
rel_root = A[1][1:]
for f in files:
f2 = os.path.join(root, f)
if os.path.isfile(f2):
if os.access(f2, os.R_OK):
size = os.path.getsize(f2)
ret.append((os.path.join(rel_root, f), size))
return ret
return []
filepath_ptn = A[0]
files = filelist(filepath_ptn)
""" FILES <ipe> <n_files> <filepath0> <size0> <filepath1> <size1> ...
When the target pattern is a file, filepath is set to '.'
"""
n_files = len(files)
msgs = ["FILES", "%d"%self.ipe, "%d"%n_files]
for f,s in files:
f_quoted = urllib.quote_plus(f)
msgs.append(f_quoted)
msgs.append("%d"%s)
msg = " " .join(msgs) + "\n"
self.issue(msg)
def handle_socket_msg(self, A): #OK
""" SOCKET <ipe> <conn_id> <dest_ipe> <addr> <port> <n_pipelines> <id0> ...
0 1 2 3 4
when pipeline_id is more than PID_SHIFT, it means the connection is used in a backward manner
"""
# Parse args
conn_id = atoi_ex(A[0], "conn_id")
dest_ipe = atoi_ex(A[1], "dest_ipe")
addr = A[2]
port = atoi_ex(A[3], "port")
f_pipes = []
b_pipes = []
n_pipes = atoi_ex(A[4], "n_pipelines")
for i in range(n_pipes):
pipe_id = atoi_ex(A[i+5], "pipeline_id")
if pipe_id >= PID_SHIFT:
b_pipes.append(pipe_id - PID_SHIFT)
else:
f_pipes.append(pipe_id)
# Perform connect and register connection
sock = Socket()
sock.connect(peer = (addr, port))
if debug > 1:
M("Connection established between ipe=%d, addr=%s\n"%(dest_ipe,addr))
"""
<connected_ipe> <n_pipelines> <id0> <id1> ...
"""
sock.sendInt(self.ipe)
if len(f_pipes) > 0:
sock.sendInt(0)
else:
sock.sendInt(len(b_pipes))
for pipe_id in b_pipes:
sock.sendInt(pipe_id)
self.pipes[sock.sock] = f_pipes
for pipe_id in f_pipes:
assert pipe_id not in self.pipe_id_to_sock
self.pipe_id_to_sock[pipe_id] = sock
self.issue("CONNECT_OK %d %d\n"%(self.ipe, dest_ipe))
def handle_ssh_msg(self, A):
#TODO
""" SSH <ipe> <target_ipe> <username> <n_names> <sshname0> <sshname1> ... """
# Parse args
target_ipe = atoi_ex(A[0], "target_ipe")
n_names = atoi_ex(A[1], "n_names")
if n_names == 0: raise MyException("n_names is 0, no available name to ssh")
ssh_names = []
for i in range(n_names):
ssh_names.append(A[i+2])
# Perform SSH and register connection for each ssh_name
raise "Not Implemented Yet"
    def handle_accept_msg(self, A):
        # ACCEPT <ipe> <n_accepts>: start select()ing the server socket
        # until n_accepts peers have connected.
        # Start selecting the serverSock
        self.r_socks.append(self.ss.sock.fileno())
        self.n_accepts = atoi_ex(A[0], "n_accepts")
    def check_accept_done(self):
        # Once the expected number of peers has connected, notify the
        # master and stop watching the server socket.
        if self.accepted == self.n_accepts:
            self.issue("ACCEPT_DONE %d\n"%(self.ipe))
            self.r_socks.remove(self.ss.sock.fileno())
    def accept(self):
        """
        accept selected ServerSocket
        """
        assert self.n_accepts != self.accepted
        sock = self.ss.accept()
        """
        <connected_ipe> <n_pipelines> <id0> <id1> ...
        """
        # Handshake: the connector announces its ipe and the pipeline ids
        # it will push to us through this connection.
        ipe = sock.recvInt()
        pipeid_list = []
        n_pipes = sock.recvInt()
        for i in range(n_pipes):
            pipe_id = sock.recvInt()
            pipeid_list.append(pipe_id)
        self.pipes[sock.sock] = pipeid_list
        for pipe_id in pipeid_list:
            # Each pipeline id maps to exactly one connection.
            assert pipe_id not in self.pipe_id_to_sock
            self.pipe_id_to_sock[pipe_id] = sock
        self.accepted += 1
        assert self.n_accepts != -1
        self.check_accept_done()
def handle_send_msg(self, A):
"""
SEND <ipe> <dirpath> \
<n_files> <filename0> <filesize0> <filename1> <filesize1>... \
<n_pipelines> <bw0> <bw1> ...
"""
def parse_args(A):
dirpath = A[0]
n_files = atoi_ex(A[1], "n_files")
index = 2
files = []
for i in range(n_files):
filename = A[index]
filesize = atoi_ex(A[index + 1], "filesize")
files.append((filename, filesize))
index += 2
pipes = []
n_pipes = atoi_ex(A[index], "n_pipes")
index += 1
for i in range(n_pipes):
bw = atof_ex(A[index], "pipeline_bw")
pipes.append(bw)
index += 1
return dirpath,files,pipes
def calc_pipe_id_rates(pipes):
pipe_id_rates = [] # A total of this list equals 10
bw_sum = sum(pipes)
chunks_per_a_loop = 100 - len(pipes)
bw_tmp = 0
prev_cc = 0
for pipe_id, bw in enumerate(pipes):
bw_tmp += bw
cc = int(chunks_per_a_loop * bw_tmp / bw_sum) + pipe_id + 1
pipe_id_rates.append((cc - prev_cc))
prev_cc = cc
return pipe_id_rates
### Parse args
dirpath,files,pipes = parse_args(A)
self.ds = DataSource(dirpath, files, DataSource.R)
## Prepare pipe_id_rates (rate for each pipeline)
pipe_id_rates = calc_pipe_id_rates(pipes)
# Send chunks in a proportional rate to pipe_id_rates
n_pipes = len(pipes)
if debug >= 1:
Es("---------------------")
Es("N_PIPES=%d\n"%(n_pipes))
Es("pipe_id_rates = %s\n"%(str(pipe_id_rates)))
Es("---------------------")
self.t1 = time.time()
self.serial_send(pipe_id_rates)
if TIME_1:
t = time.time() - self.t1
else:
t = time.time() - self.t0
if self.total_sent != self.ds.total_size:
M("Error: Sent size: %d, total size: %d\n"%(self.total_sent, self.ds.total_size))
self.issue("SEND_DONE %d %d %f\n"%(self.ipe, self.total_sent, t))
# def serial_send_0(self, pipe_id_rates):
# w_socks = []
# for s in self.pipe_id_to_sock.values():
# w_socks.append(s.sock)
# for stage in range(len(pipe_id_rates), 0, -1):
# if self.debug >= 1:
# M("Stage %d/%d\n"%(stage,len(pipe_id_rates)))
# breakflag = False
# while True:
# _,ws,_ = select.select([], w_socks, [], None)
# for rawsock in ws:
# pipe_id_list = self.pipes[rawsock]
# pipe_id = pipe_id_list[random.randint(0,len(pipe_id_list)-1)]
# if pipe_id >= stage: continue
# chunk_id = self.ds.get_chunk_id(pipe_id)
# if chunk_id is None:
# breakflag = True
# break
# sock = self.pipe_id_to_sock[pipe_id]
# self.actually_send(pipe_id, chunk_id, sock)
# if breakflag: break
# s = self.pipe_id_to_sock[stage-1]
# w_socks.remove(s.sock)
# self.ds.set_stage(stage-1)
def serial_send(self, pipe_id_rates):
count = 0
for stage in range(len(pipe_id_rates), 0, -1):
if self.debug >= 2:
M("Entering stage %d\n"%stage)
breakflag = False
while True:
for pipe_id,rate in enumerate(pipe_id_rates):
if pipe_id >= stage: continue
# Pick up the sock
sock = self.pipe_id_to_sock[pipe_id]
for i in range(rate):
chunk_id = self.ds.get_chunk_id(pipe_id)
if chunk_id is None:
breakflag = True
break
if self.debug >= 2:
M("Stage %d: Chunk [%d] for pipeline %d \n"%(stage,start/self.ds.chunksize,pipe_id))
self.actually_send(pipe_id, chunk_id, sock)
count += 1
if count % 10 == 0:
self.comm.write("POPUP %d #\n"%(self.ipe))
if breakflag: break
if breakflag: break
self.ds.set_stage(stage-1)
    def actually_send(self, pipe_id, chunk_id, sock, data=None):
        """
        Wire protocol for one chunk offer:
        >> <pipe_id> <cid>
        << <1 or 0>
        1>>
         <msg_size> <data>
        0 >>
         (do nothing)
        The receiver answers 0 when it already has the chunk, in which
        case the payload is skipped entirely.
        """
        sock.sendInt(pipe_id)
        sock.sendInt(chunk_id)
        need_more = sock.recvInt()
        if need_more == 1:
            if data is None:
                # Source node: read the chunk from disk (list of buffers).
                data = self.ds.read_chunk(chunk_id)
            size = 0
            for A in data:
                size += len(A)
            sock.sendInt(size)
            for A in data:
                sock.send(A)
            self.total_sent += size
    def actually_recv(self, sock):
        # Counterpart of actually_send: read the chunk header, tell the
        # sender whether we still need the chunk, and read its payload if
        # so.  Returns (pipe_id, chunk_id, data-or-None).
        pipe_id = sock.recvInt()
        chunk_id = sock.recvInt()
        if self.ds.has_chunk(chunk_id):
            sock.sendInt(0) # already have it; sender skips the payload
            return pipe_id,chunk_id,None
        sock.sendInt(1)
        size = sock.recvInt()
        data = sock.recv(size)
        self.total_rcvd += len(data)
        return pipe_id,chunk_id,data
    def handle_recvsend_msg(self, A):
        """
        RECVSEND <ipe> <dirpath>\
                 <n_files> <filename0> <filesize0> <filename1> <filesize1>...
        Prepare this worker to receive (and relay) the listed files:
        pre-create them via DataSource(RW) and start select()ing every
        pipeline connection for incoming chunks.
        """
        def parse_args(A):
            # Extract the destination path and (filename, size) pairs.
            dirpath = A[0]
            n_files = atoi_ex(A[1], "n_files")
            index = 2
            # Parse files
            files = []
            for i in range(n_files):
                filename = A[index]
                filesize = atoi_ex(A[index + 1], "filesize")
                files.append((filename, filesize))
                index += 2
            return dirpath,files
        dirpath,files = parse_args(A)
        self.ds = DataSource(dirpath,files, DataSource.RW)
        for rawsock in self.pipes:
            self.r_socks.append(rawsock)
    def recv_data(self, rawsock):
        """
        Receive data from `sock'
        Protocol: <pipe_id> <start> <end> <data>
        Writes the chunk locally, relays it downstream on the same
        pipeline if one exists, and reports RECVSEND_DONE to the master
        once every chunk has arrived.
        """
        sock = Socket(sock=rawsock)
        if not self.recv_started:
            if self.debug > 1:
                M("Starting to receive data\n")
            self.recv_started = True
            # t1 marks the first byte received (used when TIME_1 is set).
            self.t1 = time.time()
        pipe_id,chunk_id,data = self.actually_recv(sock)
        if self.debug >= 2:
            M("Got chunk[%d] from pipeline %d\n"%(chunk_id,pipe_id))
        self.ds.write_chunk(chunk_id, data)
        # Relay
        if pipe_id in self.pipe_id_to_sock:
            sock = self.pipe_id_to_sock[pipe_id]
            if data is None:
                # We already had the chunk; re-offer it downstream (the
                # next hop may still need it and will pull from disk).
                self.actually_send(pipe_id, chunk_id, sock)
            else:
                self.actually_send(pipe_id, chunk_id, sock, [data])
        if not self.recvsend_done_issued:
            if not self.ds.receive_completed(): return
            if TIME_1:
                t = time.time() - self.t1
            else:
                t = time.time() - self.t0
            assert self.total_rcvd == self.ds.total_size
            self.issue("RECVSEND_DONE %d %d %f\n"%(self.ipe,self.total_rcvd,t))
            self.recvsend_done_issued = True
def main():
worker = UCPWorker()
if __name__ == '__main__':
main()
| [
"dun@cs.uchicago.edu"
] | dun@cs.uchicago.edu | |
4ce6947775dab7af88e1fc6fdad91346ee7eeae3 | 0e9499a04019c844b1c5570ad401fc887aecfda1 | /ability.py | 1c5dd1771e15851e6e7b7706ef814db3fe2e7345 | [] | no_license | stanleychow/Superhero-Team-Dueler | 3365bf6722e193ff2a66f8868edaba72cf830262 | 9e12d24bf716037374259cd1aef0fe524063f4da | refs/heads/main | 2023-01-18T20:17:52.354410 | 2020-11-30T01:52:46 | 2020-11-30T01:52:46 | 314,967,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | #Random Module
import random
#Ability Constructor
class Ability:
    """A hero ability that deals a random amount of damage per attack."""

    def __init__(self, name, max_damage):
        """Store the ability's display name and its damage ceiling."""
        self.name = name
        self.max_damage = max_damage

    def attack(self):
        """Return a random damage value between 0 and max_damage, inclusive."""
        return random.randint(0, self.max_damage)
if __name__ == "__main__":
ability = Ability("Debugging Ability", 20)
print(ability.name)
print(ability.attack()) | [
"chowtech1022@gmail.com"
] | chowtech1022@gmail.com |
0070d9d32bf7e024587c9d7fad59d432a7f11e4e | 0a3940d6f8c083900db072876f3afd769bbfdb71 | /2019 Mar 24/bricks.py | e08fbc399a62434e0ca89e3ebeb800ae16364394 | [] | no_license | HelioStrike/Arjuna-Code | 174a104d4c8687535ae06ca28d71188eeb95784e | 1679ba15990116e6abba80979f228b0860ce8dc4 | refs/heads/master | 2020-04-21T23:38:11.147929 | 2019-09-08T05:05:36 | 2019-09-08T05:05:36 | 169,952,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py | n = int(input())
mapp = [0 for i in range(100)]
for i in range(n):
s = input().split(' ')
summ = 0
for val in s[:-1]:
summ += int(val)
mapp[summ] += 1
print(n-max(mapp)) | [
"rageofepicfury@gmail.com"
] | rageofepicfury@gmail.com |
282ae6e15473678f51124dfb45ce9e9e04bf37aa | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/birdlik.py | b5d428582689d4d0a1ff28dc6b984b99dcdccca4 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 82 | py | ii = [('LandWPA.py', 1), ('MedwTAI.py', 1), ('MedwTAI2.py', 1), ('WordWYR.py', 1)] | [
"prabhjyotsingh95@gmail.com"
] | prabhjyotsingh95@gmail.com |
15f5e625e8e2207124235ed86414a25fa7c30103 | b059726917ad9a6e6df20a489e86edcdb5ba47f8 | /negentropy/negentropy_kitchen_order/doctype/employee_pending_amount/employee_pending_amount.py | 22b25e6905aef4aab87a047d26c1afbed30a7821 | [
"MIT"
] | permissive | pjamsheer/Negentropy | f9a9283186ea6f00d9a9b91939b3cdabf96a61df | aae618a42faef75880c012d9e6d8bca69c02f340 | refs/heads/master | 2022-04-12T14:13:35.667430 | 2019-12-31T11:33:03 | 2019-12-31T11:33:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Kaynes Technology India Pvt Ltd and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class EmployeePendingAmount(Document):
	"""Frappe DocType controller for 'Employee Pending Amount'.

	No custom server-side behavior; all logic is declarative in the
	DocType schema.
	"""
	pass
| [
"ramya.a@kaynestechnology.net"
] | ramya.a@kaynestechnology.net |
ad3bb9cee62e36edbee53a574699af1e1489a8af | a06596102ed51de6b7786c1a3260f8d75bae676c | /adanet/core/eval_metrics_test.py | 8b5251e4bbd59810c4c26ccf00943c06677a0940 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | phymucs/adanet | 803b07cea49cc3821657085252c222ebe487be20 | 9fcd68cc220371d75923dcbf4eae9c1c6b9ed106 | refs/heads/master | 2020-08-11T18:56:26.858537 | 2019-10-02T16:09:17 | 2019-10-02T16:11:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,088 | py | """Tests for AdaNet eval metrics.
Copyright 2019 The AdaNet Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from adanet import tf_compat
from adanet.core.architecture import _Architecture
from adanet.core.eval_metrics import call_eval_metrics
import adanet.core.testing_utils as tu
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.eager import context
from tensorflow.python.framework import test_util
# pylint: enable=g-direct-tensorflow-import
class MetricsTest(tu.AdanetTestCase):
  def setup_graph(self):
    """Build a two-head EVAL TPUEstimatorSpec plus matching features/labels."""
    # We only test the multi head since this is the general case.
    self._features = {"x": tf.constant([[1.], [2.]])}
    heads = ("head_1", "head_2")
    labels = tf.constant([0, 1])
    self._labels = {head: labels for head in heads}
    predictions = {(head, "predictions"): labels for head in heads}
    loss = tf.constant(2.)
    # eval_metrics is the TPU-style (metric_fn, kwargs) pair; the kwargs
    # are echoed back to _spec_metric_fn, which verifies them.
    self._estimator_spec = tf_compat.v1.estimator.tpu.TPUEstimatorSpec(
        mode=tf.estimator.ModeKeys.EVAL,
        loss=loss,
        predictions=predictions,
        eval_metrics=(self._spec_metric_fn, {
            "features": self._features,
            "labels": self._labels,
            "predictions": predictions,
            "loss": loss
        }))
  def _run_metrics(self, metrics):
    """Evaluate metric ops and return a {metric_name: value} dict."""
    metric_ops = metrics
    if isinstance(metric_ops, tuple):
      # (metric_fn, kwargs) tuples are invoked to produce the ops dict.
      metric_ops = call_eval_metrics(metric_ops)
    self.evaluate((tf_compat.v1.global_variables_initializer(),
                   tf_compat.v1.local_variables_initializer()))
    self.evaluate(metric_ops)
    # Each metric is a (value_tensor, update_op) pair; report the value.
    return {k: self.evaluate(metric_ops[k][0]) for k in metric_ops}
  def _assert_tensors_equal(self, actual, expected):
    """Evaluate two tensor structures and assert their values are equal."""
    actual, expected = self.evaluate((actual, expected))
    self.assertEqual(actual, expected)
  def _spec_metric_fn(self, features, labels, predictions, loss):
    """Metric fn wired into the TPUEstimatorSpec; verifies its kwargs."""
    # The spec must forward exactly the tensors built in setup_graph.
    actual = [features, labels, predictions, loss]
    expected = [
        self._features, self._labels, self._estimator_spec.predictions,
        self._estimator_spec.loss
    ]
    self._assert_tensors_equal(actual, expected)
    return {"metric_1": tf_compat.v1.metrics.mean(tf.constant(1.))}
  def _metric_fn(self, features, predictions):
    """User-style metric fn; verifies only features and predictions."""
    actual = [features, predictions]
    expected = [self._features, self._estimator_spec.predictions]
    self._assert_tensors_equal(actual, expected)
    return {"metric_2": tf_compat.v1.metrics.mean(tf.constant(2.))}
  @parameterized.named_parameters(
      {
          "testcase_name": "use_tpu",
          "use_tpu": True,
      },
      {
          # TODO: Figure out why this gives error in TF 2.0:
          # ValueError: Please call update_state(...) on the "mean_1" metric.
          "testcase_name": "not_use_tpu",
          "use_tpu": False,
      })
  @test_util.run_in_graph_and_eager_modes
  def test_subnetwork_metrics(self, use_tpu):
    """Subnetwork metrics merge loss, spec metrics, and user metrics."""
    with context.graph_mode():
      self.setup_graph()
      spec = self._estimator_spec
      if not use_tpu:
        # Non-TPU path consumes a plain EstimatorSpec instead.
        spec = spec.as_estimator_spec()
      metrics = tu.create_subnetwork_metrics(
          self._metric_fn,
          use_tpu=use_tpu,
          features=self._features,
          labels=self._labels,
          estimator_spec=spec)
      actual = self._run_metrics(metrics.eval_metrics_tuple())
      expected = {"loss": 2., "metric_1": 1., "metric_2": 2.}
      self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_subnetwork_metrics_user_metric_fn_overrides_metrics(self):
with context.graph_mode():
self.setup_graph()
overridden_value = 100.
def _overriding_metric_fn():
value = tf.constant(overridden_value)
return {"metric_1": tf_compat.v1.metrics.mean(value)}
metrics = tu.create_subnetwork_metrics(
_overriding_metric_fn,
features=self._features,
labels=self._labels,
estimator_spec=self._estimator_spec)
actual = self._run_metrics(metrics.eval_metrics_tuple())
expected = {"loss": 2., "metric_1": overridden_value}
self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_ensemble_metrics(self):
with context.graph_mode():
self.setup_graph()
architecture = _Architecture("test_ensemble_candidate", "test_ensembler")
architecture.add_subnetwork(iteration_number=0, builder_name="b_0_0")
architecture.add_subnetwork(iteration_number=0, builder_name="b_0_1")
architecture.add_subnetwork(iteration_number=1, builder_name="b_1_0")
architecture.add_subnetwork(iteration_number=2, builder_name="b_2_0")
metrics = tu.create_ensemble_metrics(
self._metric_fn,
features=self._features,
labels=self._labels,
estimator_spec=self._estimator_spec,
architecture=architecture)
actual = self._run_metrics(metrics.eval_metrics_tuple())
serialized_arch_proto = actual["architecture/adanet/ensembles"]
expected_arch_string = b"| b_0_0 | b_0_1 | b_1_0 | b_2_0 |"
self.assertIn(expected_arch_string, serialized_arch_proto)
@parameterized.named_parameters(
{
"testcase_name": "use_tpu_evaluating",
"use_tpu": True,
"mode": tf.estimator.ModeKeys.EVAL,
}, {
"testcase_name": "use_tpu_not_evaluating",
"use_tpu": True,
"mode": tf.estimator.ModeKeys.TRAIN,
}, {
"testcase_name": "not_use_tpu_evaluating",
"use_tpu": False,
"mode": tf.estimator.ModeKeys.EVAL,
}, {
"testcase_name": "not_use_tpu_not_evaluating",
"use_tpu": False,
"mode": tf.estimator.ModeKeys.TRAIN,
})
@test_util.run_in_graph_and_eager_modes
def test_iteration_metrics(self, use_tpu, mode):
with context.graph_mode():
self.setup_graph()
best_candidate_index = 3
ensemble_metrics = []
for i in range(10):
def metric_fn(val=i):
metric = tf.keras.metrics.Mean()
metric.update_state(tf.constant(val))
return {
"ensemble_v1_metric": tf_compat.v1.metrics.mean(tf.constant(val)),
"ensemble_keras_metric": metric
}
ensemble_metrics.append(tu.create_ensemble_metrics(metric_fn))
metrics = tu.create_iteration_metrics(ensemble_metrics=ensemble_metrics)
metrics_fn = (
metrics.best_eval_metrics_tuple
if use_tpu else metrics.best_eval_metric_ops)
actual = self._run_metrics(
metrics_fn(tf.constant(best_candidate_index), mode) or {})
if mode == tf.estimator.ModeKeys.EVAL:
expected = {
"ensemble_v1_metric": best_candidate_index,
"ensemble_keras_metric": best_candidate_index,
"iteration": 1
}
# We don't actually provide an architecture, so the default will be
# inside.
del actual["architecture/adanet/ensembles"]
else:
expected = {}
self.assertEqual(actual, expected)
@test_util.run_in_graph_and_eager_modes
def test_metric_ops_not_duplicated_on_cpu(self):
with context.graph_mode():
self.setup_graph()
metric_fn = lambda: {"metric": (tf.constant(5), tf.constant(5))}
best_candidate_index = 3
mode = tf.estimator.ModeKeys.EVAL
ensemble_metrics = tu.create_ensemble_metrics(metric_fn)
subnetwork_metrics = tu.create_subnetwork_metrics(metric_fn)
iteration_metrics = tu.create_iteration_metrics(
ensemble_metrics=[ensemble_metrics],
subnetwork_metrics=[subnetwork_metrics])
ensemble_ops1 = call_eval_metrics(ensemble_metrics.eval_metrics_tuple())
ensemble_ops2 = call_eval_metrics(ensemble_metrics.eval_metrics_tuple())
subnetwork_ops1 = call_eval_metrics(
subnetwork_metrics.eval_metrics_tuple())
subnetwork_ops2 = call_eval_metrics(
subnetwork_metrics.eval_metrics_tuple())
iteration_ops1 = iteration_metrics.best_eval_metric_ops(
best_candidate_index, mode)
iteration_ops2 = iteration_metrics.best_eval_metric_ops(
best_candidate_index, mode)
self.assertEqual(subnetwork_ops1, subnetwork_ops2)
self.assertEqual(ensemble_ops1, ensemble_ops2)
self.assertEqual(iteration_ops1, iteration_ops2)
for ops in [ensemble_ops1, subnetwork_ops1, iteration_ops1]:
self.assertIsNotNone(ops)
# Standard TensorFlow test runner entry point.
if __name__ == "__main__":
  tf.test.main()
| [
"weill@google.com"
] | weill@google.com |
8e298b1a973704c70cc3155b17995c14b3711113 | 7621431120b33ebfad572747c9d230134399b1ac | /Codechef Long June Div 2/try.py | 229dc7f1510cf81c1a716b472e8ecb52894b6d58 | [] | no_license | Anmol-Sri/Competitive-Environment | 018a9e2c0dbdc7655b508630da46aaab21916eca | 0a9fec631a10def56ddb01c0da7fb9d04277eeae | refs/heads/master | 2023-06-26T20:54:46.750369 | 2021-07-30T18:30:53 | 2021-07-30T18:30:53 | 301,129,278 | 0 | 1 | null | 2021-07-27T19:48:06 | 2020-10-04T12:52:41 | C++ | UTF-8 | Python | false | false | 479 | py | import math
def check(num):
    """Return True if num is a pronic number, i.e. num == x*(x+1) for some x >= 1.

    0 is explicitly excluded (0 == 0*1, but the problem treats it as invalid).
    math.sqrt works on floats, so for very large inputs the truncated root can
    be off by one; we therefore test the neighboring candidates as well.
    """
    if num == 0:
        return False
    x = int(math.sqrt(num))
    for cand in (x - 1, x, x + 1):
        if cand >= 0 and cand * (cand + 1) == num:
            return True
    return False
# Read the digit string and collect every contiguous substring whose integer
# value is a pronic number (see check()); print the distinct values sorted
# ascending and comma-separated, or -1 when none exist.
s = input()
n = len(s)
ans = []
for i in range(n):
    for end in range(i + 1, n + 1):
        num = int(s[i:end])
        if check(num):
            ans.append(num)
if not ans:
    print("-1")
else:
    # Sort and de-duplicate in one step; the previous manual
    # `x not in res` scan was O(n^2) and abused a list comprehension
    # for its side effect.
    res = sorted(set(ans))
    print(",".join(map(str, res)))
| [
"anmolsrirocks@gmail.com"
] | anmolsrirocks@gmail.com |
5a2178dd2a85a60fc79ccabda55c29d08be598ea | 44b924fdd119b7953c7ef69f03e62ae0b23722a9 | /hyperdex-datastructures-fuzz-test | 7be53a8181af1b953caafe89a1c798ce5eb59641 | [
"BSD-3-Clause"
] | permissive | hyc/HyperDex | 7ecd1b5b0b16ee52a9d253239e5ab4fff0b78532 | f6b010fba61e654e2f1f0602383cab9c04787766 | refs/heads/master | 2021-01-17T04:24:06.741553 | 2013-12-10T09:40:26 | 2013-12-10T09:40:26 | 8,479,640 | 16 | 3 | null | null | null | null | UTF-8 | Python | false | false | 25,274 | #!/usr/bin/env python
# Copyright (c) 2012, Cornell University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of HyperDex nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''Construct the space shown below and fuzz test it.'''
import collections
import copy
import operator
import pprint
import random
import string
import sys
import time
import hyperclient
from hyperclient import *
# Status/return codes used by the model to describe expected outcomes.
# NOTE(review): the names and values presumably mirror the hyperclient C
# API's return-code enum — verify against the hyperclient headers.
HYPERCLIENT_SUCCESS = 8448
HYPERCLIENT_NOTFOUND = 8449
HYPERCLIENT_SEARCHDONE = 8450
HYPERCLIENT_CMPFAIL = 8451
HYPERCLIENT_UNKNOWNSPACE = 8512
HYPERCLIENT_COORDFAIL = 8513
HYPERCLIENT_SERVERERROR = 8514
HYPERCLIENT_POLLFAILED = 8515
HYPERCLIENT_OVERFLOW = 8516
HYPERCLIENT_RECONFIGURE = 8517
HYPERCLIENT_TIMEOUT = 8519
HYPERCLIENT_UNKNOWNATTR = 8520
HYPERCLIENT_DUPEATTR = 8521
HYPERCLIENT_NONEPENDING = 8523
HYPERCLIENT_DONTUSEKEY = 8524
HYPERCLIENT_WRONGTYPE = 8525
HYPERCLIENT_NOMEM = 8526
HYPERCLIENT_EXCEPTION = 8574
HYPERCLIENT_ZERO = 8575
HYPERCLIENT_A = 8576
HYPERCLIENT_B = 8577
# Alphabet for randomly generated string values.
CHARSET = string.ascii_letters + string.digits + string.punctuation
# Attribute names of the "fuzz" space; the mnemonics encode the type
# (s=string, i=int, l*=list, s*=set, m??=map with given key/value types).
ATTRS = ('s', 'i', 'ls', 'li', 'ss', 'si', 'mss', 'msi', 'mis', 'mii')
# Bounds of a signed 64-bit integer (HyperDex's integer attribute type).
INT64_MIN = -9223372036854775808
INT64_MAX = 9223372036854775807
def generate_bytes():
    """Random string of up to 16 characters drawn from CHARSET."""
    length = random.randint(0, 16)
    return ''.join(random.choice(CHARSET) for _ in xrange(length))

def generate_int():
    """Uniform random signed 64-bit integer."""
    return random.randint(INT64_MIN, INT64_MAX)

def generate_list_bytes():
    """List of 0-16 random strings."""
    return [generate_bytes() for _ in xrange(random.randint(0, 16))]

def generate_list_int():
    """List of 0-16 random 64-bit integers."""
    return [generate_int() for _ in xrange(random.randint(0, 16))]

def generate_set_bytes():
    """Set built from a random string list (duplicates collapse)."""
    return set(generate_list_bytes())

def generate_set_int():
    """Set built from a random integer list (duplicates collapse)."""
    return set(generate_list_int())

def generate_map_bytes_bytes():
    """string -> string map with up to 16 entries."""
    return dict((generate_bytes(), generate_bytes()) for _ in xrange(random.randint(0, 16)))

def generate_map_bytes_int():
    """string -> int map with up to 16 entries."""
    return dict((generate_bytes(), generate_int()) for _ in xrange(random.randint(0, 16)))

def generate_map_int_bytes():
    """int -> string map with up to 16 entries."""
    return dict((generate_int(), generate_bytes()) for _ in xrange(random.randint(0, 16)))

def generate_map_int_int():
    """int -> int map with up to 16 entries."""
    return dict((generate_int(), generate_int()) for _ in xrange(random.randint(0, 16)))
def generate_put():
    """Random key plus a value dict covering a random subset of ATTRS."""
    key = str(random.randint(0, 1024))
    makers = {'s': generate_bytes,
              'i': generate_int,
              'ls': generate_list_bytes,
              'li': generate_list_int,
              'ss': generate_set_bytes,
              'si': generate_set_int,
              'mss': generate_map_bytes_bytes,
              'msi': generate_map_bytes_int,
              'mis': generate_map_int_bytes,
              'mii': generate_map_int_int}
    chosen = random.sample(ATTRS, random.randint(0, len(ATTRS)))
    value = {attr: makers[attr]() for attr in chosen}
    return key, value
def generate_del():
    """Argument tuple for a delete: a single random key in [0, 1024]."""
    return (str(random.randint(0, 1024)),)
def _coin(prob=0.9):
    """True with probability prob (consumes one draw from the global RNG)."""
    return random.random() < prob

def generate_atomic_math():
    """Key plus, 90% of the time, an integer operand for field 'i'."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['i'] = generate_int()
    return key, val

def generate_string_pend():
    """Key plus, 90% of the time, a string operand for field 's'."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['s'] = generate_bytes()
    return key, val

def generate_list_push():
    """Key plus independent optional operands for both list fields."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['li'] = generate_int()
    if _coin():
        val['ls'] = generate_bytes()
    return key, val

def generate_set_op():
    """Key plus optional single-element operands for both set fields."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['si'] = generate_int()
    if _coin():
        val['ss'] = generate_bytes()
    return key, val

def generate_set_set():
    """Key plus optional whole-set operands (for intersect/union)."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['si'] = generate_set_int()
    if _coin():
        val['ss'] = generate_set_bytes()
    return key, val

def generate_map_op():
    """Key plus one optional entry per map field (always all four maps)."""
    key = str(random.randint(0, 1024))
    val = {'mss': {}, 'msi': {}, 'mis': {}, 'mii': {}}
    # Note: Python evaluates the right-hand side before the subscript key,
    # so the value generator runs before randint on each of these lines.
    if _coin():
        val['mss'][str(random.randint(0, 1024))] = generate_bytes()
    if _coin():
        val['msi'][str(random.randint(0, 1024))] = generate_int()
    if _coin():
        val['mis'][random.randint(0, 1024)] = generate_bytes()
    if _coin():
        val['mii'][random.randint(0, 1024)] = generate_int()
    return key, val

def generate_map_remove():
    """Key plus optional map-keys to remove from each map field."""
    key = str(random.randint(0, 1024))
    val = {}
    if _coin():
        val['mss'] = str(random.randint(0, 1024))
    if _coin():
        val['msi'] = str(random.randint(0, 1024))
    if _coin():
        val['mis'] = random.randint(0, 1024)
    if _coin():
        val['mii'] = random.randint(0, 1024)
    return key, val

def generate_map_atomic_math():
    """Key plus integer operands for random subsets of 'msi' and 'mii' keys."""
    key = str(random.randint(0, 1024))
    val = {'msi': {}, 'mii': {}}
    for k in [str(k) for k in random.sample(range(0, 1025), random.randint(0, 1024))]:
        val['msi'][k] = generate_int()
    for k in random.sample(range(0, 1025), random.randint(0, 1024)):
        val['mii'][k] = generate_int()
    return key, val
def generate_map_string_pend():
    """Key plus string operands for random subsets of 'mss' and 'mis' keys.

    Mirrors generate_map_atomic_math but targets the string-valued maps.
    A stray duplicated `num = random.randint(0, 1024)` draw (whose result
    was immediately overwritten) has been removed.
    """
    key = str(random.randint(0, 1024))
    val = {'mss': {}, 'mis': {}}
    num = random.randint(0, 1024)
    keys = [str(k) for k in random.sample(range(0, 1025), num)]
    for k in keys:
        val['mss'][k] = generate_bytes()
    num = random.randint(0, 1024)
    for k in random.sample(range(0, 1025), num):
        val['mis'][k] = generate_bytes()
    return key, val
def random_generator():
    """Endlessly yield (operation name, generated argument tuple) fuzz cases."""
    # Insertion order matches the original table; on Python 2 the dict's
    # keys() order is what random.choice indexes into.
    op_gen = {
        'put': generate_put,
        'del': generate_del,
        'atomic_add': generate_atomic_math,
        'atomic_sub': generate_atomic_math,
        'atomic_mul': generate_atomic_math,
        'atomic_div': generate_atomic_math,
        'atomic_mod': generate_atomic_math,
        'atomic_and': generate_atomic_math,
        'atomic_or': generate_atomic_math,
        'atomic_xor': generate_atomic_math,
        'string_prepend': generate_string_pend,
        'string_append': generate_string_pend,
        'list_lpush': generate_list_push,
        'list_rpush': generate_list_push,
        'set_add': generate_set_op,
        'set_remove': generate_set_op,
        'set_intersect': generate_set_set,
        'set_union': generate_set_set,
        'map_add': generate_map_op,
        'map_remove': generate_map_remove,
        'map_atomic_add': generate_map_atomic_math,
        'map_atomic_sub': generate_map_atomic_math,
        'map_atomic_mul': generate_map_atomic_math,
        'map_atomic_div': generate_map_atomic_math,
        'map_atomic_mod': generate_map_atomic_math,
        'map_atomic_and': generate_map_atomic_math,
        'map_atomic_or': generate_map_atomic_math,
        'map_atomic_xor': generate_map_atomic_math,
        'map_string_prepend': generate_map_string_pend,
        'map_string_append': generate_map_string_pend,
    }
    while True:
        op = random.choice(op_gen.keys())
        yield op, op_gen[op]()
def inlineexception(f):
    """Decorator: call f, returning a raised HyperClientException instead of
    letting it propagate, so expected failures can be compared as values.

    functools.wraps preserves the wrapped function's name/docstring, which
    the original decorator clobbered with 'newf'.
    """
    import functools

    @functools.wraps(f)
    def newf(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except HyperClientException as e:
            return e
    return newf
class HyperDexClient(object):
    """Thin wrapper around a live hyperclient connection.

    Exposes one op_* method per fuzzed operation; each returns the object
    stored under the key after the operation completed (or, via
    @inlineexception, the HyperClientException that was raised), so results
    can be compared directly against HyperDexModel.
    """
    def __init__(self, addr, port, space):
        self._client = hyperclient.Client(addr, port)
        # Drop any stale space left over from a previous run; best-effort.
        try:
            self._client.rm_space('fuzz')
        except:
            pass
        # (Re)create the space; best-effort (it may already exist).
        try:
            self._client.add_space('''
            space fuzz
            key string k
            attributes
                string s,
                int i,
                list(string) ls,
                list(int) li,
                set(string) ss,
                set(int) si,
                map(string, string) mss,
                map(string, int) msi,
                map(int, string) mis,
                map(int, int) mii
            create 1 partition
            ''')
            # Give the coordinator a moment to deploy the new space.
            time.sleep(1)
        except:
            pass
        self._space = space
    @inlineexception
    def op_put(self, key, value):
        """Store value under key, then return the full stored object."""
        assert self._client.put(self._space, key, value)
        return self._client.get(self._space, key)
    @inlineexception
    def op_del(self, key):
        """Delete key; hyperclient returns whether it existed."""
        return self._client.delete(self._space, key)
    @inlineexception
    def _atomic(self, key, ops, func):
        """Run one atomic client call and return the resulting object."""
        func(self._space, key, ops)
        return self._client.get(self._space, key)
    # One delegating wrapper per hyperclient atomic operation; all share the
    # mutate-then-fetch behavior of _atomic above.
    def op_atomic_add(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_add)
    def op_atomic_sub(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_sub)
    def op_atomic_mul(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_mul)
    def op_atomic_div(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_div)
    def op_atomic_mod(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_mod)
    def op_atomic_and(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_and)
    def op_atomic_or(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_or)
    def op_atomic_xor(self, key, nums):
        return self._atomic(key, nums, self._client.atomic_xor)
    def op_string_prepend(self, key, s):
        return self._atomic(key, s, self._client.string_prepend)
    def op_string_append(self, key, s):
        return self._atomic(key, s, self._client.string_append)
    def op_list_lpush(self, key, s):
        return self._atomic(key, s, self._client.list_lpush)
    def op_list_rpush(self, key, s):
        return self._atomic(key, s, self._client.list_rpush)
    def op_set_add(self, key, s):
        return self._atomic(key, s, self._client.set_add)
    def op_set_remove(self, key, s):
        return self._atomic(key, s, self._client.set_remove)
    def op_set_intersect(self, key, s):
        return self._atomic(key, s, self._client.set_intersect)
    def op_set_union(self, key, s):
        return self._atomic(key, s, self._client.set_union)
    def op_map_add(self, key, ops):
        return self._atomic(key, ops, self._client.map_add)
    def op_map_remove(self, key, ops):
        return self._atomic(key, ops, self._client.map_remove)
    def op_map_atomic_add(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_add)
    def op_map_atomic_sub(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_sub)
    def op_map_atomic_mul(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_mul)
    def op_map_atomic_div(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_div)
    def op_map_atomic_mod(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_mod)
    def op_map_atomic_and(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_and)
    def op_map_atomic_or(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_or)
    def op_map_atomic_xor(self, key, nums):
        return self._atomic(key, nums, self._client.map_atomic_xor)
    def op_map_string_prepend(self, key, s):
        return self._atomic(key, s, self._client.map_string_prepend)
    def op_map_string_append(self, key, s):
        return self._atomic(key, s, self._client.map_string_append)
def _add_overflow_check(func):
    """Wrap a binary integer op so results outside the signed 64-bit range
    raise HYPERCLIENT_OVERFLOW, matching the server's fixed-width ints."""
    def checked(a, b):
        result = func(a, b)
        if not (INT64_MIN <= result <= INT64_MAX):
            raise HyperClientException(HYPERCLIENT_OVERFLOW)
        return result
    return checked
def _set_add(x, y):
x.add(y)
return x
def _set_remove(x, y):
if y in x:
x.remove(y)
return x
def _map_add(x, y):
for k, v in y.iteritems():
x[k] = v
return x
def _map_remove(x, y):
if y in x:
del x[y]
return x
class HyperDexModel(object):
    """Pure in-memory reference implementation of the fuzzed space.

    Mirrors the semantics expected from the HyperDex server so each
    operation's outcome can be compared against HyperDexClient. Methods
    return the stored object after a successful operation, or a
    HyperClientException value describing the expected failure (either
    returned directly or captured by @inlineexception).

    Fix over the original: _atomic no longer makes a redundant shallow
    copy of the stored object that was immediately overwritten by the
    deep copy on the next line.
    """
    def __init__(self):
        def stored():
            # Default (empty) attribute values for a freshly created object.
            return {'s': '',
                    'i': 0,
                    'ls': list(),
                    'li': list(),
                    'ss': set(),
                    'si': set(),
                    'mss': dict(),
                    'msi': dict(),
                    'mis': dict(),
                    'mii': dict()}
        self._kvs = collections.defaultdict(stored)
        # Per-attribute type validators used by full-value puts.
        # (bytes == str on Python 2, which this file targets.)
        self._put_types = {
            's': lambda x: isinstance(x, bytes),
            'i': lambda x: isinstance(x, int),
            'ls': lambda x: isinstance(x, list) and all([isinstance(e, bytes) for e in x]),
            'li': lambda x: isinstance(x, list) and all([isinstance(e, int) for e in x]),
            'ss': lambda x: isinstance(x, set) and all([isinstance(e, bytes) for e in x]),
            'si': lambda x: isinstance(x, set) and all([isinstance(e, int) for e in x]),
            'mss': lambda x: isinstance(x, dict) and all([isinstance(k, bytes) and isinstance(v, bytes) for (k, v) in x.iteritems()]),
            'msi': lambda x: isinstance(x, dict) and all([isinstance(k, bytes) and isinstance(v, int) for (k, v) in x.iteritems()]),
            'mis': lambda x: isinstance(x, dict) and all([isinstance(k, int) and isinstance(v, bytes) for (k, v) in x.iteritems()]),
            'mii': lambda x: isinstance(x, dict) and all([isinstance(k, int) and isinstance(v, int) for (k, v) in x.iteritems()])}
    @inlineexception
    def op_put(self, key, value):
        """Validate every attribute, then merge value into the stored object."""
        for k, v in value.iteritems():
            if k not in self._put_types:
                return HyperClientException(HYPERCLIENT_UNKNOWNATTR, k)
            if not self._put_types[k](v):
                return HyperClientException(HYPERCLIENT_WRONGTYPE, k)
        for k, v in value.iteritems():
            self._kvs[key][k] = v
        return self._kvs[key]
    @inlineexception
    def op_del(self, key):
        """Delete key; True if it existed, False otherwise."""
        if key in self._kvs:
            del self._kvs[key]
            return True
        else:
            return False
    @inlineexception
    def _atomic(self, key, ops, func, validate):
        """Validate ops, then apply func attribute-by-attribute.

        Works on a deep copy so a failure part-way through cannot leave the
        stored object half-mutated.
        """
        if key not in self._kvs:
            return HyperClientException(HYPERCLIENT_NOTFOUND)
        for k, v in ops.iteritems():
            if k not in self._put_types:
                return HyperClientException(HYPERCLIENT_UNKNOWNATTR, k)
            if not validate.get(k, lambda x: False)(v):
                return HyperClientException(HYPERCLIENT_WRONGTYPE, k)
        newobj = copy.deepcopy(self._kvs[key])
        for k, v in ops.iteritems():
            newobj[k] = func(newobj[k], v)
        self._kvs[key] = newobj
        return self._kvs[key]
    @inlineexception
    def _atomic_map(self, key, ops, func, default, validate):
        """Apply func to individual map entries, creating them with `default`.

        NOTE(review): unlike _atomic, this validates types *before* the
        key-existence check — presumably matching server behavior; confirm
        against the server if a mismatch ever points here.
        """
        for k, m in ops.iteritems():
            for mk, mv in m.iteritems():
                if k not in self._put_types:
                    return HyperClientException(HYPERCLIENT_UNKNOWNATTR, k)
                if not validate.get(k, lambda x, y: False)(mk, mv):
                    return HyperClientException(HYPERCLIENT_WRONGTYPE, k)
        if key not in self._kvs:
            return HyperClientException(HYPERCLIENT_NOTFOUND)
        newobj = copy.deepcopy(self._kvs[key])
        for k, m in ops.iteritems():
            for mk, mv in m.iteritems():
                if mk not in newobj[k]:
                    newobj[k][mk] = default
                newobj[k][mk] = func(newobj[k][mk], mv)
        self._kvs[key] = newobj
        return self._kvs[key]
    # Scalar integer arithmetic on attribute 'i' (overflow-checked where the
    # result can leave the signed 64-bit range).
    def op_atomic_add(self, key, nums):
        return self._atomic(key, nums, _add_overflow_check(operator.add),
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_sub(self, key, nums):
        return self._atomic(key, nums, _add_overflow_check(operator.sub),
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_mul(self, key, nums):
        return self._atomic(key, nums, _add_overflow_check(operator.mul),
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_div(self, key, nums):
        # operator.div exists on Python 2 only.
        return self._atomic(key, nums, _add_overflow_check(operator.div),
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_mod(self, key, nums):
        return self._atomic(key, nums, _add_overflow_check(operator.mod),
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_and(self, key, nums):
        return self._atomic(key, nums, lambda x, y: x & y,
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_or(self, key, nums):
        return self._atomic(key, nums, lambda x, y: x | y,
                            {'i': lambda x: isinstance(x, int)})
    def op_atomic_xor(self, key, nums):
        return self._atomic(key, nums, lambda x, y: x ^ y,
                            {'i': lambda x: isinstance(x, int)})
    # String, list and set operations on the corresponding attributes.
    def op_string_prepend(self, key, s):
        return self._atomic(key, s, lambda x, y: y + x,
                            {'s': lambda x: isinstance(x, bytes)})
    def op_string_append(self, key, s):
        return self._atomic(key, s, lambda x, y: x + y,
                            {'s': lambda x: isinstance(x, bytes)})
    def op_list_lpush(self, key, s):
        return self._atomic(key, s, lambda x, y: [y] + x,
                            {'li': lambda x: isinstance(x, int),
                             'ls': lambda x: isinstance(x, bytes)})
    def op_list_rpush(self, key, s):
        return self._atomic(key, s, lambda x, y: x + [y],
                            {'li': lambda x: isinstance(x, int),
                             'ls': lambda x: isinstance(x, bytes)})
    def op_set_add(self, key, s):
        return self._atomic(key, s, _set_add,
                            {'si': lambda x: isinstance(x, int),
                             'ss': lambda x: isinstance(x, bytes)})
    def op_set_remove(self, key, s):
        return self._atomic(key, s, _set_remove,
                            {'si': lambda x: isinstance(x, int),
                             'ss': lambda x: isinstance(x, bytes)})
    def op_set_intersect(self, key, s):
        return self._atomic(key, s, lambda x, y: x & y,
                            {'si': lambda x: isinstance(x, set) and all([isinstance(e, int) for e in x]),
                             'ss': lambda x: isinstance(x, set) and all([isinstance(e, bytes) for e in x])})
    def op_set_union(self, key, s):
        return self._atomic(key, s, lambda x, y: x | y,
                            {'si': lambda x: isinstance(x, set) and all([isinstance(e, int) for e in x]),
                             'ss': lambda x: isinstance(x, set) and all([isinstance(e, bytes) for e in x])})
    # Whole-map and per-entry map operations.
    def op_map_add(self, key, ops):
        return self._atomic(key, ops, _map_add,
                            {'msi': lambda d: isinstance(d, dict) and all([isinstance(x, bytes) and isinstance(y, int) for x, y in d.iteritems()]),
                             'mii': lambda d: isinstance(d, dict) and all([isinstance(x, int) and isinstance(y, int) for x, y in d.iteritems()]),
                             'mss': lambda d: isinstance(d, dict) and all([isinstance(x, bytes) and isinstance(y, bytes) for x, y in d.iteritems()]),
                             'mis': lambda d: isinstance(d, dict) and all([isinstance(x, int) and isinstance(y, bytes) for x, y in d.iteritems()])})
    def op_map_remove(self, key, s):
        return self._atomic(key, s, _map_remove,
                            {'msi': lambda x: isinstance(x, bytes),
                             'mii': lambda x: isinstance(x, int),
                             'mss': lambda x: isinstance(x, bytes),
                             'mis': lambda x: isinstance(x, int)})
    def op_map_atomic_add(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(operator.add), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_sub(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(operator.sub), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_mul(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(operator.mul), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_div(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(operator.div), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_mod(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(operator.mod), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_and(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(lambda x, y: x & y), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_or(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(lambda x, y: x | y), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_atomic_xor(self, key, nums):
        return self._atomic_map(key, nums, _add_overflow_check(lambda x, y: x ^ y), 0,
                                {'msi': lambda x, y: isinstance(x, bytes) and isinstance(y, int),
                                 'mii': lambda x, y: isinstance(x, int) and isinstance(y, int)})
    def op_map_string_prepend(self, key, nums):
        return self._atomic_map(key, nums, lambda x, y: y + x, '',
                                {'mss': lambda x, y: isinstance(x, bytes) and isinstance(y, bytes),
                                 'mis': lambda x, y: isinstance(x, int) and isinstance(y, bytes)})
    def op_map_string_append(self, key, nums):
        return self._atomic_map(key, nums, lambda x, y: x + y, '',
                                {'mss': lambda x, y: isinstance(x, bytes) and isinstance(y, bytes),
                                 'mis': lambda x, y: isinstance(x, int) and isinstance(y, bytes)})
if __name__ == '__main__':
    # Fixed seed: the fuzz run is deterministic and reproducible.
    random.seed(0)
    hyperdex = HyperDexClient('127.0.0.1', 1982, 'fuzz')
    model = HyperDexModel()
    for op, args in random_generator():
        #print op, args
        # Apply the same operation to the in-memory model and the live
        # server; any divergence means one of them is wrong.
        m = getattr(model, 'op_' + op)(*args)
        h = getattr(hyperdex, 'op_' + op)(*args)
        if m != h:
            print 'There was a mismatch:', op, args
            print 'Model:'
            pprint.pprint(m)
            print 'HyperDex:'
            pprint.pprint(h)
            sys.exit(1)
| [
"me@robescriva.com"
] | me@robescriva.com | |
a014ff2a43c5baa438da77c270f14acfde2096ba | 1046bc7b2deea022926551ca47084dfdabe82fdf | /regression/urls.py | 8a69c2886d0bca087e91976b2818c6e91dc652b4 | [] | no_license | sking8484/quantcore | 00d1b23e2261635d098946a9eaf955ddec5c3388 | f780108673e12f3d362884cf23da78cc9e65c17d | refs/heads/master | 2022-12-11T19:44:21.636514 | 2019-09-21T23:22:40 | 2019-09-21T23:22:40 | 143,889,230 | 0 | 0 | null | 2022-06-21T21:23:27 | 2018-08-07T14:58:53 | HTML | UTF-8 | Python | false | false | 240 | py | from django.urls import path
from . import views
# URL namespace: reverse as 'regression:<route name>'.
app_name = 'regression'
# Route table for the regression app. (PEP 8: no spaces around '=' in
# keyword arguments.)
urlpatterns = [
    path('', views.regression_home, name='regression_home'),
    path('simple_regression/', views.simple_regression, name='simple_regression'),
]
| [
"sking8484@gmail.com"
] | sking8484@gmail.com |
284de7e065a248d3b43485969f7b6466a126241a | 8a7cd267ad123524b51ae86710854e9674769c54 | /5/5_1_0.py | 9a57d1e3eef5144e02b430bb1fad1870c2b85ad7 | [] | no_license | wjwjwj2223/NeteaseAlgorithm | 3601d6b21d1654ff993894bc8a80428fb071b163 | 8371e0f11a01a82d50ddbe15c04d34bfd2e698fa | refs/heads/master | 2022-11-13T23:46:14.821761 | 2020-07-06T17:44:13 | 2020-07-06T17:44:13 | 268,696,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,044 | py | class MinHeap(object):
    def __init__(self):
        # Index 0 is an unused sentinel so node i's children sit at 2i, 2i+1.
        self.__datas = [None]
        # Count of real elements (the sentinel is excluded).
        self.__size = 0
    def add(self, value):
        """Append value as the last leaf and bubble it up into place."""
        self.__datas.append(value)
        self.__size += 1
        self.shitUp(self.__size)
    def get_smallest(self):
        """Return the minimum element without removing it (root at index 1)."""
        return self.__datas[1]
def extractMin(self):
minv = self.__datas[1]
self.__datas[1] = self.__datas.pop()
self.__size -= 1
self.shitDown()
return minv
    def getSize(self):
        """Number of elements currently stored."""
        return self.__size
def shitUp(self, index):
if index == 1:
return
parent_index = index // 2
if self.__datas[parent_index] > self.__datas[index]:
self.__swap(index, parent_index)
self.shitUp(parent_index)
def shitDown(self):
if self.__size <= 2:
return
currentIndex = 1
michal = self.minChild(currentIndex)
while michal and self.__datas[currentIndex] > self.__datas[michal]:
self.__swap(currentIndex, michal)
currentIndex = michal
michal = self.minChild(currentIndex)
def size(self):
return len(self.__datas)
def isEmpty(self):
return len(self.__datas) == 0
    def leftChild(self, index):
        """Index of the left child in the 1-indexed array layout."""
        return index * 2
    def rightChild(self, index):
        """Index of the right child in the 1-indexed array layout."""
        return index * 2 + 1
def minChild(self, index):
li = self.leftChild(index)
ri = self.rightChild(index)
mini = None
if ri <= self.__size:
if self.__datas[li] > self.__datas[ri]:
mini = ri
else:
mini = li
elif li <= self.__size:
mini = li
return mini
    def __swap(self, a, b):
        # Exchange the elements at indices a and b.
        self.__datas[a], self.__datas[b] = self.__datas[b], self.__datas[a]
# Read space-separated integers, heapify them, then print the m smallest
# values in descending order.
source = input()
s_list = source.split(' ')
heap = MinHeap()
for s in s_list:
    heap.add(int(s))
m = int(input())
minList = []
for i in range(0, m):
    minList.append(heap.extractMin())
# Extraction yields ascending order; reverse for descending output.
minList.reverse()
for i in minList:
    print(i, end=' ')
| [
"17054214@qq.com"
] | 17054214@qq.com |
509f1fbff89c90660823404621862865de42034f | d11680f0a9de67e742d050b8b45f8c2a21a5cdaf | /Distribucion_Modulos/setup.py | 8c2884a8161e18cdf87687b90cbfd317ae2ee324 | [] | no_license | elaris6/PythonPildoras | eba330dd830e7e215930bc82cdbc996224c8b7d4 | 028610870aa2a7129cee94c69c70ee363b786cae | refs/heads/master | 2023-05-27T19:45:53.336629 | 2021-06-13T15:41:28 | 2021-06-13T15:41:28 | 376,576,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,394 | py | """
Plantilla base para la configuración de la función setup
INFO COMPLETA: https://docs.hektorprofe.net/python/distribucion/setuptools/
INFO OFICIAL: https://docs.python.org/3/distutils/setupscript.html
"""
from setuptools import setup
setup(name="Funciones Matematicas", # Nombre
version="0.1", # Versión de desarrollo
description="Paquete de prueba", # Descripción del funcionamiento
author="Nombre Autor", # Nombre del autor
author_email='prueba@prueba.com', # Email del autor
license="GPL", # Licencia: MIT, GPL, GPL 2.0...
url="http://ejemplo.com", # Página oficial (si la hay)
#packages=['paquete','paquete.sub_paquete'], # Para crear paquetes con subpaquetes
py_modules=['funcionesMatematicas','sub_carpeta.funcionesMatematicasAvanzadas'], # Para crear módulos instalables (directamente en la misma ruta que setup.py)
)
"""
SUB PAQUETES
Para el caso de que haya muchos subpaquetes, se puede usar la función find_packages
from setuptools import setup, find_packages
setup(...
packages=find_packages()
)
DEPENDENCIAS EXTERNAS
# Se instala la versión más actual
setup(...,
install_requires=["pillow"],
)
# Se instala la versión exacta indicada
setup(...,
install_requires=["pillow==1.1.0"],
)
# Se instala la versión igual o superior a la indicada
setup(...,
install_requires=["pillow>=1.1.5"],
)
MUCHAS DEPENDENCIAS
Creamos fichero "requirements.txt" con contenido de ejemplo:
pillow==1.1.0
django>=1.10.0,<=1.10.3
pygame
y en el fichero setup.py se indica:
setup(...,
install_requires=[i.strip() for i in open("requirements.txt").readlines()],
)
SUITE TEST
| setup.py
    | requirements.txt
+ prueba/
| __init__.py
| modulo.py
+ tests/
| __init__.py
| test_pillow.py
| test_django.py
| test_pygame.py
setup(...,
test_suite="tests"
)
c:\>python setup.py test
PRUEBA PAQUETE
# Instalación para pruebas
python setup.py develop [--uninstall]
# Instalación definitiva. Para modificar funcionamiento hay que reinstalar con PIP
python setup.py install
DISTRIBUCION PAQUETE
# Crear comprimido para distribuir localmente
python setup.py sdist
# Instalación del paquete en otro entorno local
pip install nombre_del_fichero.zip
"""
| [
"elaris6@gmail.com"
] | elaris6@gmail.com |
4fec274bfab24eb6ba90fb5a46ec128bbc6be095 | baacc8032435bb79a44a1662ebb34cea788ef716 | /correct/mapper.py | 65befa763ec2db0e14da41542df82ec609e7cad4 | [] | no_license | CHIAHSINHOU/NLPProject | de2a9bd2d65dd167b973e936bd9e224a5ba7761d | c4ff0de0978ec2b3d91a09e0a0379facbd480b1d | refs/heads/master | 2021-01-09T20:57:29.240764 | 2016-06-20T05:29:39 | 2016-06-20T05:29:39 | 61,518,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,906 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import fileinput
import random
import codecs
from itertools import izip
from sqlite import bncTag
from collections import defaultdict
import nltk
from nltk.stem import WordNetLemmatizer
# Global nested counter filled in __main__: feq[wrong_lemma][corrected_lemma] -> count.
feq = defaultdict(lambda: defaultdict(lambda: 0))
def words(text):
    """Return every purely alphabetic run found in *text*, in order."""
    return re.findall('[A-Za-z]+', text)
def to_sent(tokens, to_correct=True):
    """Yield the corrected (or original) token stream.

    Tokens wrapped as '{+...+}' are insertions and are kept (unwrapped)
    only when to_correct is True; tokens wrapped as '[-...-]' are
    deletions and are kept only when to_correct is False.  Every other
    token passes through unchanged.
    """
    for tok in tokens:
        is_insertion = tok.startswith('{+') and tok.endswith('+}')
        is_deletion = tok.startswith('[-') and tok.endswith('-]')
        if is_insertion:
            if to_correct:
                yield tok[2:-2]
        elif is_deletion:
            if not to_correct:
                yield tok[2:-2]
        else:
            yield tok
def get_diff_index(list1, list2):
    """Return the first position where the shorter list disagrees with the
    longer one, or None when the shorter list is a prefix of the longer.
    """
    if len(list1) > len(list2):
        shorter, longer = list2, list1
    else:
        shorter, longer = list1, list2
    for pos, element in enumerate(shorter):
        if element != longer[pos]:
            return pos
    return None
# return {'LC1RR':RR[(LC1, w)], 'RC1RR':RR[(w, RC1)], 'LC+RC': RR[(LC1,w)]+RR[(w, RC1)]}
if __name__ == '__main__':
#    words = 'This concert hall was too small to enter all of the audience .'.split()
#    tagged = bncTag(words)
    # WordNet lemmatizer used to reduce adjectives to their base form.
    lemmatizer = WordNetLemmatizer()
#    print lemmatizer.lemmatize('better', pos='a')
    count = 0
    # Read edit-annotated lines from the files named on argv (or stdin).
    # Python 2 script: each line is a byte string and is decoded below.
    for line_no, line in enumerate(fileinput.input()):
#        print line_no
        try:
            line = line.decode('utf-8').strip().lower()
        except:
            # NOTE(review): bare except silently drops undecodable lines.
            continue
        # token: word, edit or consecutive edits
        tokens = re.findall("((((\[\-((?!\-\]).)*\-\])|(\{\+((?!\+\}).)*\+\})))+|[\w-]+|\S+)", line)
        tokens = [ elements[0] for elements in tokens ]
        # output the entry if at least 1 edit exists
        # Two views of the sentence: with edits applied vs. original text.
        words_correct = words(' '.join(to_sent(tokens)))
        words_incorrect = words(' '.join(to_sent(tokens, False)))
#        print words_correct
#        print words_incorrect
        # Position of the first word the edit changed (None if none differ).
        index = get_diff_index(words_correct, words_incorrect)
        correct_tagged = bncTag(words_correct)
        incorrect_tagged = bncTag(words_incorrect)
        if index != None:
            # Only record corrections where both sides are tagged adjective.
            if correct_tagged[index] == 'a' and incorrect_tagged[index] == 'a':
                w1 = lemmatizer.lemmatize(words_incorrect[index], pos='a')
                w2 = lemmatizer.lemmatize(words_correct[index], pos='a')
                if w1 != w2:
                    feq[w1][w2] += 1
                    count +=1
        # Stop after collecting just over 100 corrections.
        if count > 100:
            break
#        print '{}_{}'.format(
#            lemmatizer.lemmatize(words_incorrect[index], pos='a').encode('unicode_escape'),
#            lemmatizer.lemmatize(words_correct[index], pos='a').encode('unicode_escape'))
    # Emit tab-separated triples: wrong_lemma corrected_lemma frequency.
    for key, item in feq.items():
        for w, count in item.items():
            print '{}\t{}\t{}'.format(key.encode('unicode_escape'), w.encode('unicode_escape'), count)
| [
"marquis.js1215@gmail.com"
] | marquis.js1215@gmail.com |
c4a3021747748314e5d24317a1443dab666f7ea8 | 357f48ccbd3e856ed8b4ce5d6954c8fee2de292b | /got10k/trackers/tracker_evaluate.py | 3726b8dd77cd3eb845f57d2fd1dc0b55b0aab13c | [
"MIT"
] | permissive | whjzsy/ESiamFC | f01057f14d84151842b5050aa2a17ce72d774743 | 742ac17323f0a13bcb03a0087583afc3aa13ddc0 | refs/heads/main | 2023-04-13T02:20:29.966522 | 2021-04-25T03:12:06 | 2021-04-25T03:12:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | from got10k.trackers import Tracker
from got10k.experiments import ExperimentGOT10k, ExperimentOTB, ExperimentUAV123, ExperimentVOT,\
ExperimentDTB70, ExperimentNfS, ExperimentLaSOT, ExperimentTColor128
from siamfc import SiamFCTrackerMixCw
import cv2
import numpy as np
# Trackers whose results will be compared in the generated report.
tracker_names = ['SiamFC', 'siamfc_mix_cw4', 'siamfc_mix_cw7', 'siamfc_mix_cw27']
# LaSOT benchmark experiment pointing at a local dataset root (Windows path).
experiment_lasot = ExperimentLaSOT(root_dir=r'E:\dataset\tracker_evaluate_dataset\LaSOT')
# Produce the evaluation report for the listed trackers
# (presumably from previously stored tracking results - see got10k docs).
experiment_lasot.report(tracker_names)
| [
"noreply@github.com"
] | noreply@github.com |
ce3be2e0574e1ed136c469dfa1ef2ac357ed40cc | dfb8d3c365bd2ea27cef9af5cb00b7be1dae978d | /train.py | b23821e543d1b2a73205ead7e410f2b5b7bac887 | [
"MIT"
] | permissive | Windstudent/IRM-based-Speech-Enhancement-using-DNN | dd0cedfd4150fed69c55d33a744d0a6520fdf2d5 | 27a6f73b5b7fa91a4796e093e6ea3e30508a5c15 | refs/heads/master | 2020-07-05T15:34:52.712226 | 2019-05-07T14:36:40 | 2019-05-07T14:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | import argparse
import json
import os
import numpy as np
import torch
from torch.utils.data import DataLoader
from data.test_dataset import TestDataset
from data.train_dataset import TrainDataset
from trainer.trainer import Trainer
from utils.utils import initialize_config
def main(config, resume):
    """Entry point of the training script.

    Steps:
        1. Seed the RNGs for reproducibility.
        2. Build the training / validation datasets and data loaders.
        3. Instantiate the model, the optimizer and the loss function.
        4. Hand everything to the Trainer and start training.

    Args:
        config (dict): experiment configuration (parsed JSON).
        resume (bool): whether to resume from the latest checkpoint.
    """
    torch.manual_seed(config["seed"])
    np.random.seed(config["seed"])

    training_set = TrainDataset(
        mixture_dataset=config["train_dataset"]["mixture"],
        mask_dataset=config["train_dataset"]["clean"],
        limit=config["train_dataset"]["limit"],
        offset=config["train_dataset"]["offset"],
    )
    training_loader = DataLoader(
        dataset=training_set,
        batch_size=config["train_dataset"]["batch_size"],
        num_workers=config["train_dataset"]["num_workers"],
        shuffle=config["train_dataset"]["shuffle"],
    )

    validation_set = TestDataset(
        mixture_dataset=config["valid_dataset"]["mixture"],
        clean_dataset=config["valid_dataset"]["clean"],
        limit=config["valid_dataset"]["limit"],
        offset=config["valid_dataset"]["offset"],
    )
    validation_loader = DataLoader(dataset=validation_set)

    model = initialize_config(config["model"])
    optimizer = torch.optim.Adam(
        params=model.parameters(),
        lr=config["optimizer"]["lr"],
    )
    loss_function = initialize_config(config["loss_function"])

    trainer = Trainer(
        config=config,
        resume=resume,
        model=model,
        loss_function=loss_function,
        optim=optimizer,
        train_dl=training_loader,
        validation_dl=validation_loader,
    )
    trainer.train()
if __name__ == '__main__':
    # Command-line interface of the training script.
    parser = argparse.ArgumentParser(description='IRM Estimation using DNN in Speech Enhancement')
    parser.add_argument("-C", "--config", required=True, type=str, help="训练配置文件(*.json)")
    parser.add_argument('-D', '--device', default=None, type=str, help="本次实验使用的 GPU 索引,e.g. '1,2,3'")
    parser.add_argument("-R", "--resume", action="store_true", help="是否从最近的一个断点处继续训练")
    args = parser.parse_args()
    # Restrict the visible GPUs before any CUDA context is created.
    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # load config file
    # Fix: use a context manager so the config file handle is closed
    # (the original `json.load(open(...))` leaked the descriptor).
    with open(args.config) as config_file:
        config = json.load(config_file)
    # Remember where the configuration came from (used by the Trainer).
    config["train_config_path"] = args.config
    main(config, resume=args.resume)
| [
"haoxiangsnr@gmail.com"
] | haoxiangsnr@gmail.com |
b88a7058a2033f293cfcbc9c5e9fdc372f1fd63b | 7341fa30ae20d00e9df62bff3cdf2f185c41b33a | /Codeup/src_6/1076.py | 31a1bacb367fc451e2acaf2a14b2745eb366f86c | [] | no_license | kkimke/Algorithm | 4ecaee6f90e796ed0bf66bed0d17128f8d0f48d0 | 8f8a692dda850b626ae443bf25fdefb160649c36 | refs/heads/master | 2023-03-07T13:55:37.730743 | 2021-02-21T15:17:23 | 2021-02-21T15:17:23 | 334,133,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | value = input()
# Print every lowercase letter from 'a' up to and including the letter read
# into `value`, each followed by a single space (no trailing newline handling).
last_code = ord(value)
for code in range(ord('a'), last_code + 1):
    print(chr(code), end=' ')
| [
"k_k_eun@naver.com"
] | k_k_eun@naver.com |
dd4307bb2aab043668db4211e55bef131034b27d | cf3d8e0edb97b96dc996b52d13ea740f40a7758e | /hw5/calc_emp_exp.py | 5ed3465c7677bb6b492d48846f4324c123617d57 | [] | no_license | HanfeiChen/ling572 | e76b80efdc0d5bd59963a39a9d7c0a27908069ce | ea70faaf21c09151d416e8d43035f00d798af407 | refs/heads/main | 2023-04-29T11:01:59.883541 | 2021-03-10T22:37:02 | 2021-03-10T22:37:02 | 328,307,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,389 | py | import sys
from collections import Counter, defaultdict
from typing import Tuple
def load_data(file_path: str) -> Tuple:
    """Parse a whitespace-separated "label feat:count ..." file.

    Blank lines are skipped.  Returns (x, y) where x is a list of Counter
    feature vectors and y is the parallel list of label strings.
    """
    feature_vectors, labels = [], []
    with open(file_path, 'r') as in_file:
        for line in in_file:
            if not line.strip():
                continue
            label, *feature_strings = line.split()
            counts = Counter()
            for feat_str in feature_strings:
                name, raw_count = feat_str.split(':')
                counts[name] = int(raw_count)
            feature_vectors.append(counts)
            labels.append(label)
    return feature_vectors, labels
if __name__ == '__main__':
    # Usage: calc_emp_exp.py <train_data> <output_file>
    TRAIN_DATA, OUTPUT_FILE = sys.argv[1:3]
    x_train, y_train = load_data(TRAIN_DATA)
    # Count, per (label, feature) pair, how many training examples have the
    # feature active (value > 0).
    raw_counts = Counter()
    for xi, yi in zip(x_train, y_train):
        for feat, value in xi.items():
            if value > 0:
                raw_counts[yi, feat] += 1
    # Empirical expectation = raw count / number of training examples;
    # defaultdict(float) makes unseen (label, feature) pairs read as 0.0.
    expectations = defaultdict(float, {k: v / len(y_train) for k, v in raw_counts.items()})
    label_set = set(y_train)
    # Collect every feature name seen in any training example.
    feat_set = set()
    for xi in x_train:
        for feat, value in xi.items():
            feat_set.add(feat)
    # Emit one line per (label, feature): label feat expectation raw_count,
    # sorted by label then feature for deterministic output.
    with open(OUTPUT_FILE, 'w') as of:
        for label in sorted(label_set):
            for feat in sorted(feat_set):
                print(label, feat, f'{expectations[label, feat]:.5f}', raw_counts[label, feat],
                      file=of)
| [
"hanfec@cs.washington.edu"
] | hanfec@cs.washington.edu |
ae67ba6fa52cde3b51fcce22a0b076528d1ae5d6 | 21b632797ed6257b13574c341cdd14e6534728a9 | /ryu/app/simple_switch_13.py | e0b90fb2286134a15afe5653cb7a916d20db0bbd | [
"Apache-2.0"
] | permissive | MrCocoaCat/ryu | 0473f04e2a840e027e9002f8a6af81745eaf7094 | 9e9571991a73380099b7ba7c6f37e0e587080a6a | refs/heads/master | 2021-06-19T18:09:52.833590 | 2020-05-12T08:17:21 | 2020-05-12T08:17:21 | 163,072,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,319 | py | # -*- coding: utf-8 -*-
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
from ryu.lib.packet import ether_types
# L2 learning switch application; inherits from ryu.base.app_manager.RyuApp.
class SimpleSwitch13(app_manager.RyuApp):
    # Restrict this app to OpenFlow 1.3 switches.
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]
    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # MAC learning table: datapath id -> {source MAC -> ingress port}.
        self.mac_to_port = {}
    def add_flow(self, datapath, priority, match, actions, buffer_id=None):
        """Install a flow entry that applies *actions* to packets matching *match*."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # Instructions tell the switch what to do with a matching packet:
        #   OFPInstructionGotoTable     - continue processing in another table
        #   OFPInstructionWriteMetadata - pass metadata on to the next table
        #   OFPInstructionActions       - write/apply an action set (replacing
        #                                 duplicates already in the set)
        #   OFPInstructionMeter         - send the packet to a meter table
        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        # OFPIT_APPLY_ACTIONS executes the actions on the packet immediately.
        if buffer_id:
            # FlowMod that also releases the packet buffered on the switch.
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst)
        # Push the flow modification down to the switch.
        datapath.send_msg(mod)
    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        """SwitchFeatures handler (CONFIG_DISPATCHER): install the table-miss entry."""
        # ev.msg holds the OFPSwitchFeatures message instance; its datapath
        # object represents the switch connection that produced the event.
        datapath = ev.msg.datapath
        #print("{:0>16x}".format(datapath.id))
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        # install table-miss flow entry
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly. The bug has been fixed in OVS v2.1.0.
        # Empty match: fires when a packet matches no ordinary flow entry,
        # which triggers a Packet-In towards the controller.
        match = parser.OFPMatch()
        # OUTPUT action towards the controller; OFPCML_NO_BUFFER sends the
        # whole packet in the Packet-In instead of buffering it on the switch.
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        # Priority 0 is the lowest, so every specific entry wins first.
        self.add_flow(datapath, 0, match, actions)
    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Packet-In handler (MAIN_DISPATCHER): learn source MACs and forward."""
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']
        # Parse the Ethernet header to obtain source/destination MACs.
        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]
        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src
        dpid = datapath.id
        # Create this switch's MAC table on first contact; setdefault leaves
        # an already-existing table untouched.
        self.mac_to_port.setdefault(dpid, {})
        # self.logger.info("packet in %x %s %s %s", dpid, src, dst, in_port)
        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port
        # print self.mac_to_port
        # Known destination -> use its learned port; otherwise flood.
        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD
        # Action: emit the packet on the chosen port.
        actions = [parser.OFPActionOutput(out_port)]
        # install a flow to avoid packet_in next time
        # (only when the output port is known, i.e. not flooding)
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst)
            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions)
        # Flooded traffic installs no flow entry.  Include the raw bytes in
        # the PacketOut when the switch did not buffer the packet itself.
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data
        # OFPPacketOut: the controller uses this message to send a packet out
        # through the switch.
        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        # Hand the packet back to the switch for transmission.
        datapath.send_msg(out)
| [
"liyubo_liyubo@163.com"
] | liyubo_liyubo@163.com |
ccb2f81c785195a99f73977f2c1b74bdc2d3aaf2 | d224718f35e0f2bb9bfeac383f02f4def63b4ab9 | /manage.py | 62097d715b7c9913ff93648238519ea43ac74cac | [
"MIT"
] | permissive | sknzl/django_scooter | aab33f0427b4f1366e95266612290fa046812fe1 | f567d745057dbbcb72fb874fff3761d7b61781da | refs/heads/master | 2022-12-12T10:20:37.055888 | 2020-02-08T19:30:04 | 2020-02-08T19:30:04 | 226,722,024 | 3 | 0 | MIT | 2022-12-08T03:16:49 | 2019-12-08T19:46:50 | JavaScript | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative tasks for this project.

    Points Django at the project settings module (unless the environment
    already set DJANGO_SETTINGS_MODULE) and forwards the command line to
    Django's management command dispatcher.
    """
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_scooter.settings')
    try:
        # Imported lazily so a missing Django install produces the friendly
        # error below instead of a bare ImportError at module import time.
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
# Standard Django entry point: dispatch the CLI when executed directly.
if __name__ == '__main__':
    main()
| [
"swenkuenzel@gmail.com"
] | swenkuenzel@gmail.com |
b24ceb6287ace19ba8f665a9ecd677df1c9687e5 | d211cad66692c969da1fb6537b6d166278cd3041 | /tr_project/tr_project/wsgi.py | 3d4ec084c7132dbd4908af5fd9c6500f780c93ad | [] | no_license | breabrown/Treasure-Roam | 795658d04c69702a1eebeb382138f86d300a76a3 | 587ca52b1c90079ec63cf00b7296a9ed0e29cec7 | refs/heads/main | 2023-08-11T20:37:12.232934 | 2021-09-17T23:10:13 | 2021-09-17T23:10:13 | 402,581,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for tr_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Tell Django which settings module to use before building the application;
# setdefault lets the deployment environment override it.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tr_project.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"breabrown96@gmail.com"
] | breabrown96@gmail.com |
1fcbc83052d0923c11d5d540ba267bcd6be93890 | 0d39507b93a285b87d619a2871cd56a8d3b5981f | /time_entries/migrations/0002_auto_20190617_1639.py | c95dccec31d3ab1b9e8c2b1985f3ae03aade9d19 | [] | no_license | alexveklenko/project_management_system | 0d4c848f044196da0fec0fb46e29ad5d517bce29 | 5da512ed74cecda6ca0841fd8aec713e62290867 | refs/heads/master | 2022-12-11T16:03:10.837320 | 2019-06-26T15:38:30 | 2019-06-26T15:38:30 | 191,401,515 | 0 | 0 | null | 2022-12-08T06:19:44 | 2019-06-11T15:35:45 | JavaScript | UTF-8 | Python | false | false | 381 | py | # Generated by Django 2.2.1 on 2019-06-17 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow TimeEntry.comment to be left blank.

    NOTE: produced by Django's makemigrations; regenerate rather than
    editing the schema operations by hand.
    """
    dependencies = [
        ('time_entries', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='timeentry',
            name='comment',
            field=models.TextField(blank=True),
        ),
    ]
| [
"alexi.veklenko@gmail.com"
] | alexi.veklenko@gmail.com |
b0b1ff9b1713b7cd34558ecca67f4f5512693ffb | 4261aab803f59d4ac2c63c34e43517ed73a36c87 | /anno_lemur/ebola/nist/findmore2.py | bcf40fc1f1e4e1894a0d7df258322730bffd6b77 | [] | no_license | zigchg/MemexCHT | 0a9c5b37b84c5dc5e932df2fb11de5a8abd5c1ef | c22c830821c56f79b9230b0a0ca5e9298125dde3 | refs/heads/master | 2021-06-19T05:42:38.471138 | 2017-06-22T05:11:29 | 2017-06-22T05:11:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | import requests
from requests.auth import HTTPBasicAuth
import sys
import json
# The triple-quoted block below is the original (disabled) Solr query; the
# script currently just prints a fixed result set instead.
# NOTE(review): the disabled code embeds credentials in source - move them
# to configuration before re-enabling.
'''
solrURL = "http://cs-sys-1.uis.georgetown.edu/solr/ebola/browse?wt=json&start=0&rows=150&q=%s"%sys.argv[1]
username = "infosense"
password = "test@123"
r = requests.get(solrURL, auth=HTTPBasicAuth(username, password))
data = json.loads(r.content)
for doc in data['response']['docs']:
    print doc['id']
'''
# Hard-coded output (Python 2 print statements): two markers whose meaning is
# not documented, then three ebola document ids.
print '11111'
print 'eeeeee'
print "ebola-3b78191de8b6c6f4473d3d9930fbcd35589d8ba07e3badaccb427a48a9f1ab20"
print "ebola-0f817f9da047f85685886f8cd8aa3b63b0ab8caeaa206a556a8ad491b2e21b15"
print "ebola-293d9564803185d09cbec3a4e8ff14d01859b0fdbe7c9f78e8f9df77600984ad"
| [
"cz211@cs-sys-1.uis.georgetown.edu"
] | cz211@cs-sys-1.uis.georgetown.edu |
241fc145e05f64e44afafeafac15d1ad80858bfa | 1e24c2340f13da991a50a297d050eee790905267 | /swigibpy/IButils.py | 8501e3b647b6d5c17a1d745d139bfaebcc380a2a | [] | no_license | viponedream/quantway | 5f0c49b3cd40b6d005f7b3c33c20f00526205fed | f7c2ecfca3533cb2c1a41ab3e8496004dc7b17d2 | refs/heads/master | 2020-12-11T05:35:12.461213 | 2016-02-16T08:48:09 | 2016-02-16T08:48:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,644 | py | import pandas as pd
import numpy as np
DEFAULT_VALUE=np.nan
class autodf(object):
    '''
    Object to make it easy to add data in rows and return a pandas DataFrame.

    Initialise with autodf("name1", "name2", ...)
    Add rows with autodf.add_row(name1=..., name2=...)
    Convert to a data frame with autodf.to_pandas()
    '''
    def __init__(self, *args):
        # One value list per column name, in the order the names were given.
        self.keynames = args
        self.storage = {keyname: [] for keyname in self.keynames}
    def add_row(self, **kwargs):
        # Append one value per column; columns missing from kwargs receive
        # DEFAULT_VALUE (NaN) so all columns stay the same length.
        for keyname in self.storage.keys():
            if keyname in kwargs:
                self.storage[keyname].append(kwargs[keyname])
            else:
                self.storage[keyname].append(DEFAULT_VALUE)
    def to_pandas(self, indexname=None):
        '''Return the accumulated rows as a DataFrame.

        If *indexname* is given, that column becomes the frame's index and
        is dropped from the data columns.

        Bug fix: operate on a shallow copy of the storage dict.  The
        original code popped the index column out of self.storage itself,
        permanently corrupting the accumulator for later add_row() /
        to_pandas() calls.
        '''
        if indexname is not None:
            data = dict(self.storage)  # copy: never mutate self.storage
            index = data.pop(indexname)
            return pd.DataFrame(data, index=index)
        return pd.DataFrame(self.storage)
def bs_resolve(x):
    """Map a signed trade quantity to its order side.

    x < 0 -> 'SELL'; x > 0 -> 'BUY'; exactly zero raises, because a
    zero-size trade is a programming error.
    """
    if x < 0:
        return 'SELL'
    elif x > 0:
        return 'BUY'
    elif x == 0:
        raise Exception("trying to trade with zero")
def action_ib_fill(execlist):
    """
    Get fills (either ones that have just happened, or when asking for orders)

    Note that fills are cumulative, eg for an order of +10 first fill would be +3, then +9, then +10
    implying we got 3,6,1 lots in consecutive fills

    The price of each fill then is the average price for the order so far
    """
    # Python 2 print statements: dump the raw execution list for inspection.
    print "recived fill as follows:"
    print ""
    print execlist
    print ""
| [
"borderj@gmail.com"
] | borderj@gmail.com |
59ba99753b2ed65816cd83e17a727c06c3945eb8 | 3f69114d6af9a978f976452ad177faeaee0b9c20 | /seq2seq.py | 2c7adec4ce8af33f0f4f42d2562a26d735d71cd2 | [] | no_license | JisuHann/2019-Naver-AIHackerton | b41ee393cf9fab9245d9d54ae6c772aec9c1ce64 | 4df41da6897714ee8c2ee62c990c7b2dcb2a5a74 | refs/heads/master | 2022-11-07T04:26:35.858170 | 2020-07-04T06:33:02 | 2020-07-04T06:33:02 | 213,789,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | import time
import sys
import torch.nn as nn
import torch.nn.functional as F
class Seq2seq(nn.Module):
    """Container pairing an encoder with a decoder for sequence-to-sequence runs.

    The encoder consumes the input sequence; its outputs and final hidden
    state are handed to the decoder, whose first result element is returned.
    """

    def __init__(self, encoder, decoder, decode_function=F.log_softmax):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.decode_function = decode_function

    def flatten_parameters(self):
        # Compact the cuDNN weight buffers of both sub-modules' RNNs.
        for module in (self.encoder, self.decoder):
            module.rnn.flatten_parameters()

    def forward(self, input_variable, input_lengths=None, target_variable=None,
                teacher_forcing_ratio=0):
        # Encode the source sequence, then let the decoder consume it.
        encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)
        decoded = self.decoder(inputs=target_variable,
                               encoder_hidden=encoder_hidden,
                               encoder_outputs=encoder_outputs,
                               function=self.decode_function,
                               teacher_forcing_ratio=teacher_forcing_ratio)
        return decoded[0]
| [
"jshcdi6658@gmail.com"
] | jshcdi6658@gmail.com |
0b2c7b6c78f2f20e685b99106e28b2dcfabe7a03 | 9d852841463c64f75da8a8579c32cea856d2073d | /leetcode/validate_binary_search_tree.py | 4931cdb9a7da506dea78bd8a759a89b592284296 | [] | no_license | LarsIndus/algorithms-DS | 2d94a5ba3e17de7c8d9e7ac4ace8eb70bb2a7331 | 32a64a4522f8474ab63421b06e945f6e44a441e1 | refs/heads/master | 2023-04-26T00:13:06.026785 | 2021-05-20T18:55:12 | 2021-05-20T18:55:12 | 243,239,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,476 | py | """
Leetcode Problem 98: Validate Binary Search Tree (Medium)
Given the root of a binary tree, determine if it is a valid binary search tree (BST).
A valid BST is defined as follows:
- The left subtree of a node contains only nodes with keys less than the node's key.
- The right subtree of a node contains only nodes with keys greater than the node's key.
- Both the left and right subtrees must also be binary search trees.
Complexity for this solution:
O(n) time and space
Source: https://www.youtube.com/watch?v=ofuXorE-JKE
"""
# Node implementation --------------------------------------------------------
class newNode:
    """Plain binary-tree node holding a value and two child links."""

    def __init__(self, data):
        # Children start empty; callers wire them up directly.
        self.data = data
        self.left = None
        self.right = None
# Solution -------------------------------------------------------------------
def is_valid_BST(root):
    """Return True when the tree rooted at *root* respects the BST bounds.

    NOTE(review): `helper` accepts values equal to a bound, so duplicates
    of an ancestor key pass; main()'s test 3 relies on this, even though
    strict LeetCode 98 semantics would reject duplicates.
    """
    return helper(root, float("-inf"), float("inf"))
def helper(root, min_value, max_value):
    """Recursively check every node's value against [min_value, max_value]."""
    if root is None:
        return True
    if not (min_value <= root.data <= max_value):
        return False
    left_ok = helper(root.left, min_value, root.data)
    right_ok = helper(root.right, root.data, max_value)
    return left_ok and right_ok
# Testing --------------------------------------------------------------------
def main():
    """Smoke-test is_valid_BST by printing pass/fail for four fixed trees."""
    # Test 1: Empty tree
    tree = None
    if is_valid_BST(tree):
        print("Passed test 1 (emtpy tree).")
    else:
        print("Test 1 (empty tree) failed!")
    # Test 2: Only root node
    tree = newNode(1)
    if is_valid_BST(tree):
        print("Passed test 2 (only root node).")
    else:
        print("Test 2 (only root node) failed!")
    # Test 3: Valid BST
    # (the right subtree repeats the root key 2, which this implementation's
    # inclusive bounds accept - see helper.)
    tree = newNode(2)
    tree.left = newNode(1)
    tree.right = newNode(3)
    tree.left.left = newNode(0)
    tree.right.left = newNode(2)
    tree.right.right = newNode(9)
    if is_valid_BST(tree):
        print("Passed test 3 (valid tree).")
    else:
        print("Test 3 (valid tree) failed!")
    # Test 4: Non-valid BST
    # (node 1 inside the right subtree violates the lower bound of 2.)
    tree = newNode(2)
    tree.left = newNode(1)
    tree.right = newNode(3)
    tree.left.left = newNode(0)
    tree.right.left = newNode(1)
    tree.right.right = newNode(9)
    if not is_valid_BST(tree):
        print("Passed test 4 (non-valid tree).")
    else:
        print("Test 4 (non-valid tree) failed!")
if __name__ == '__main__':
main() | [
"test@test.com"
] | test@test.com |
917ee22efda930af791d71d0b9bbed0db2238f83 | 86fad291feed8c051a7690956698f2d82d2a3f8d | /Tomato_all_other.py | 16b4064e0592bbf4817272b2c5a158df3b54bf3c | [] | no_license | abhinavshukla021/AgroMl | 94289624dea63ec4d72c825fbec8a0e82ff0186e | 178a480b96c043569a4410a0145f056b53188585 | refs/heads/master | 2020-06-04T05:05:51.515755 | 2019-07-18T07:55:05 | 2019-07-18T07:55:05 | 191,883,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,515 | py | def Tomato_all_others():
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from array import *
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC, LinearSVC
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import collections
import itertools
Rh2 = np.arange(75,100,0.1)
Rh3 = np.arange(75,100,0.1)
Rh4 = np.arange(70,100,0.1)
Rh5 = np.arange(80,100,0.1)
T2 = np.arange(20,25,0.1)
T3 = np.arange(25,30,0.1)
T4 = np.arange(25,30,0.1)
T5 = np.arange(15,21,0.1)
dict = { 0:'Damping Off', 1:'Septorial Leaf Spot', 2:'Bacterial Stem and Fruit Canker', 3:'Early Blight', 4:'Bacterial Leaf Spot'}
Stage = ['Branching', 'Flowering', 'Fruiting','Seedling', 'Stem Elongation']
df2 = pd.DataFrame(data=(list(itertools.product(Rh2,T2,[1]))),columns=['Rh', 'T', 'Disease'])
print(df2.shape)
df3 = pd.DataFrame(data=(list(itertools.product(Rh3,T3,[2]))),columns=['Rh', 'T', 'Disease'])
print(df3.shape)
df4 = pd.DataFrame(data=(list(itertools.product(Rh4,T4,[3]))),columns=['Rh', 'T', 'Disease'])
print(df4.shape)
df5 = pd.DataFrame(data=(list(itertools.product(Rh5,T5,[4]))),columns=['Rh', 'T', 'Disease'])
print(df5.shape)
df = df2.append(df3.append(df4.append(df5, ignore_index=True),ignore_index=True),ignore_index=True)
features = ['Rh','T']
df = df.sample(frac=1).reset_index(drop = True)
x = df.loc[:,features].values
y = df.loc[:,['Disease']].values
l1 = [ 'Septorial Leaf Spot', 'Bacterial Stem and Fruit Canker', 'Early Blight', 'Bacterial Leaf Spot' ]
colors = 'krby'
l = [1,2,3,4]
for i, color in zip(l, colors):
idx = np.where(y == i)
plt.scatter(x[idx, 0], x[idx, 1], c=color, edgecolor='black', cmap=plt.cm.Paired, s=20)
plt.xlabel('Rh')
plt.ylabel('T')
plt.title('Variation of Diseases with Temperature and Relative Humidity')
plt.legend(l1)
x_tr, x_te, y_tr, y_te = train_test_split(x,y,test_size=0.1)
x_de, x_te, y_de, y_te = train_test_split(x_te,y_te, test_size=0.5)
clf1 = SVC(kernel='poly', probability=True).fit(x_tr,y_tr)
print(clf1.score(x_tr,y_tr.astype('int')))
print(clf1.score(x_de,y_de.astype('int')))
print(clf1.score(x_te,y_te.astype('int')))
return clf1
| [
"noreply@github.com"
] | noreply@github.com |
c5f2db5dd1aae4f0c5402f0642cd0648f098b6d5 | 7d556e5c3dc0b5fe15c57795574a5adbb79e8ebc | /api.py | 7e4549bf70b9eb6f9d21d4cc50fa6e24a93ea8a7 | [] | no_license | Ummamali/burgerBuilderBackend | 6ef385a410929c74d3d288e4e1b6f4f2093933c6 | 150a7c247cdb59d023e66405af03c06fcf669a6e | refs/heads/master | 2023-08-17T07:47:26.986055 | 2021-09-14T19:10:02 | 2021-09-14T19:10:02 | 356,675,829 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,030 | py | from flask import Flask, jsonify, request
from flask_cors import cross_origin
from time import sleep
from json import dumps, loads, load, dump
app = Flask(__name__)
prices = {
'salad': 10,
'bacon': 10,
'cheese': 10,
'meat': 10
}
orders = 0
@app.route('/prices')
@cross_origin()
def data_loader():
    """GET /prices: return the ingredient price table."""
    sleep(1)  # fixed 1-second delay before responding
    payload = {'status': 200, 'data': prices}
    return jsonify(payload)
@app.route('/order', methods=('POST', ))
@cross_origin()
def order():
    """POST /order: persist an order when the request carries the expected token."""
    req_json = request.get_json()
    if req_json.get('token') != 2019:
        return jsonify({'status': 401, 'msg': 'Unauthorized request.'})
    # Token accepted: save_data stamps the order with id == orders and then
    # increments the counter, hence `orders - 1` below.
    save_data(req_json['data'])
    return jsonify({'status': 200, 'id': orders - 1})
def save_data(data):
    """Append *data* to data.json, stamping it with the next order id."""
    sleep(2)  # fixed 2-second delay before persisting
    global orders
    # Read the current order list, append, and rewrite the whole file.
    # NOTE(review): this read-modify-write sequence is not atomic, and the
    # path 'data.json' is relative to the process working directory.
    with open('data.json', mode='r') as f:
        order_list = load(f)
    data['id'] = orders
    order_list.append(data)
    with open('data.json', mode='w') as f:
        dump(order_list, f, indent=2)
    orders += 1
if __name__ == "__main__":
app.run(debug=True) | [
"ummamali1@gmail.com"
] | ummamali1@gmail.com |
5bba101ad14c50d3036fcbeb1308b66a5996c235 | 38744aa4f3ba165a8c043ac51c87b849882ea129 | /game/lib/python3.7/site-packages/Security/_metadata.py | 7cce69ed3c96048306a7eabc4a36fc086d625bc0 | [] | no_license | CleverParty/containers | 5be3c82e38e65ccbaf703fe68f35992ad9941219 | a0d45e62fda2cb7b047c7a930cf6437e71a31d80 | refs/heads/master | 2023-08-04T01:32:58.122067 | 2021-02-07T15:14:35 | 2021-02-07T15:14:35 | 266,421,416 | 2 | 0 | null | 2021-09-22T19:39:31 | 2020-05-23T21:20:17 | Python | UTF-8 | Python | false | false | 107,766 | py | # This file is generated by objective.metadata
#
# Last update: Sun Mar 22 17:27:39 2020
#
# flake8: noqa
import objc, sys
# Pick the 32- or 64-bit variant of a metadata value: on 64-bit Pythons
# sel32or64(a, b) returns b, on 32-bit ones it returns a.
if sys.maxsize > 2 ** 32:
    def sel32or64(a, b):
        return b
else:
    def sel32or64(a, b):
        return a
# Struct types bridged from the Security framework, keyed by their C name.
# (Auto-generated by objective.metadata - regenerate rather than hand-edit.)
misc = {}
misc.update(
    {
        "AuthorizationExternalForm": objc.createStructType(
            "AuthorizationExternalForm",
            b"{_AuthorizationExternalForm=[32C]}",
            ["bytes"],
        ),
        "SecKeychainSettings": objc.createStructType(
            "SecKeychainSettings",
            b"{SecKeychainSettings=IZZI}",
            ["version", "lockOnSleep", "useLockInterval", "lockInterval"],
        ),
        "SecItemImportExportKeyParameters": objc.createStructType(
            "SecItemImportExportKeyParameters",
            b"{_SecItemImportExportKeyParameters=II@@@@@@}",
            [
                "version",
                "flags",
                "passphrase",
                "alertTitle",
                "alertPrompt",
                "accessRef",
                "keyUsage",
                "keyAttributes",
            ],
        ),
        "SecKeychainCallbackInfo": objc.createStructType(
            "SecKeychainCallbackInfo",
            b"{SecKeychainCallbackInfo=I@@i}",
            ["version", "item", "keychain", "pid"],
        ),
        "AuthorizationItem": objc.createStructType(
            "AuthorizationItem",
            b"{_AuthorizationItem=^cL^vI}",
            ["name", "valueLength", "value", "flags"],
        ),
    }
)
constants = """$kCMSEncoderDigestAlgorithmSHA1$kCMSEncoderDigestAlgorithmSHA256$kSSLSessionConfig_3DES_fallback$kSSLSessionConfig_ATSv1$kSSLSessionConfig_ATSv1_noPFS$kSSLSessionConfig_RC4_fallback$kSSLSessionConfig_TLSv1_3DES_fallback$kSSLSessionConfig_TLSv1_RC4_fallback$kSSLSessionConfig_TLSv1_fallback$kSSLSessionConfig_anonymous$kSSLSessionConfig_default$kSSLSessionConfig_legacy$kSSLSessionConfig_legacy_DHE$kSSLSessionConfig_standard$kSecACLAuthorizationAny$kSecACLAuthorizationChangeACL$kSecACLAuthorizationChangeOwner$kSecACLAuthorizationDecrypt$kSecACLAuthorizationDelete$kSecACLAuthorizationDerive$kSecACLAuthorizationEncrypt$kSecACLAuthorizationExportClear$kSecACLAuthorizationExportWrapped$kSecACLAuthorizationGenKey$kSecACLAuthorizationImportClear$kSecACLAuthorizationImportWrapped$kSecACLAuthorizationIntegrity$kSecACLAuthorizationKeychainCreate$kSecACLAuthorizationKeychainDelete$kSecACLAuthorizationKeychainItemDelete$kSecACLAuthorizationKeychainItemInsert$kSecACLAuthorizationKeychainItemModify$kSecACLAuthorizationKeychainItemRead$kSecACLAuthorizationLogin$kSecACLAuthorizationMAC$kSecACLAuthorizationPartitionID$kSecACLAuthorizationSign$kSecAttrAccess$kSecAttrAccessControl$kSecAttrAccessGroup$kSecAttrAccessGroupToken$kSecAttrAccessible$kSecAttrAccessibleAfterFirstUnlock$kSecAttrAccessibleAfterFirstUnlockThisDeviceOnly$kSecAttrAccessibleAlways$kSecAttrAccessibleAlwaysThisDeviceOnly$kSecAttrAccessibleWhenPasscodeSetThisDeviceOnly$kSecAttrAccessibleWhenUnlocked$kSecAttrAccessibleWhenUnlockedThisDeviceOnly$kSecAttrAccount$kSecAttrApplicationLabel$kSecAttrApplicationTag$kSecAttrAuthenticationType$kSecAttrAuthenticationTypeDPA$kSecAttrAuthenticationTypeDefault$kSecAttrAuthenticationTypeHTMLForm$kSecAttrAuthenticationTypeHTTPBasic$kSecAttrAuthenticationTypeHTTPDigest$kSecAttrAuthenticationTypeMSN$kSecAttrAuthenticationTypeNTLM$kSecAttrAuthenticationTypeRPA$kSecAttrCanDecrypt$kSecAttrCanDerive$kSecAttrCanEncrypt$kSecAttrCanSign$kSecAttrCanUnwrap$kSecAttrCanVerify$kSecAttrC
anWrap$kSecAttrCertificateEncoding$kSecAttrCertificateType$kSecAttrComment$kSecAttrCreationDate$kSecAttrCreator$kSecAttrDescription$kSecAttrEffectiveKeySize$kSecAttrGeneric$kSecAttrIsExtractable$kSecAttrIsInvisible$kSecAttrIsNegative$kSecAttrIsPermanent$kSecAttrIsSensitive$kSecAttrIssuer$kSecAttrKeyClass$kSecAttrKeyClassPrivate$kSecAttrKeyClassPublic$kSecAttrKeyClassSymmetric$kSecAttrKeySizeInBits$kSecAttrKeyType$kSecAttrKeyType3DES$kSecAttrKeyTypeAES$kSecAttrKeyTypeCAST$kSecAttrKeyTypeDES$kSecAttrKeyTypeDSA$kSecAttrKeyTypeEC$kSecAttrKeyTypeECDSA$kSecAttrKeyTypeECSECPrimeRandom$kSecAttrKeyTypeRC2$kSecAttrKeyTypeRC4$kSecAttrKeyTypeRSA$kSecAttrLabel$kSecAttrModificationDate$kSecAttrPRF$kSecAttrPRFHmacAlgSHA1$kSecAttrPRFHmacAlgSHA224$kSecAttrPRFHmacAlgSHA256$kSecAttrPRFHmacAlgSHA384$kSecAttrPRFHmacAlgSHA512$kSecAttrPath$kSecAttrPersistantReference$kSecAttrPersistentReference$kSecAttrPort$kSecAttrProtocol$kSecAttrProtocolAFP$kSecAttrProtocolAppleTalk$kSecAttrProtocolDAAP$kSecAttrProtocolEPPC$kSecAttrProtocolFTP$kSecAttrProtocolFTPAccount$kSecAttrProtocolFTPProxy$kSecAttrProtocolFTPS$kSecAttrProtocolHTTP$kSecAttrProtocolHTTPProxy$kSecAttrProtocolHTTPS$kSecAttrProtocolHTTPSProxy$kSecAttrProtocolIMAP$kSecAttrProtocolIMAPS$kSecAttrProtocolIPP$kSecAttrProtocolIRC$kSecAttrProtocolIRCS$kSecAttrProtocolLDAP$kSecAttrProtocolLDAPS$kSecAttrProtocolNNTP$kSecAttrProtocolNNTPS$kSecAttrProtocolPOP3$kSecAttrProtocolPOP3S$kSecAttrProtocolRTSP$kSecAttrProtocolRTSPProxy$kSecAttrProtocolSMB$kSecAttrProtocolSMTP$kSecAttrProtocolSOCKS$kSecAttrProtocolSSH$kSecAttrProtocolTelnet$kSecAttrProtocolTelnetS$kSecAttrPublicKeyHash$kSecAttrRounds$kSecAttrSalt$kSecAttrSecurityDomain$kSecAttrSerialNumber$kSecAttrServer$kSecAttrService$kSecAttrSubject$kSecAttrSubjectKeyID$kSecAttrSyncViewHint$kSecAttrSynchronizable$kSecAttrSynchronizableAny$kSecAttrTokenID$kSecAttrTokenIDSecureEnclave$kSecAttrType$kSecBase32Encoding$kSecBase64Encoding$kSecCFErrorArchitecture$kSecCFErrorGuestAttributes$kSecCFErrorInfoPlis
t$kSecCFErrorPath$kSecCFErrorPattern$kSecCFErrorRequirementSyntax$kSecCFErrorResourceAdded$kSecCFErrorResourceAltered$kSecCFErrorResourceMissing$kSecCFErrorResourceSeal$kSecCFErrorResourceSideband$kSecClass$kSecClassCertificate$kSecClassGenericPassword$kSecClassIdentity$kSecClassInternetPassword$kSecClassKey$kSecCodeAttributeArchitecture$kSecCodeAttributeBundleVersion$kSecCodeAttributeSubarchitecture$kSecCodeAttributeUniversalFileOffset$kSecCodeInfoCMS$kSecCodeInfoCdHashes$kSecCodeInfoCertificates$kSecCodeInfoChangedFiles$kSecCodeInfoDesignatedRequirement$kSecCodeInfoDigestAlgorithm$kSecCodeInfoDigestAlgorithms$kSecCodeInfoEntitlements$kSecCodeInfoEntitlementsDict$kSecCodeInfoFlags$kSecCodeInfoFormat$kSecCodeInfoIdentifier$kSecCodeInfoImplicitDesignatedRequirement$kSecCodeInfoMainExecutable$kSecCodeInfoPList$kSecCodeInfoPlatformIdentifier$kSecCodeInfoRequirementData$kSecCodeInfoRequirements$kSecCodeInfoSource$kSecCodeInfoStatus$kSecCodeInfoTeamIdentifier$kSecCodeInfoTime$kSecCodeInfoTimestamp$kSecCodeInfoTrust$kSecCodeInfoUnique$kSecCompressionRatio$kSecDecodeTypeAttribute$kSecDigestHMACKeyAttribute$kSecDigestHMACMD5$kSecDigestHMACSHA1$kSecDigestHMACSHA2$kSecDigestLengthAttribute$kSecDigestMD2$kSecDigestMD4$kSecDigestMD5$kSecDigestSHA1$kSecDigestSHA2$kSecDigestTypeAttribute$kSecEncodeLineLengthAttribute$kSecEncodeTypeAttribute$kSecEncryptKey$kSecEncryptionMode$kSecGuestAttributeArchitecture$kSecGuestAttributeAudit$kSecGuestAttributeCanonical$kSecGuestAttributeDynamicCode$kSecGuestAttributeDynamicCodeInfoPlist$kSecGuestAttributeHash$kSecGuestAttributeMachPort$kSecGuestAttributePid$kSecGuestAttributeSubarchitecture$kSecIVKey$kSecIdentityDomainDefault$kSecIdentityDomainKerberosKDC$kSecImportExportAccess$kSecImportExportKeychain$kSecImportExportPassphrase$kSecImportItemCertChain$kSecImportItemIdentity$kSecImportItemKeyID$kSecImportItemLabel$kSecImportItemTrust$kSecInputIsAttributeName$kSecInputIsDigest$kSecInputIsPlainText$kSecInputIsRaw$kSecKeyAlgorithmECDHKeyExchangeC
ofactor$kSecKeyAlgorithmECDHKeyExchangeCofactorX963SHA1$kSecKeyAlgorithmECDHKeyExchangeCofactorX963SHA224$kSecKeyAlgorithmECDHKeyExchangeCofactorX963SHA256$kSecKeyAlgorithmECDHKeyExchangeCofactorX963SHA384$kSecKeyAlgorithmECDHKeyExchangeCofactorX963SHA512$kSecKeyAlgorithmECDHKeyExchangeStandard$kSecKeyAlgorithmECDHKeyExchangeStandardX963SHA1$kSecKeyAlgorithmECDHKeyExchangeStandardX963SHA224$kSecKeyAlgorithmECDHKeyExchangeStandardX963SHA256$kSecKeyAlgorithmECDHKeyExchangeStandardX963SHA384$kSecKeyAlgorithmECDHKeyExchangeStandardX963SHA512$kSecKeyAlgorithmECDSASignatureDigestX962$kSecKeyAlgorithmECDSASignatureDigestX962SHA1$kSecKeyAlgorithmECDSASignatureDigestX962SHA224$kSecKeyAlgorithmECDSASignatureDigestX962SHA256$kSecKeyAlgorithmECDSASignatureDigestX962SHA384$kSecKeyAlgorithmECDSASignatureDigestX962SHA512$kSecKeyAlgorithmECDSASignatureMessageX962SHA1$kSecKeyAlgorithmECDSASignatureMessageX962SHA224$kSecKeyAlgorithmECDSASignatureMessageX962SHA256$kSecKeyAlgorithmECDSASignatureMessageX962SHA384$kSecKeyAlgorithmECDSASignatureMessageX962SHA512$kSecKeyAlgorithmECDSASignatureRFC4754$kSecKeyAlgorithmECIESEncryptionCofactorVariableIVX963SHA224AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorVariableIVX963SHA256AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorVariableIVX963SHA384AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorVariableIVX963SHA512AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorX963SHA1AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorX963SHA224AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorX963SHA256AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorX963SHA384AESGCM$kSecKeyAlgorithmECIESEncryptionCofactorX963SHA512AESGCM$kSecKeyAlgorithmECIESEncryptionStandardVariableIVX963SHA224AESGCM$kSecKeyAlgorithmECIESEncryptionStandardVariableIVX963SHA256AESGCM$kSecKeyAlgorithmECIESEncryptionStandardVariableIVX963SHA384AESGCM$kSecKeyAlgorithmECIESEncryptionStandardVariableIVX963SHA512AESGCM$kSecKeyAlgorithmECIESEncryptionStandardX963SHA1AESGCM$kSecKeyAlgorithmECIESEncryptionStandardX963SHA
224AESGCM$kSecKeyAlgorithmECIESEncryptionStandardX963SHA256AESGCM$kSecKeyAlgorithmECIESEncryptionStandardX963SHA384AESGCM$kSecKeyAlgorithmECIESEncryptionStandardX963SHA512AESGCM$kSecKeyAlgorithmRSAEncryptionOAEPSHA1$kSecKeyAlgorithmRSAEncryptionOAEPSHA1AESGCM$kSecKeyAlgorithmRSAEncryptionOAEPSHA224$kSecKeyAlgorithmRSAEncryptionOAEPSHA224AESGCM$kSecKeyAlgorithmRSAEncryptionOAEPSHA256$kSecKeyAlgorithmRSAEncryptionOAEPSHA256AESGCM$kSecKeyAlgorithmRSAEncryptionOAEPSHA384$kSecKeyAlgorithmRSAEncryptionOAEPSHA384AESGCM$kSecKeyAlgorithmRSAEncryptionOAEPSHA512$kSecKeyAlgorithmRSAEncryptionOAEPSHA512AESGCM$kSecKeyAlgorithmRSAEncryptionPKCS1$kSecKeyAlgorithmRSAEncryptionRaw$kSecKeyAlgorithmRSASignatureDigestPKCS1v15Raw$kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA1$kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA224$kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA256$kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA384$kSecKeyAlgorithmRSASignatureDigestPKCS1v15SHA512$kSecKeyAlgorithmRSASignatureDigestPSSSHA1$kSecKeyAlgorithmRSASignatureDigestPSSSHA224$kSecKeyAlgorithmRSASignatureDigestPSSSHA256$kSecKeyAlgorithmRSASignatureDigestPSSSHA384$kSecKeyAlgorithmRSASignatureDigestPSSSHA512$kSecKeyAlgorithmRSASignatureMessagePKCS1v15SHA1$kSecKeyAlgorithmRSASignatureMessagePKCS1v15SHA224$kSecKeyAlgorithmRSASignatureMessagePKCS1v15SHA256$kSecKeyAlgorithmRSASignatureMessagePKCS1v15SHA384$kSecKeyAlgorithmRSASignatureMessagePKCS1v15SHA512$kSecKeyAlgorithmRSASignatureMessagePSSSHA1$kSecKeyAlgorithmRSASignatureMessagePSSSHA224$kSecKeyAlgorithmRSASignatureMessagePSSSHA256$kSecKeyAlgorithmRSASignatureMessagePSSSHA384$kSecKeyAlgorithmRSASignatureMessagePSSSHA512$kSecKeyAlgorithmRSASignatureRaw$kSecKeyAttributeName$kSecKeyKeyExchangeParameterRequestedSize$kSecKeyKeyExchangeParameterSharedInfo$kSecLineLength64$kSecLineLength76$kSecMatchCaseInsensitive$kSecMatchDiacriticInsensitive$kSecMatchEmailAddressIfPresent$kSecMatchIssuers$kSecMatchItemList$kSecMatchLimit$kSecMatchLimitAll$kSecMatchLimitOne$kSecMatchPol
icy$kSecMatchSearchList$kSecMatchSubjectContains$kSecMatchSubjectEndsWith$kSecMatchSubjectStartsWith$kSecMatchSubjectWholeString$kSecMatchTrustedOnly$kSecMatchValidOnDate$kSecMatchWidthInsensitive$kSecModeCBCKey$kSecModeCFBKey$kSecModeECBKey$kSecModeNoneKey$kSecModeOFBKey$kSecOAEPEncodingParametersAttributeName$kSecOAEPMGF1DigestAlgorithmAttributeName$kSecOAEPMessageLengthAttributeName$kSecOIDADC_CERT_POLICY$kSecOIDAPPLE_CERT_POLICY$kSecOIDAPPLE_EKU_CODE_SIGNING$kSecOIDAPPLE_EKU_CODE_SIGNING_DEV$kSecOIDAPPLE_EKU_ICHAT_ENCRYPTION$kSecOIDAPPLE_EKU_ICHAT_SIGNING$kSecOIDAPPLE_EKU_RESOURCE_SIGNING$kSecOIDAPPLE_EKU_SYSTEM_IDENTITY$kSecOIDAPPLE_EXTENSION$kSecOIDAPPLE_EXTENSION_AAI_INTERMEDIATE$kSecOIDAPPLE_EXTENSION_ADC_APPLE_SIGNING$kSecOIDAPPLE_EXTENSION_ADC_DEV_SIGNING$kSecOIDAPPLE_EXTENSION_APPLEID_INTERMEDIATE$kSecOIDAPPLE_EXTENSION_APPLE_SIGNING$kSecOIDAPPLE_EXTENSION_CODE_SIGNING$kSecOIDAPPLE_EXTENSION_INTERMEDIATE_MARKER$kSecOIDAPPLE_EXTENSION_ITMS_INTERMEDIATE$kSecOIDAPPLE_EXTENSION_WWDR_INTERMEDIATE$kSecOIDAuthorityInfoAccess$kSecOIDAuthorityKeyIdentifier$kSecOIDBasicConstraints$kSecOIDBiometricInfo$kSecOIDCSSMKeyStruct$kSecOIDCertIssuer$kSecOIDCertificatePolicies$kSecOIDClientAuth$kSecOIDCollectiveStateProvinceName$kSecOIDCollectiveStreetAddress$kSecOIDCommonName$kSecOIDCountryName$kSecOIDCrlDistributionPoints$kSecOIDCrlNumber$kSecOIDCrlReason$kSecOIDDOTMAC_CERT_EMAIL_ENCRYPT$kSecOIDDOTMAC_CERT_EMAIL_SIGN$kSecOIDDOTMAC_CERT_EXTENSION$kSecOIDDOTMAC_CERT_IDENTITY$kSecOIDDOTMAC_CERT_POLICY$kSecOIDDeltaCrlIndicator$kSecOIDDescription$kSecOIDEKU_IPSec$kSecOIDEmailAddress$kSecOIDEmailProtection$kSecOIDExtendedKeyUsage$kSecOIDExtendedKeyUsageAny$kSecOIDExtendedUseCodeSigning$kSecOIDGivenName$kSecOIDHoldInstructionCode$kSecOIDInvalidityDate$kSecOIDIssuerAltName$kSecOIDIssuingDistributionPoint$kSecOIDIssuingDistributionPoints$kSecOIDKERBv5_PKINIT_KP_CLIENT_AUTH$kSecOIDKERBv5_PKINIT_KP_KDC$kSecOIDKeyUsage$kSecOIDLocalityName$kSecOIDMS_NTPrincipalName$kSecOIDMicrosoftSGC$k
SecOIDNameConstraints$kSecOIDNetscapeCertSequence$kSecOIDNetscapeCertType$kSecOIDNetscapeSGC$kSecOIDOCSPSigning$kSecOIDOrganizationName$kSecOIDOrganizationalUnitName$kSecOIDPolicyConstraints$kSecOIDPolicyMappings$kSecOIDPrivateKeyUsagePeriod$kSecOIDQC_Statements$kSecOIDSRVName$kSecOIDSerialNumber$kSecOIDServerAuth$kSecOIDStateProvinceName$kSecOIDStreetAddress$kSecOIDSubjectAltName$kSecOIDSubjectDirectoryAttributes$kSecOIDSubjectEmailAddress$kSecOIDSubjectInfoAccess$kSecOIDSubjectKeyIdentifier$kSecOIDSubjectPicture$kSecOIDSubjectSignatureBitmap$kSecOIDSurname$kSecOIDTimeStamping$kSecOIDTitle$kSecOIDUseExemptions$kSecOIDX509V1CertificateIssuerUniqueId$kSecOIDX509V1CertificateSubjectUniqueId$kSecOIDX509V1IssuerName$kSecOIDX509V1IssuerNameCStruct$kSecOIDX509V1IssuerNameLDAP$kSecOIDX509V1IssuerNameStd$kSecOIDX509V1SerialNumber$kSecOIDX509V1Signature$kSecOIDX509V1SignatureAlgorithm$kSecOIDX509V1SignatureAlgorithmParameters$kSecOIDX509V1SignatureAlgorithmTBS$kSecOIDX509V1SignatureCStruct$kSecOIDX509V1SignatureStruct$kSecOIDX509V1SubjectName$kSecOIDX509V1SubjectNameCStruct$kSecOIDX509V1SubjectNameLDAP$kSecOIDX509V1SubjectNameStd$kSecOIDX509V1SubjectPublicKey$kSecOIDX509V1SubjectPublicKeyAlgorithm$kSecOIDX509V1SubjectPublicKeyAlgorithmParameters$kSecOIDX509V1SubjectPublicKeyCStruct$kSecOIDX509V1ValidityNotAfter$kSecOIDX509V1ValidityNotBefore$kSecOIDX509V1Version$kSecOIDX509V3Certificate$kSecOIDX509V3CertificateCStruct$kSecOIDX509V3CertificateExtensionCStruct$kSecOIDX509V3CertificateExtensionCritical$kSecOIDX509V3CertificateExtensionId$kSecOIDX509V3CertificateExtensionStruct$kSecOIDX509V3CertificateExtensionType$kSecOIDX509V3CertificateExtensionValue$kSecOIDX509V3CertificateExtensionsCStruct$kSecOIDX509V3CertificateExtensionsStruct$kSecOIDX509V3CertificateNumberOfExtensions$kSecOIDX509V3SignedCertificate$kSecOIDX509V3SignedCertificateCStruct$kSecPaddingKey$kSecPaddingNoneKey$kSecPaddingOAEPKey$kSecPaddingPKCS1Key$kSecPaddingPKCS5Key$kSecPaddingPKCS7Key$kSecPolicyAppleCodeSign
ing$kSecPolicyAppleEAP$kSecPolicyAppleIDValidation$kSecPolicyAppleIPsec$kSecPolicyApplePKINITClient$kSecPolicyApplePKINITServer$kSecPolicyApplePassbookSigning$kSecPolicyApplePayIssuerEncryption$kSecPolicyAppleRevocation$kSecPolicyAppleSMIME$kSecPolicyAppleSSL$kSecPolicyAppleTimeStamping$kSecPolicyAppleX509Basic$kSecPolicyAppleiChat$kSecPolicyClient$kSecPolicyKU_CRLSign$kSecPolicyKU_DataEncipherment$kSecPolicyKU_DecipherOnly$kSecPolicyKU_DigitalSignature$kSecPolicyKU_EncipherOnly$kSecPolicyKU_KeyAgreement$kSecPolicyKU_KeyCertSign$kSecPolicyKU_KeyEncipherment$kSecPolicyKU_NonRepudiation$kSecPolicyMacAppStoreReceipt$kSecPolicyName$kSecPolicyOid$kSecPolicyRevocationFlags$kSecPolicyTeamIdentifier$kSecPrivateKeyAttrs$kSecPropertyKeyLabel$kSecPropertyKeyLocalizedLabel$kSecPropertyKeyType$kSecPropertyKeyValue$kSecPropertyTypeArray$kSecPropertyTypeData$kSecPropertyTypeDate$kSecPropertyTypeError$kSecPropertyTypeNumber$kSecPropertyTypeSection$kSecPropertyTypeString$kSecPropertyTypeSuccess$kSecPropertyTypeTitle$kSecPropertyTypeURL$kSecPropertyTypeWarning$kSecPublicKeyAttrs$kSecRandomDefault$kSecReturnAttributes$kSecReturnData$kSecReturnPersistentRef$kSecReturnRef$kSecSignatureAttributeName$kSecTransformAbortAttributeName$kSecTransformAbortOriginatorKey$kSecTransformActionAttributeNotification$kSecTransformActionAttributeValidation$kSecTransformActionCanExecute$kSecTransformActionExternalizeExtraData$kSecTransformActionFinalize$kSecTransformActionInternalizeExtraData$kSecTransformActionProcessData$kSecTransformActionStartingExecution$kSecTransformDebugAttributeName$kSecTransformErrorDomain$kSecTransformInputAttributeName$kSecTransformOutputAttributeName$kSecTransformPreviousErrorKey$kSecTransformTransformName$kSecTrustCertificateTransparency$kSecTrustCertificateTransparencyWhiteList$kSecTrustEvaluationDate$kSecTrustExtendedValidation$kSecTrustOrganizationName$kSecTrustResultValue$kSecTrustRevocationChecked$kSecTrustRevocationValidUntilDate$kSecUseAuthenticationContext$kSecUseAut
henticationUI$kSecUseAuthenticationUIAllow$kSecUseAuthenticationUIFail$kSecUseAuthenticationUISkip$kSecUseDataProtectionKeychain$kSecUseItemList$kSecUseKeychain$kSecUseNoAuthenticationUI$kSecUseOperationPrompt$kSecValueData$kSecValuePersistentRef$kSecValueRef$kSecZLibEncoding$"""
enums = """$SEC_KEYCHAIN_SETTINGS_VERS1@1$SEC_KEY_IMPORT_EXPORT_PARAMS_VERSION@0$SSL_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA@17$SSL_DHE_DSS_WITH_3DES_EDE_CBC_SHA@19$SSL_DHE_DSS_WITH_DES_CBC_SHA@18$SSL_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA@20$SSL_DHE_RSA_WITH_3DES_EDE_CBC_SHA@22$SSL_DHE_RSA_WITH_DES_CBC_SHA@21$SSL_DH_DSS_EXPORT_WITH_DES40_CBC_SHA@11$SSL_DH_DSS_WITH_3DES_EDE_CBC_SHA@13$SSL_DH_DSS_WITH_DES_CBC_SHA@12$SSL_DH_RSA_EXPORT_WITH_DES40_CBC_SHA@14$SSL_DH_RSA_WITH_3DES_EDE_CBC_SHA@16$SSL_DH_RSA_WITH_DES_CBC_SHA@15$SSL_DH_anon_EXPORT_WITH_DES40_CBC_SHA@25$SSL_DH_anon_EXPORT_WITH_RC4_40_MD5@23$SSL_DH_anon_WITH_3DES_EDE_CBC_SHA@27$SSL_DH_anon_WITH_DES_CBC_SHA@26$SSL_DH_anon_WITH_RC4_128_MD5@24$SSL_FORTEZZA_DMS_WITH_FORTEZZA_CBC_SHA@29$SSL_FORTEZZA_DMS_WITH_NULL_SHA@28$SSL_NO_SUCH_CIPHERSUITE@65535$SSL_NULL_WITH_NULL_NULL@0$SSL_RSA_EXPORT_WITH_DES40_CBC_SHA@8$SSL_RSA_EXPORT_WITH_RC2_CBC_40_MD5@6$SSL_RSA_EXPORT_WITH_RC4_40_MD5@3$SSL_RSA_WITH_3DES_EDE_CBC_MD5@65411$SSL_RSA_WITH_3DES_EDE_CBC_SHA@10$SSL_RSA_WITH_DES_CBC_MD5@65410$SSL_RSA_WITH_DES_CBC_SHA@9$SSL_RSA_WITH_IDEA_CBC_MD5@65409$SSL_RSA_WITH_IDEA_CBC_SHA@7$SSL_RSA_WITH_NULL_MD5@1$SSL_RSA_WITH_NULL_SHA@2$SSL_RSA_WITH_RC2_CBC_MD5@65408$SSL_RSA_WITH_RC4_128_MD5@4$SSL_RSA_WITH_RC4_128_SHA@5$TLS_AES_128_CCM_8_SHA256@4869$TLS_AES_128_CCM_SHA256@4868$TLS_AES_128_GCM_SHA256@4865$TLS_AES_256_GCM_SHA384@4866$TLS_CHACHA20_POLY1305_SHA256@4867$TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA@19$TLS_DHE_DSS_WITH_AES_128_CBC_SHA@50$TLS_DHE_DSS_WITH_AES_128_CBC_SHA256@64$TLS_DHE_DSS_WITH_AES_128_GCM_SHA256@162$TLS_DHE_DSS_WITH_AES_256_CBC_SHA@56$TLS_DHE_DSS_WITH_AES_256_CBC_SHA256@106$TLS_DHE_DSS_WITH_AES_256_GCM_SHA384@163$TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA@143$TLS_DHE_PSK_WITH_AES_128_CBC_SHA@144$TLS_DHE_PSK_WITH_AES_128_CBC_SHA256@178$TLS_DHE_PSK_WITH_AES_128_GCM_SHA256@170$TLS_DHE_PSK_WITH_AES_256_CBC_SHA@145$TLS_DHE_PSK_WITH_AES_256_CBC_SHA384@179$TLS_DHE_PSK_WITH_AES_256_GCM_SHA384@171$TLS_DHE_PSK_WITH_NULL_SHA@45$TLS_DHE_PSK_WITH_NULL_SHA256@1
80$TLS_DHE_PSK_WITH_NULL_SHA384@181$TLS_DHE_PSK_WITH_RC4_128_SHA@142$TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA@22$TLS_DHE_RSA_WITH_AES_128_CBC_SHA@51$TLS_DHE_RSA_WITH_AES_128_CBC_SHA256@103$TLS_DHE_RSA_WITH_AES_128_GCM_SHA256@158$TLS_DHE_RSA_WITH_AES_256_CBC_SHA@57$TLS_DHE_RSA_WITH_AES_256_CBC_SHA256@107$TLS_DHE_RSA_WITH_AES_256_GCM_SHA384@159$TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA@13$TLS_DH_DSS_WITH_AES_128_CBC_SHA@48$TLS_DH_DSS_WITH_AES_128_CBC_SHA256@62$TLS_DH_DSS_WITH_AES_128_GCM_SHA256@164$TLS_DH_DSS_WITH_AES_256_CBC_SHA@54$TLS_DH_DSS_WITH_AES_256_CBC_SHA256@104$TLS_DH_DSS_WITH_AES_256_GCM_SHA384@165$TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA@16$TLS_DH_RSA_WITH_AES_128_CBC_SHA@49$TLS_DH_RSA_WITH_AES_128_CBC_SHA256@63$TLS_DH_RSA_WITH_AES_128_GCM_SHA256@160$TLS_DH_RSA_WITH_AES_256_CBC_SHA@55$TLS_DH_RSA_WITH_AES_256_CBC_SHA256@105$TLS_DH_RSA_WITH_AES_256_GCM_SHA384@161$TLS_DH_anon_WITH_3DES_EDE_CBC_SHA@27$TLS_DH_anon_WITH_AES_128_CBC_SHA@52$TLS_DH_anon_WITH_AES_128_CBC_SHA256@108$TLS_DH_anon_WITH_AES_128_GCM_SHA256@166$TLS_DH_anon_WITH_AES_256_CBC_SHA@58$TLS_DH_anon_WITH_AES_256_CBC_SHA256@109$TLS_DH_anon_WITH_AES_256_GCM_SHA384@167$TLS_DH_anon_WITH_RC4_128_MD5@24$TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA@49160$TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA@49161$TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256@49187$TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256@49195$TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA@49162$TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384@49188$TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384@49196$TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256@52393$TLS_ECDHE_ECDSA_WITH_NULL_SHA@49158$TLS_ECDHE_ECDSA_WITH_RC4_128_SHA@49159$TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA@49205$TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA@49206$TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA@49170$TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA@49171$TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256@49191$TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256@49199$TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA@49172$TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384@49192$TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384@49200$TLS_ECDHE_RSA_WI
TH_CHACHA20_POLY1305_SHA256@52392$TLS_ECDHE_RSA_WITH_NULL_SHA@49168$TLS_ECDHE_RSA_WITH_RC4_128_SHA@49169$TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA@49155$TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA@49156$TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256@49189$TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256@49197$TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA@49157$TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384@49190$TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384@49198$TLS_ECDH_ECDSA_WITH_NULL_SHA@49153$TLS_ECDH_ECDSA_WITH_RC4_128_SHA@49154$TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA@49165$TLS_ECDH_RSA_WITH_AES_128_CBC_SHA@49166$TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256@49193$TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256@49201$TLS_ECDH_RSA_WITH_AES_256_CBC_SHA@49167$TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384@49194$TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384@49202$TLS_ECDH_RSA_WITH_NULL_SHA@49163$TLS_ECDH_RSA_WITH_RC4_128_SHA@49164$TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA@49175$TLS_ECDH_anon_WITH_AES_128_CBC_SHA@49176$TLS_ECDH_anon_WITH_AES_256_CBC_SHA@49177$TLS_ECDH_anon_WITH_NULL_SHA@49173$TLS_ECDH_anon_WITH_RC4_128_SHA@49174$TLS_EMPTY_RENEGOTIATION_INFO_SCSV@255$TLS_NULL_WITH_NULL_NULL@0$TLS_PSK_WITH_3DES_EDE_CBC_SHA@139$TLS_PSK_WITH_AES_128_CBC_SHA@140$TLS_PSK_WITH_AES_128_CBC_SHA256@174$TLS_PSK_WITH_AES_128_GCM_SHA256@168$TLS_PSK_WITH_AES_256_CBC_SHA@141$TLS_PSK_WITH_AES_256_CBC_SHA384@175$TLS_PSK_WITH_AES_256_GCM_SHA384@169$TLS_PSK_WITH_CHACHA20_POLY1305_SHA256@52395$TLS_PSK_WITH_NULL_SHA@44$TLS_PSK_WITH_NULL_SHA256@176$TLS_PSK_WITH_NULL_SHA384@177$TLS_PSK_WITH_RC4_128_SHA@138$TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA@147$TLS_RSA_PSK_WITH_AES_128_CBC_SHA@148$TLS_RSA_PSK_WITH_AES_128_CBC_SHA256@182$TLS_RSA_PSK_WITH_AES_128_GCM_SHA256@172$TLS_RSA_PSK_WITH_AES_256_CBC_SHA@149$TLS_RSA_PSK_WITH_AES_256_CBC_SHA384@183$TLS_RSA_PSK_WITH_AES_256_GCM_SHA384@173$TLS_RSA_PSK_WITH_NULL_SHA@46$TLS_RSA_PSK_WITH_NULL_SHA256@184$TLS_RSA_PSK_WITH_NULL_SHA384@185$TLS_RSA_PSK_WITH_RC4_128_SHA@146$TLS_RSA_WITH_3DES_EDE_CBC_SHA@10$TLS_RSA_WITH_AES_128_CBC_SHA@47$TLS_RSA_WITH_AES_128_CBC_SHA2
56@60$TLS_RSA_WITH_AES_128_GCM_SHA256@156$TLS_RSA_WITH_AES_256_CBC_SHA@53$TLS_RSA_WITH_AES_256_CBC_SHA256@61$TLS_RSA_WITH_AES_256_GCM_SHA384@157$TLS_RSA_WITH_NULL_MD5@1$TLS_RSA_WITH_NULL_SHA@2$TLS_RSA_WITH_NULL_SHA256@59$TLS_RSA_WITH_RC4_128_MD5@4$TLS_RSA_WITH_RC4_128_SHA@5$callerSecuritySession@-1$errAuthorizationBadAddress@-60033$errAuthorizationCanceled@-60006$errAuthorizationDenied@-60005$errAuthorizationExternalizeNotAllowed@-60009$errAuthorizationInteractionNotAllowed@-60007$errAuthorizationInternal@-60008$errAuthorizationInternalizeNotAllowed@-60010$errAuthorizationInvalidFlags@-60011$errAuthorizationInvalidPointer@-60004$errAuthorizationInvalidRef@-60002$errAuthorizationInvalidSet@-60001$errAuthorizationInvalidTag@-60003$errAuthorizationSuccess@0$errAuthorizationToolEnvironmentError@-60032$errAuthorizationToolExecuteFailure@-60031$errSSLATSCertificateHashAlgorithmViolation@-9885$errSSLATSCertificateTrustViolation@-9886$errSSLATSCiphersuiteViolation@-9882$errSSLATSLeafCertificateHashAlgorithmViolation@-9884$errSSLATSMinimumKeySizeViolation@-9883$errSSLATSMinimumVersionViolation@-9881$errSSLATSViolation@-9880$errSSLBadCert@-9808$errSSLBadCertificateStatusResponse@-9862$errSSLBadCipherSuite@-9818$errSSLBadConfiguration@-9848$errSSLBadRecordMac@-9846$errSSLBufferOverflow@-9817$errSSLCertExpired@-9814$errSSLCertNotYetValid@-9815$errSSLCertificateRequired@-9863$errSSLClientCertRequested@-9842$errSSLClientHelloReceived@-9851$errSSLClosedAbort@-9806$errSSLClosedGraceful@-9805$errSSLClosedNoNotify@-9816$errSSLConfigurationFailed@-9854$errSSLConnectionRefused@-9844$errSSLCrypto@-9809$errSSLDecodeError@-9859$errSSLDecompressFail@-9857$errSSLDecryptionFail@-9845$errSSLFatalAlert@-9802$errSSLHandshakeFail@-9858$errSSLHostNameMismatch@-9843$errSSLIllegalParam@-9830$errSSLInappropriateFallback@-9860$errSSLInternal@-9810$errSSLMissingExtension@-9861$errSSLModuleAttach@-9811$errSSLNegotiation@-9801$errSSLNetworkTimeout@-9853$errSSLNoRootCert@-9813$errSSLPeerAccessDenied@-983
2$errSSLPeerAuthCompleted@-9841$errSSLPeerBadCert@-9825$errSSLPeerBadRecordMac@-9820$errSSLPeerCertExpired@-9828$errSSLPeerCertRevoked@-9827$errSSLPeerCertUnknown@-9829$errSSLPeerDecodeError@-9833$errSSLPeerDecompressFail@-9823$errSSLPeerDecryptError@-9834$errSSLPeerDecryptionFail@-9821$errSSLPeerExportRestriction@-9835$errSSLPeerHandshakeFail@-9824$errSSLPeerInsufficientSecurity@-9837$errSSLPeerInternalError@-9838$errSSLPeerNoRenegotiation@-9840$errSSLPeerProtocolVersion@-9836$errSSLPeerRecordOverflow@-9822$errSSLPeerUnexpectedMsg@-9819$errSSLPeerUnknownCA@-9831$errSSLPeerUnsupportedCert@-9826$errSSLPeerUserCancelled@-9839$errSSLProtocol@-9800$errSSLRecordOverflow@-9847$errSSLSessionNotFound@-9804$errSSLTransportReset@-9852$errSSLUnexpectedMessage@-9856$errSSLUnexpectedRecord@-9849$errSSLUnknownPSKIdentity@-9864$errSSLUnknownRootCert@-9812$errSSLUnrecognizedName@-9865$errSSLUnsupportedExtension@-9855$errSSLWeakPeerEphemeralDHKey@-9850$errSSLWouldBlock@-9803$errSSLXCertChainInvalid@-9807$errSecACLAddFailed@-67698$errSecACLChangeFailed@-67699$errSecACLDeleteFailed@-67696$errSecACLNotSimple@-25240$errSecACLReplaceFailed@-67697$errSecAddinLoadFailed@-67711$errSecAddinUnloadFailed@-67714$errSecAlgorithmMismatch@-67730$errSecAllocate@-108$errSecAlreadyLoggedIn@-67814$errSecAppleAddAppACLSubject@-67589$errSecAppleInvalidKeyEndDate@-67593$errSecAppleInvalidKeyStartDate@-67592$errSecApplePublicKeyIncomplete@-67590$errSecAppleSSLv2Rollback@-67595$errSecAppleSignatureMismatch@-67591$errSecAttachHandleBusy@-67728$errSecAttributeNotInContext@-67720$errSecAuthFailed@-25293$errSecBadReq@-909$errSecBlockSizeMismatch@-67810$errSecBufferTooSmall@-25301$errSecCRLAlreadySigned@-67684$errSecCRLBadURI@-67617$errSecCRLExpired@-67613$errSecCRLNotFound@-67615$errSecCRLNotTrusted@-67620$errSecCRLNotValidYet@-67614$errSecCRLPolicyFailed@-67621$errSecCRLServerDown@-67616$errSecCSAmbiguousBundleFormat@-67011$errSecCSBadBundleFormat@-67028$errSecCSBadCallbackValue@-67020$errSecCSBadDictionaryFo
rmat@-67058$errSecCSBadDiskImageFormat@-67001$errSecCSBadFrameworkVersion@-67009$errSecCSBadLVArch@-67017$errSecCSBadMainExecutable@-67010$errSecCSBadNestedCode@-67021$errSecCSBadObjectFormat@-67049$errSecCSBadResource@-67054$errSecCSBadTeamIdentifier@-66997$errSecCSCMSTooLarge@-67036$errSecCSCancelled@-67006$errSecCSDBAccess@-67032$errSecCSDBDenied@-67033$errSecCSDSStoreSymlink@-67012$errSecCSDbCorrupt@-67024$errSecCSFileHardQuarantined@-67026$errSecCSGuestInvalid@-67063$errSecCSHelperFailed@-67019$errSecCSHostProtocolContradiction@-67043$errSecCSHostProtocolDedicationError@-67042$errSecCSHostProtocolInvalidAttribute@-67031$errSecCSHostProtocolInvalidHash@-67035$errSecCSHostProtocolNotProxy@-67041$errSecCSHostProtocolRelativePath@-67044$errSecCSHostProtocolStateError@-67040$errSecCSHostProtocolUnrelated@-67039$errSecCSHostReject@-67047$errSecCSInfoPlistFailed@-67030$errSecCSInternalError@-67048$errSecCSInvalidAssociatedFileData@-66999$errSecCSInvalidAttributeValues@-67066$errSecCSInvalidEntitlements@-66994$errSecCSInvalidFlags@-67070$errSecCSInvalidObjectRef@-67071$errSecCSInvalidPlatform@-67005$errSecCSInvalidRuntimeVersion@-66993$errSecCSInvalidSymlink@-67003$errSecCSInvalidTeamIdentifier@-66998$errSecCSMultipleGuests@-67064$errSecCSNoMainExecutable@-67029$errSecCSNoMatches@-67027$errSecCSNoSuchCode@-67065$errSecCSNotAHost@-67046$errSecCSNotAppLike@-67002$errSecCSNotSupported@-67037$errSecCSObjectRequired@-67069$errSecCSOutdated@-67025$errSecCSRegularFile@-67015$errSecCSReqFailed@-67050$errSecCSReqInvalid@-67052$errSecCSReqUnsupported@-67051$errSecCSResourceDirectoryFailed@-67023$errSecCSResourceNotSupported@-67016$errSecCSResourceRulesInvalid@-67053$errSecCSResourcesInvalid@-67055$errSecCSResourcesNotFound@-67056$errSecCSResourcesNotSealed@-67057$errSecCSRevokedNotarization@-66992$errSecCSSignatureFailed@-67061$errSecCSSignatureInvalid@-67045$errSecCSSignatureNotVerifiable@-67060$errSecCSSignatureUnsupported@-67059$errSecCSSignatureUntrusted@-66996$errSecCSStati
cCodeChanged@-67034$errSecCSStaticCodeNotFound@-67068$errSecCSTooBig@-67004$errSecCSUnimplemented@-67072$errSecCSUnsealedAppRoot@-67014$errSecCSUnsealedFrameworkRoot@-67008$errSecCSUnsigned@-67062$errSecCSUnsignedNestedCode@-67022$errSecCSUnsupportedDigestAlgorithm@-67000$errSecCSUnsupportedGuestAttributes@-67067$errSecCSVetoed@-67018$errSecCSWeakResourceEnvelope@-67007$errSecCSWeakResourceRules@-67013$errSecCallbackFailed@-67695$errSecCertificateCannotOperate@-67817$errSecCertificateExpired@-67818$errSecCertificateNameNotAllowed@-67900$errSecCertificateNotValidYet@-67819$errSecCertificatePolicyNotAllowed@-67899$errSecCertificateRevoked@-67820$errSecCertificateSuspended@-67821$errSecCertificateValidityPeriodTooLong@-67901$errSecCodeSigningBadCertChainLength@-67647$errSecCodeSigningBadPathLengthConstraint@-67649$errSecCodeSigningDevelopment@-67651$errSecCodeSigningNoBasicConstraints@-67648$errSecCodeSigningNoExtendedKeyUsage@-67650$errSecConversionError@-67594$errSecCoreFoundationUnknown@-4960$errSecCreateChainFailed@-25318$errSecDataNotAvailable@-25316$errSecDataNotModifiable@-25317$errSecDataTooLarge@-25302$errSecDatabaseLocked@-67869$errSecDatastoreIsOpen@-67870$errSecDecode@-26275$errSecDeviceError@-67727$errSecDeviceFailed@-67588$errSecDeviceReset@-67587$errSecDeviceVerifyFailed@-67812$errSecDiskFull@-34$errSecDskFull@-34$errSecDuplicateCallback@-25297$errSecDuplicateItem@-25299$errSecDuplicateKeychain@-25296$errSecEMMLoadFailed@-67709$errSecEMMUnloadFailed@-67710$errSecEndOfData@-67634$errSecEventNotificationCallbackNotFound@-67723$errSecExtendedKeyUsageNotCritical@-67881$errSecFieldSpecifiedMultiple@-67866$errSecFileTooBig@-67597$errSecFunctionFailed@-67677$errSecFunctionIntegrityFail@-67670$errSecHostNameMismatch@-67602$errSecIDPFailure@-67622$errSecIO@-36$errSecInDarkWake@-25320$errSecIncompatibleDatabaseBlob@-67600$errSecIncompatibleFieldFormat@-67867$errSecIncompatibleKeyBlob@-67601$errSecIncompatibleVersion@-67704$errSecIncompleteCertRevocationCheck@-6763
5$errSecInputLengthError@-67724$errSecInsufficientClientID@-67586$errSecInsufficientCredentials@-67822$errSecInteractionNotAllowed@-25308$errSecInteractionRequired@-25315$errSecInternalComponent@-2070$errSecInternalError@-67671$errSecInvaldCRLAuthority@-67827$errSecInvalidACL@-67702$errSecInvalidAccessCredentials@-67700$errSecInvalidAccessRequest@-67876$errSecInvalidAction@-67823$errSecInvalidAddinFunctionTable@-67716$errSecInvalidAlgorithm@-67747$errSecInvalidAlgorithmParms@-67770$errSecInvalidAttributeAccessCredentials@-67796$errSecInvalidAttributeBase@-67788$errSecInvalidAttributeBlockSize@-67764$errSecInvalidAttributeDLDBHandle@-67794$errSecInvalidAttributeEffectiveBits@-67778$errSecInvalidAttributeEndDate@-67782$errSecInvalidAttributeInitVector@-67750$errSecInvalidAttributeIterationCount@-67792$errSecInvalidAttributeKey@-67748$errSecInvalidAttributeKeyLength@-67762$errSecInvalidAttributeKeyType@-67774$errSecInvalidAttributeLabel@-67772$errSecInvalidAttributeMode@-67776$errSecInvalidAttributeOutputSize@-67766$errSecInvalidAttributePadding@-67754$errSecInvalidAttributePassphrase@-67760$errSecInvalidAttributePrime@-67786$errSecInvalidAttributePrivateKeyFormat@-67800$errSecInvalidAttributePublicKeyFormat@-67798$errSecInvalidAttributeRandom@-67756$errSecInvalidAttributeRounds@-67768$errSecInvalidAttributeSalt@-67752$errSecInvalidAttributeSeed@-67758$errSecInvalidAttributeStartDate@-67780$errSecInvalidAttributeSubprime@-67790$errSecInvalidAttributeSymmetricKeyFormat@-67802$errSecInvalidAttributeVersion@-67784$errSecInvalidAttributeWrappedKeyFormat@-67804$errSecInvalidAuthority@-67824$errSecInvalidAuthorityKeyID@-67606$errSecInvalidBaseACLs@-67851$errSecInvalidBundleInfo@-67857$errSecInvalidCRL@-67830$errSecInvalidCRLEncoding@-67828$errSecInvalidCRLGroup@-67816$errSecInvalidCRLIndex@-67858$errSecInvalidCRLType@-67829$errSecInvalidCallback@-25298$errSecInvalidCertAuthority@-67826$errSecInvalidCertificateGroup@-67691$errSecInvalidCertificateRef@-67690$errSecInvalidConte
xt@-67746$errSecInvalidDBList@-67681$errSecInvalidDBLocation@-67875$errSecInvalidData@-67673$errSecInvalidDatabaseBlob@-67598$errSecInvalidDigestAlgorithm@-67815$errSecInvalidEncoding@-67853$errSecInvalidExtendedKeyUsage@-67609$errSecInvalidFormType@-67831$errSecInvalidGUID@-67679$errSecInvalidHandle@-67680$errSecInvalidHandleUsage@-67668$errSecInvalidID@-67832$errSecInvalidIDLinkage@-67610$errSecInvalidIdentifier@-67833$errSecInvalidIndex@-67834$errSecInvalidIndexInfo@-67877$errSecInvalidInputVector@-67744$errSecInvalidItemRef@-25304$errSecInvalidKeyAttributeMask@-67738$errSecInvalidKeyBlob@-67599$errSecInvalidKeyFormat@-67742$errSecInvalidKeyHierarchy@-67713$errSecInvalidKeyLabel@-67740$errSecInvalidKeyRef@-67712$errSecInvalidKeyUsageForPolicy@-67608$errSecInvalidKeyUsageMask@-67736$errSecInvalidKeychain@-25295$errSecInvalidLoginName@-67813$errSecInvalidModifyMode@-67879$errSecInvalidName@-67689$errSecInvalidNetworkAddress@-67683$errSecInvalidNewOwner@-67878$errSecInvalidNumberOfFields@-67685$errSecInvalidOutputVector@-67745$errSecInvalidOwnerEdit@-25244$errSecInvalidPVC@-67708$errSecInvalidParsingModule@-67868$errSecInvalidPassthroughID@-67682$errSecInvalidPasswordRef@-25261$errSecInvalidPointer@-67675$errSecInvalidPolicyIdentifiers@-67835$errSecInvalidPrefsDomain@-25319$errSecInvalidQuery@-67693$errSecInvalidReason@-67837$errSecInvalidRecord@-67701$errSecInvalidRequestInputs@-67838$errSecInvalidRequestor@-67855$errSecInvalidResponseVector@-67839$errSecInvalidRoot@-67612$errSecInvalidSampleValue@-67703$errSecInvalidScope@-67706$errSecInvalidSearchRef@-25305$errSecInvalidServiceMask@-67717$errSecInvalidSignature@-67688$errSecInvalidStopOnPolicy@-67840$errSecInvalidSubServiceID@-67719$errSecInvalidSubjectKeyID@-67607$errSecInvalidSubjectName@-67655$errSecInvalidTimeString@-67836$errSecInvalidTrustSetting@-25242$errSecInvalidTrustSettings@-25262$errSecInvalidTuple@-67841$errSecInvalidTupleCredendtials@-67852$errSecInvalidTupleGroup@-67850$errSecInvalidValidityPeriod
@-67854$errSecInvalidValue@-67694$errSecItemNotFound@-25300$errSecKeyBlobTypeIncorrect@-67732$errSecKeyHeaderInconsistent@-67733$errSecKeyIsSensitive@-25258$errSecKeySizeNotAllowed@-25311$errSecKeyUsageIncorrect@-67731$errSecLibraryReferenceNotFound@-67715$errSecMDSError@-67674$errSecMemoryError@-67672$errSecMissingAlgorithmParms@-67771$errSecMissingAttributeAccessCredentials@-67797$errSecMissingAttributeBase@-67789$errSecMissingAttributeBlockSize@-67765$errSecMissingAttributeDLDBHandle@-67795$errSecMissingAttributeEffectiveBits@-67779$errSecMissingAttributeEndDate@-67783$errSecMissingAttributeInitVector@-67751$errSecMissingAttributeIterationCount@-67793$errSecMissingAttributeKey@-67749$errSecMissingAttributeKeyLength@-67763$errSecMissingAttributeKeyType@-67775$errSecMissingAttributeLabel@-67773$errSecMissingAttributeMode@-67777$errSecMissingAttributeOutputSize@-67767$errSecMissingAttributePadding@-67755$errSecMissingAttributePassphrase@-67761$errSecMissingAttributePrime@-67787$errSecMissingAttributePrivateKeyFormat@-67801$errSecMissingAttributePublicKeyFormat@-67799$errSecMissingAttributeRandom@-67757$errSecMissingAttributeRounds@-67769$errSecMissingAttributeSalt@-67753$errSecMissingAttributeSeed@-67759$errSecMissingAttributeStartDate@-67781$errSecMissingAttributeSubprime@-67791$errSecMissingAttributeSymmetricKeyFormat@-67803$errSecMissingAttributeVersion@-67785$errSecMissingAttributeWrappedKeyFormat@-67805$errSecMissingEntitlement@-34018$errSecMissingRequiredExtension@-67880$errSecMissingValue@-67871$errSecMobileMeCSRVerifyFailure@-67665$errSecMobileMeFailedConsistencyCheck@-67666$errSecMobileMeNoRequestPending@-67664$errSecMobileMeRequestAlreadyPending@-67663$errSecMobileMeRequestQueued@-67657$errSecMobileMeRequestRedirected@-67658$errSecMobileMeServerAlreadyExists@-67661$errSecMobileMeServerError@-67659$errSecMobileMeServerNotAvailable@-67660$errSecMobileMeServerServiceErr@-67662$errSecModuleManagerInitializeFailed@-67721$errSecModuleManagerNotFound@-67722$errSe
cModuleManifestVerifyFailed@-67678$errSecModuleNotLoaded@-67718$errSecMultipleExecSegments@-66995$errSecMultiplePrivKeys@-25259$errSecMultipleValuesUnsupported@-67842$errSecNetworkFailure@-67636$errSecNoAccessForItem@-25243$errSecNoBasicConstraints@-67604$errSecNoBasicConstraintsCA@-67605$errSecNoCertificateModule@-25313$errSecNoDefaultAuthority@-67844$errSecNoDefaultKeychain@-25307$errSecNoFieldValues@-67859$errSecNoPolicyModule@-25314$errSecNoStorageModule@-25312$errSecNoSuchAttr@-25303$errSecNoSuchClass@-25306$errSecNoSuchKeychain@-25294$errSecNoTrustSettings@-25263$errSecNotAvailable@-25291$errSecNotInitialized@-67667$errSecNotLoggedIn@-67729$errSecNotSigner@-26267$errSecNotTrusted@-67843$errSecOCSPBadRequest@-67631$errSecOCSPBadResponse@-67630$errSecOCSPNoSigner@-67640$errSecOCSPNotTrustedToAnchor@-67637$errSecOCSPResponderInternalError@-67642$errSecOCSPResponderMalformedReq@-67641$errSecOCSPResponderSignatureRequired@-67644$errSecOCSPResponderTryLater@-67643$errSecOCSPResponderUnauthorized@-67645$errSecOCSPResponseNonceMismatch@-67646$errSecOCSPSignatureError@-67639$errSecOCSPStatusUnrecognized@-67633$errSecOCSPUnavailable@-67632$errSecOpWr@-49$errSecOutputLengthError@-67725$errSecPVCAlreadyConfigured@-67707$errSecPVCReferentNotFound@-67669$errSecParam@-50$errSecPassphraseRequired@-25260$errSecPathLengthConstraintExceeded@-67611$errSecPkcs12VerifyFailure@-25264$errSecPolicyNotFound@-25241$errSecPrivilegeNotGranted@-67705$errSecPrivilegeNotSupported@-67726$errSecPublicKeyInconsistent@-67811$errSecQuerySizeUnknown@-67809$errSecQuotaExceeded@-67596$errSecReadOnly@-25292$errSecReadOnlyAttr@-25309$errSecRecordModified@-67638$errSecRejectedForm@-67845$errSecRequestDescriptor@-67856$errSecRequestLost@-67846$errSecRequestRejected@-67847$errSecResourceSignBadCertChainLength@-67652$errSecResourceSignBadExtKeyUsage@-67653$errSecSMIMEBadExtendedKeyUsage@-67624$errSecSMIMEBadKeyUsage@-67625$errSecSMIMEEmailAddressesNotFound@-67623$errSecSMIMEKeyUsageNotCritical@-67626$errS
ecSMIMENoEmailAddress@-67627$errSecSMIMESubjAltNameNotCritical@-67628$errSecSSLBadExtendedKeyUsage@-67629$errSecSelfCheckFailed@-67676$errSecServiceNotAvailable@-67585$errSecSigningTimeMissing@-67894$errSecStagedOperationInProgress@-67806$errSecStagedOperationNotStarted@-67807$errSecSuccess@0$errSecTagNotFound@-67692$errSecTimestampAddInfoNotAvailable@-67892$errSecTimestampBadAlg@-67886$errSecTimestampBadDataFormat@-67888$errSecTimestampBadRequest@-67887$errSecTimestampInvalid@-67883$errSecTimestampMissing@-67882$errSecTimestampNotTrusted@-67884$errSecTimestampRejection@-67895$errSecTimestampRevocationNotification@-67898$errSecTimestampRevocationWarning@-67897$errSecTimestampServiceNotAvailable@-67885$errSecTimestampSystemFailure@-67893$errSecTimestampTimeNotAvailable@-67889$errSecTimestampUnacceptedExtension@-67891$errSecTimestampUnacceptedPolicy@-67890$errSecTimestampWaiting@-67896$errSecTrustNotAvailable@-25245$errSecTrustSettingDeny@-67654$errSecUnimplemented@-4$errSecUnknownCRLExtension@-67619$errSecUnknownCertExtension@-67618$errSecUnknownCriticalExtensionFlag@-67603$errSecUnknownFormat@-25257$errSecUnknownQualifiedCertStatement@-67656$errSecUnknownTag@-67687$errSecUnsupportedAddressType@-67848$errSecUnsupportedFieldFormat@-67860$errSecUnsupportedFormat@-25256$errSecUnsupportedIndexInfo@-67861$errSecUnsupportedKeyAttributeMask@-67739$errSecUnsupportedKeyFormat@-67734$errSecUnsupportedKeyLabel@-67741$errSecUnsupportedKeySize@-67735$errSecUnsupportedKeyUsageMask@-67737$errSecUnsupportedLocality@-67862$errSecUnsupportedNumAttributes@-67863$errSecUnsupportedNumIndexes@-67864$errSecUnsupportedNumRecordTypes@-67865$errSecUnsupportedNumSelectionPreds@-67873$errSecUnsupportedOperator@-67874$errSecUnsupportedQueryLimits@-67872$errSecUnsupportedService@-67849$errSecUnsupportedVectorOfBuffers@-67743$errSecUserCanceled@-128$errSecVerificationFailure@-67686$errSecVerifyActionFailed@-67825$errSecVerifyFailed@-67808$errSecWrPerm@-61$errSecWrongSecVersion@-25310$errSecureDown
loadInvalidDownload@-20053$errSecureDownloadInvalidTicket@-20052$errSessionAuthorizationDenied@-60502$errSessionInvalidAttributes@-60501$errSessionInvalidId@-60500$errSessionSuccess@0$errSessionValueNotSet@-60503$kAlwaysAuthenticate@1$kAuthorizationExternalFormLength@32$kAuthorizationFlagCanNotPreAuthorize@1$kAuthorizationFlagDefaults@0$kAuthorizationFlagDestroyRights@8$kAuthorizationFlagExtendRights@2$kAuthorizationFlagInteractionAllowed@1$kAuthorizationFlagNoData@1048576$kAuthorizationFlagPartialRights@4$kAuthorizationFlagPreAuthorize@16$kAuthorizationResultAllow@0$kAuthorizationResultDeny@1$kAuthorizationResultUndefined@2$kAuthorizationResultUserCanceled@3$kCMSAttrAppleCodesigningHashAgility@16$kCMSAttrAppleCodesigningHashAgilityV2@32$kCMSAttrAppleExpirationTime@64$kCMSAttrNone@0$kCMSAttrSigningTime@8$kCMSAttrSmimeCapabilities@1$kCMSAttrSmimeEncryptionKeyPrefs@2$kCMSAttrSmimeMSEncryptionKeyPrefs@4$kCMSCertificateChain@2$kCMSCertificateChainWithRoot@3$kCMSCertificateChainWithRootOrFail@4$kCMSCertificateNone@0$kCMSCertificateSignerOnly@1$kCMSSignerInvalidCert@4$kCMSSignerInvalidIndex@5$kCMSSignerInvalidSignature@3$kCMSSignerNeedsDetachedContent@2$kCMSSignerUnsigned@0$kCMSSignerValid@1$kDTLSProtocol1@9$kDTLSProtocol12@11$kNeverAuthenticate@0$kSSLAborted@4$kSSLCiphersuiteGroupATS@3$kSSLCiphersuiteGroupATSCompatibility@4$kSSLCiphersuiteGroupCompatibility@1$kSSLCiphersuiteGroupDefault@0$kSSLCiphersuiteGroupLegacy@2$kSSLClientCertNone@0$kSSLClientCertRejected@3$kSSLClientCertRequested@1$kSSLClientCertSent@2$kSSLClientSide@1$kSSLClosed@3$kSSLConnected@2$kSSLDatagramType@1$kSSLHandshake@1$kSSLIdle@0$kSSLProtocol2@1$kSSLProtocol3@2$kSSLProtocol3Only@3$kSSLProtocolAll@6$kSSLProtocolUnknown@0$kSSLServerSide@0$kSSLSessionOptionAllowRenegotiation@8$kSSLSessionOptionAllowServerIdentityChange@5$kSSLSessionOptionBreakOnCertRequested@1$kSSLSessionOptionBreakOnClientAuth@2$kSSLSessionOptionBreakOnClientHello@7$kSSLSessionOptionBreakOnServerAuth@0$kSSLSessionOptionEnableSessionTicke
ts@9$kSSLSessionOptionFallback@6$kSSLSessionOptionFalseStart@3$kSSLSessionOptionSendOneByteRecord@4$kSSLStreamType@0$kSec3DES192@192$kSecAES128@128$kSecAES192@192$kSecAES256@256$kSecAccessControlAnd@32768$kSecAccessControlApplicationPassword@2147483648$kSecAccessControlBiometryAny@2$kSecAccessControlBiometryCurrentSet@8$kSecAccessControlDevicePasscode@16$kSecAccessControlOr@16384$kSecAccessControlPrivateKeyUsage@1073741824$kSecAccessControlTouchIDAny@2$kSecAccessControlTouchIDCurrentSet@8$kSecAccessControlUserPresence@1$kSecAccessControlWatch@32$kSecAccountItemAttr@1633903476$kSecAddEvent@3$kSecAddEventMask@8$kSecAddressItemAttr@1633969266$kSecAlias@1634494835$kSecAppleSharePasswordItemClass@1634953328$kSecAuthenticationTypeAny@0$kSecAuthenticationTypeDPA@1633775716$kSecAuthenticationTypeDefault@1953261156$kSecAuthenticationTypeHTMLForm@1836216166$kSecAuthenticationTypeHTTPBasic@1886680168$kSecAuthenticationTypeHTTPDigest@1685353576$kSecAuthenticationTypeItemAttr@1635023216$kSecAuthenticationTypeMSN@1634628461$kSecAuthenticationTypeNTLM@1835824238$kSecAuthenticationTypeRPA@1633775730$kSecCSBasicValidateOnly@6$kSecCSCalculateCMSDigest@64$kSecCSCheckAllArchitectures@1$kSecCSCheckGatekeeperArchitectures@65$kSecCSCheckNestedCode@8$kSecCSCheckTrustedAnchors@134217728$kSecCSConsiderExpiration@2147483648$kSecCSContentInformation@16$kSecCSDedicatedHost@1$kSecCSDefaultFlags@0$kSecCSDoNotValidateExecutable@2$kSecCSDoNotValidateResources@4$kSecCSDynamicInformation@8$kSecCSEnforceRevocationChecks@1073741824$kSecCSFullReport@32$kSecCSGenerateGuestHash@2$kSecCSInternalInformation@1$kSecCSNoNetworkAccess@536870912$kSecCSQuickCheck@67108864$kSecCSReportProgress@268435456$kSecCSRequirementInformation@4$kSecCSRestrictSidebandData@512$kSecCSRestrictSymlinks@128$kSecCSRestrictToAppLike@256$kSecCSSigningInformation@2$kSecCSSingleThreaded@4096$kSecCSSkipResourceDirectory@32$kSecCSStrictValidate@16$kSecCSUseAllArchitectures@1$kSecCSUseSoftwareSigningCert@1024$kSecCSValidatePEH@2048$kSecCe
rtificateEncoding@1667591779$kSecCertificateItemClass@2147487744$kSecCertificateType@1668577648$kSecCodeSignatureAdhoc@2$kSecCodeSignatureEnforcement@4096$kSecCodeSignatureForceExpiration@1024$kSecCodeSignatureForceHard@256$kSecCodeSignatureForceKill@512$kSecCodeSignatureHashSHA1@1$kSecCodeSignatureHashSHA256@2$kSecCodeSignatureHashSHA256Truncated@3$kSecCodeSignatureHashSHA384@4$kSecCodeSignatureHashSHA512@5$kSecCodeSignatureHost@1$kSecCodeSignatureLibraryValidation@8192$kSecCodeSignatureNoHash@0$kSecCodeSignatureRestrict@2048$kSecCodeSignatureRuntime@65536$kSecCodeStatusDebugged@268435456$kSecCodeStatusHard@256$kSecCodeStatusKill@512$kSecCodeStatusPlatform@67108864$kSecCodeStatusValid@1$kSecCommentItemAttr@1768123764$kSecCreationDateItemAttr@1667522932$kSecCreatorItemAttr@1668445298$kSecCredentialTypeDefault@0$kSecCredentialTypeNoUI@2$kSecCredentialTypeWithUI@1$kSecCrlEncoding@1668443747$kSecCrlType@1668445296$kSecCustomIconItemAttr@1668641641$kSecDataAccessEvent@10$kSecDataAccessEventMask@1024$kSecDefaultChangedEvent@9$kSecDefaultChangedEventMask@512$kSecDefaultKeySize@0$kSecDeleteEvent@4$kSecDeleteEventMask@16$kSecDescriptionItemAttr@1684370275$kSecDesignatedRequirementType@3$kSecEveryEventMask@4294967295$kSecFormatBSAFE@3$kSecFormatNetscapeCertSequence@13$kSecFormatOpenSSL@1$kSecFormatPEMSequence@10$kSecFormatPKCS12@12$kSecFormatPKCS7@11$kSecFormatRawKey@4$kSecFormatSSH@2$kSecFormatSSHv2@14$kSecFormatUnknown@0$kSecFormatWrappedLSH@8$kSecFormatWrappedOpenSSL@6$kSecFormatWrappedPKCS8@5$kSecFormatWrappedSSH@7$kSecFormatX509Cert@9$kSecGenericItemAttr@1734700641$kSecGenericPasswordItemClass@1734700656$kSecGuestRequirementType@2$kSecHonorRoot@256$kSecHostRequirementType@1$kSecInternetPasswordItemClass@1768842612$kSecInvalidRequirementType@6$kSecInvisibleItemAttr@1768846953$kSecItemPemArmour@1$kSecItemTypeAggregate@5$kSecItemTypeCertificate@4$kSecItemTypePrivateKey@1$kSecItemTypePublicKey@2$kSecItemTypeSessionKey@3$kSecItemTypeUnknown@0$kSecKeyAlias@2$kSecKeyAlwaysSens
itive@15$kSecKeyApplicationTag@7$kSecKeyDecrypt@19$kSecKeyDerive@20$kSecKeyEffectiveKeySize@11$kSecKeyEncrypt@18$kSecKeyEndDate@13$kSecKeyExtractable@16$kSecKeyImportOnlyOne@1$kSecKeyKeyClass@0$kSecKeyKeyCreator@8$kSecKeyKeySizeInBits@10$kSecKeyKeyType@9$kSecKeyLabel@6$kSecKeyModifiable@5$kSecKeyNeverExtractable@17$kSecKeyNoAccessControl@4$kSecKeyOperationTypeDecrypt@3$kSecKeyOperationTypeEncrypt@2$kSecKeyOperationTypeKeyExchange@4$kSecKeyOperationTypeSign@0$kSecKeyOperationTypeVerify@1$kSecKeyPermanent@3$kSecKeyPrintName@1$kSecKeyPrivate@4$kSecKeySecurePassphrase@2$kSecKeySensitive@14$kSecKeySign@21$kSecKeySignRecover@23$kSecKeyStartDate@12$kSecKeyUnwrap@26$kSecKeyUsageAll@2147483647$kSecKeyUsageCRLSign@64$kSecKeyUsageContentCommitment@2$kSecKeyUsageCritical@2147483648$kSecKeyUsageDataEncipherment@8$kSecKeyUsageDecipherOnly@256$kSecKeyUsageDigitalSignature@1$kSecKeyUsageEncipherOnly@128$kSecKeyUsageKeyAgreement@16$kSecKeyUsageKeyCertSign@32$kSecKeyUsageKeyEncipherment@4$kSecKeyUsageNonRepudiation@2$kSecKeyUsageUnspecified@0$kSecKeyVerify@22$kSecKeyVerifyRecover@24$kSecKeyWrap@25$kSecKeychainListChangedEvent@11$kSecKeychainListChangedMask@2048$kSecKeychainPromptInvalid@64$kSecKeychainPromptInvalidAct@128$kSecKeychainPromptRequirePassphase@1$kSecKeychainPromptUnsigned@16$kSecKeychainPromptUnsignedAct@32$kSecLabelItemAttr@1818321516$kSecLibraryRequirementType@4$kSecLockEvent@1$kSecLockEventMask@2$kSecMatchBits@3$kSecModDateItemAttr@1835295092$kSecNegativeItemAttr@1852139361$kSecNoGuest@0$kSecPaddingNone@0$kSecPaddingOAEP@2$kSecPaddingPKCS1@1$kSecPaddingPKCS1MD2@32768$kSecPaddingPKCS1MD5@32769$kSecPaddingPKCS1SHA1@32770$kSecPaddingPKCS1SHA224@32771$kSecPaddingPKCS1SHA256@32772$kSecPaddingPKCS1SHA384@32773$kSecPaddingPKCS1SHA512@32774$kSecPaddingSigRaw@16384$kSecPasswordChangedEvent@6$kSecPasswordChangedEventMask@64$kSecPathItemAttr@1885434984$kSecPluginRequirementType@5$kSecPortItemAttr@1886351988$kSecPreferencesDomainCommon@2$kSecPreferencesDomainDynamic@3$kSecPrefere
ncesDomainSystem@1$kSecPreferencesDomainUser@0$kSecPrivateKeyItemClass@16$kSecProtocolItemAttr@1886675820$kSecProtocolTypeAFP@1634103328$kSecProtocolTypeAny@0$kSecProtocolTypeAppleTalk@1635019883$kSecProtocolTypeCIFS@1667851891$kSecProtocolTypeCVSpserver@1668707184$kSecProtocolTypeDAAP@1684103536$kSecProtocolTypeEPPC@1701867619$kSecProtocolTypeFTP@1718906912$kSecProtocolTypeFTPAccount@1718906977$kSecProtocolTypeFTPProxy@1718907000$kSecProtocolTypeFTPS@1718906995$kSecProtocolTypeHTTP@1752462448$kSecProtocolTypeHTTPProxy@1752461432$kSecProtocolTypeHTTPS@1752461427$kSecProtocolTypeHTTPSProxy@1752462200$kSecProtocolTypeIMAP@1768776048$kSecProtocolTypeIMAPS@1768779891$kSecProtocolTypeIPP@1768976416$kSecProtocolTypeIRC@1769104160$kSecProtocolTypeIRCS@1769104243$kSecProtocolTypeLDAP@1818517872$kSecProtocolTypeLDAPS@1818521715$kSecProtocolTypeNNTP@1852732528$kSecProtocolTypeNNTPS@1853124723$kSecProtocolTypePOP3@1886351411$kSecProtocolTypePOP3S@1886351475$kSecProtocolTypeRTSP@1920234352$kSecProtocolTypeRTSPProxy@1920234360$kSecProtocolTypeSMB@1936548384$kSecProtocolTypeSMTP@1936553072$kSecProtocolTypeSOCKS@1936685088$kSecProtocolTypeSSH@1936943136$kSecProtocolTypeSVN@1937141280$kSecProtocolTypeTelnet@1952803950$kSecProtocolTypeTelnetS@1952803955$kSecPublicKeyItemClass@15$kSecRSAMax@4096$kSecRSAMin@1024$kSecReadPermStatus@2$kSecRevocationCRLMethod@2$kSecRevocationNetworkAccessDisabled@16$kSecRevocationOCSPMethod@1$kSecRevocationPreferCRL@4$kSecRevocationRequirePositiveResponse@8$kSecRevocationUseAnyAvailableMethod@3$kSecScriptCodeItemAttr@1935897200$kSecSecurityDomainItemAttr@1935961454$kSecServerItemAttr@1936881266$kSecServiceItemAttr@1937138533$kSecSignatureItemAttr@1936943463$kSecSymmetricKeyItemClass@17$kSecTransformErrorAbortInProgress@19$kSecTransformErrorAborted@20$kSecTransformErrorAttributeNotFound@1$kSecTransformErrorInvalidAlgorithm@6$kSecTransformErrorInvalidConnection@15$kSecTransformErrorInvalidInput@10$kSecTransformErrorInvalidInputDictionary@5$kSecTransformErr
orInvalidLength@7$kSecTransformErrorInvalidOperation@2$kSecTransformErrorInvalidType@8$kSecTransformErrorMissingParameter@14$kSecTransformErrorMoreThanOneOutput@4$kSecTransformErrorNameAlreadyRegistered@11$kSecTransformErrorNotInitializedCorrectly@3$kSecTransformErrorUnsupportedAttribute@12$kSecTransformInvalidArgument@21$kSecTransformInvalidOverride@17$kSecTransformMetaAttributeCanCycle@7$kSecTransformMetaAttributeDeferred@5$kSecTransformMetaAttributeExternalize@8$kSecTransformMetaAttributeHasInboundConnection@10$kSecTransformMetaAttributeHasOutboundConnections@9$kSecTransformMetaAttributeName@1$kSecTransformMetaAttributeRef@2$kSecTransformMetaAttributeRequired@3$kSecTransformMetaAttributeRequiresOutboundConnection@4$kSecTransformMetaAttributeStream@6$kSecTransformMetaAttributeValue@0$kSecTransformOperationNotSupportedOnGroup@13$kSecTransformTransformIsExecuting@16$kSecTransformTransformIsNotRegistered@18$kSecTrustOptionAllowExpired@1$kSecTrustOptionAllowExpiredRoot@8$kSecTrustOptionFetchIssuerFromNet@4$kSecTrustOptionImplicitAnchors@64$kSecTrustOptionLeafIsCA@2$kSecTrustOptionRequireRevPerCert@16$kSecTrustOptionUseTrustSettings@32$kSecTrustResultConfirm@2$kSecTrustResultDeny@3$kSecTrustResultFatalTrustFailure@6$kSecTrustResultInvalid@0$kSecTrustResultOtherError@7$kSecTrustResultProceed@1$kSecTrustResultRecoverableTrustFailure@5$kSecTrustResultUnspecified@4$kSecTrustSettingsChangedEvent@12$kSecTrustSettingsChangedEventMask@4096$kSecTrustSettingsDefaultRootCertSetting@-1$kSecTrustSettingsDomainAdmin@1$kSecTrustSettingsDomainSystem@2$kSecTrustSettingsDomainUser@0$kSecTrustSettingsKeyUseAny@4294967295$kSecTrustSettingsKeyUseEnDecryptData@2$kSecTrustSettingsKeyUseEnDecryptKey@4$kSecTrustSettingsKeyUseKeyExchange@32$kSecTrustSettingsKeyUseSignCert@8$kSecTrustSettingsKeyUseSignRevocation@16$kSecTrustSettingsKeyUseSignature@1$kSecTrustSettingsResultDeny@3$kSecTrustSettingsResultInvalid@0$kSecTrustSettingsResultTrustAsRoot@2$kSecTrustSettingsResultTrustRoot@1$kSecTrustSett
ingsResultUnspecified@4$kSecTypeItemAttr@1954115685$kSecUnlockEvent@2$kSecUnlockEventMask@4$kSecUnlockStateStatus@1$kSecUpdateEvent@5$kSecUpdateEventMask@32$kSecUseOnlyGID@2$kSecUseOnlyUID@1$kSecVolumeItemAttr@1986817381$kSecWritePermStatus@4$kSecp192r1@192$kSecp256r1@256$kSecp384r1@384$kSecp521r1@521$kSecureDownloadDoNotEvaluateSigner@0$kSecureDownloadEvaluateSigner@1$kSecureDownloadFailEvaluation@2$kTLSProtocol1@4$kTLSProtocol11@7$kTLSProtocol12@8$kTLSProtocol13@10$kTLSProtocol1Only@5$kTLSProtocolMaxSupported@999$kTryAuthenticate@2$noSecuritySession@0$sessionHasGraphicAccess@16$sessionHasTTY@32$sessionIsRemote@4096$sessionIsRoot@1$sessionKeepCurrentBootstrap@32768$tls_ciphersuite_AES_128_GCM_SHA256@4865$tls_ciphersuite_AES_256_GCM_SHA384@4866$tls_ciphersuite_CHACHA20_POLY1305_SHA256@4867$tls_ciphersuite_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA@49160$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_128_CBC_SHA@49161$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256@49187$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256@49195$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_256_CBC_SHA@49162$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384@49188$tls_ciphersuite_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384@49196$tls_ciphersuite_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256@52393$tls_ciphersuite_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA@49170$tls_ciphersuite_ECDHE_RSA_WITH_AES_128_CBC_SHA@49171$tls_ciphersuite_ECDHE_RSA_WITH_AES_128_CBC_SHA256@49191$tls_ciphersuite_ECDHE_RSA_WITH_AES_128_GCM_SHA256@49199$tls_ciphersuite_ECDHE_RSA_WITH_AES_256_CBC_SHA@49172$tls_ciphersuite_ECDHE_RSA_WITH_AES_256_CBC_SHA384@49192$tls_ciphersuite_ECDHE_RSA_WITH_AES_256_GCM_SHA384@49200$tls_ciphersuite_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256@52392$tls_ciphersuite_RSA_WITH_3DES_EDE_CBC_SHA@10$tls_ciphersuite_RSA_WITH_AES_128_CBC_SHA@47$tls_ciphersuite_RSA_WITH_AES_128_CBC_SHA256@60$tls_ciphersuite_RSA_WITH_AES_128_GCM_SHA256@156$tls_ciphersuite_RSA_WITH_AES_256_CBC_SHA@53$tls_ciphersuite_RSA_WITH_AES_256_CBC_SHA256@61$tls_ciphersuite_
RSA_WITH_AES_256_GCM_SHA384@157$tls_ciphersuite_group_ats@3$tls_ciphersuite_group_ats_compatibility@4$tls_ciphersuite_group_compatibility@1$tls_ciphersuite_group_default@0$tls_ciphersuite_group_legacy@2$tls_protocol_version_DTLSv10@65279$tls_protocol_version_DTLSv12@65277$tls_protocol_version_TLSv10@769$tls_protocol_version_TLSv11@770$tls_protocol_version_TLSv12@771$tls_protocol_version_TLSv13@772$"""
# Extend the framework's miscellaneous-constant table with string-valued
# constants: kAuthorization* keys map to raw byte strings, while the
# kSecTrustSettings* keys map to CFString names resolved at load time.
# Entries are listed in sorted key order for easier scanning.
misc.update(
    {
        "kAuthorizationComment": b"comment",
        "kAuthorizationEnvironmentIcon": b"icon",
        "kAuthorizationEnvironmentPassword": b"password",
        "kAuthorizationEnvironmentPrompt": b"prompt",
        "kAuthorizationEnvironmentShared": b"shared",
        "kAuthorizationEnvironmentUsername": b"username",
        "kAuthorizationPamResult": b"pam_result",
        "kAuthorizationRightExecute": b"system.privilege.admin",
        "kAuthorizationRightRule": b"rule",
        "kAuthorizationRuleAuthenticateAsAdmin": b"authenticate-admin",
        "kAuthorizationRuleAuthenticateAsSessionUser": b"authenticate-session-owner",
        "kAuthorizationRuleClassAllow": b"allow",
        "kAuthorizationRuleClassDeny": b"deny",
        "kAuthorizationRuleIsAdmin": b"is-admin",
        "kSecTrustSettingsAllowedError": "kSecTrustSettingsAllowedError",
        "kSecTrustSettingsApplication": "kSecTrustSettingsApplication",
        "kSecTrustSettingsKeyUsage": "kSecTrustSettingsKeyUsage",
        "kSecTrustSettingsPolicy": "kSecTrustSettingsPolicy",
        "kSecTrustSettingsPolicyString": "kSecTrustSettingsPolicyString",
        "kSecTrustSettingsResult": "kSecTrustSettingsResult",
    }
)
functions = {
"CMSEncoderGetCertificateChainMode": (b"i@o^I",),
"SecKeyGeneratePair": (
b"i@o^@o^@",
"",
{
"arguments": {
1: {"already_cfretained": True},
2: {"already_cfretained": True},
}
},
),
"SecCodeCopyPath": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecCertificateCopySerialNumber": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeyIsAlgorithmSupported": (sel32or64(b"Z@i@", b"Z@q@"),),
"SecTrustSetPolicies": (b"i@@",),
"SSLSetError": (b"i@i",),
"SecTransformCustomSetAttribute": (
sel32or64(
b"@^{OpaqueSecTransformImplementation=}@i@",
b"@^{OpaqueSecTransformImplementation=}@q@",
),
),
"SSLGetSessionOption": (b"i@io^Z",),
"SecStaticCodeCreateWithPath": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTrustGetCertificateAtIndex": (sel32or64(b"@@i", b"@@q"),),
"SecTransformSetTransformAction": (
b"@^{OpaqueSecTransformImplementation=}@@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"@"},
"arguments": {0: {"type": "^v"}},
}
}
}
},
),
"sec_protocol_options_set_tls_tickets_enabled": (b"vB",),
"SSLGetSessionState": (b"i@o^i",),
"SecItemImport": (
b"i@@N^IN^IIn^{_SecItemImportExportKeyParameters=II@@@@@@}@o^@",
"",
{"arguments": {7: {"already_cfretained": True}}},
),
"SSLGetMaxDatagramRecordSize": (b"i@o^L",),
"sec_protocol_options_set_tls_ocsp_enabled": (b"vB",),
"SecTrustEvaluate": (b"i@o^I",),
"CMSDecoderIsContentEncrypted": (b"i@o^Z",),
"SecTaskCreateFromSelf": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecureDownloadCopyCreationDate": (
b"i^{OpaqueSecureDownload=}o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecTransformSetAttributeAction": (
b"@^{OpaqueSecTransformImplementation=}@@@?",
"",
{
"arguments": {
3: {
"callable": {
"retval": {"type": b"@"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
},
}
}
}
},
),
"sec_certificate_copy_ref": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SSLCopyDistinguishedNames": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecTrustSetExceptions": (b"B@@",),
"SecItemAdd": (b"i@o^@", "", {"arguments": {1: {"already_cfretained": True}}}),
"SecKeychainItemCopyKeychain": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"sec_protocol_metadata_get_server_name": (
b"^t@",
"",
{"retval": {"c_array_delimited_by_null": True}},
),
"SecPolicyCreateRevocation": (
sel32or64(b"@I", b"@Q"),
"",
{"retval": {"already_cfretained": True}},
),
"SecKeyCreateEncryptedData": (
b"@@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecCertificateCopyNormalizedSubjectContent": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeyCreateSignature": (b"@@@@o^@", "", {"retval": {"already_cfretained": True}}),
"CMSDecoderCreate": (b"io^@", "", {"arguments": {0: {"already_cfretained": True}}}),
"CMSDecoderCopyAllCerts": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"sec_protocol_options_add_tls_ciphersuite_group": (b"v@S",),
"SecDigestTransformCreate": (
sel32or64(b"@@io^@", b"@@qo^@"),
"",
{"retval": {"already_cfretained": True}},
),
"SSLSetEncryptionCertificate": (b"i@@",),
"SecHostCreateGuest": (b"iII@@Io^I",),
"SecTrustSettingsCopyCertificates": (
b"iIo^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainUnlock": (
b"i@In^vZ",
"",
{"arguments": {2: {"c_array_length_in_arg": 1}}},
),
"SSLSetSessionTicketsEnabled": (b"i@Z",),
"SecHostSelectGuest": (b"iII",),
"AuthorizationCopyPrivilegedReference": (b"io^^{AuthorizationOpaqueRef=}",),
"CMSDecoderSetDetachedContent": (b"i@@",),
"sec_identity_create": (b"@@", "", {"retval": {"already_retained": True}}),
"SSLAddDistinguishedName": (
b"i@n^vL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SecKeychainItemCopyFromPersistentReference": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecTransformCopyExternalRepresentation": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_options_append_tls_ciphersuite_group": (b"v@S",),
"SecStaticCodeCheckValidityWithErrors": (
b"i@I@o^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SSLGetNegotiatedProtocolVersion": (b"i@o^i",),
"sec_protocol_metadata_access_distinguished_names": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
},
"callable_retained": True,
}
}
},
),
"SecTransformPushbackAttribute": (b"@^{OpaqueSecTransformImplementation=}@@",),
"SecAccessCreateWithOwnerAndACL": (
b"@III@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecTrustCopyResult": (b"@@", "", {"retval": {"already_cfretained": True}}),
"CMSDecoderCopySignerSigningTime": (
b"i@Lo^d",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTrustSetNetworkFetchAllowed": (b"i@Z",),
"SSLSetCertificate": (b"i@@",),
"SecACLSetContents": (b"i@@@S",),
"sec_protocol_options_append_tls_ciphersuite": (b"v@@",),
"SecTrustGetCertificateCount": (sel32or64(b"i@", b"q@"),),
"SecPKCS12Import": (
b"i@@o^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTransformSetAttribute": (b"Z@@@o^@",),
"SecTrustSettingsSetTrustSettings": (b"i@I@",),
"SecKeyCopyExternalRepresentation": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecTransformCreateGroupTransform": (
b"@",
"",
{"retval": {"already_cfretained": True}},
),
"CMSDecoderSetSearchKeychain": (b"i@@",),
"SecTrustedApplicationSetData": (b"i@@",),
"SSLSetSessionOption": (b"i@iZ",),
"sec_protocol_options_set_peer_authentication_required": (b"vB",),
"SecKeychainSetPreferenceDomain": (b"ii",),
"SecTransformCreateFromExternalRepresentation": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SSLGetBufferedReadSize": (b"i@o^L",),
"SecTrustSetVerifyDate": (b"i@@",),
"sec_trust_copy_ref": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecACLGetTypeID": (sel32or64(b"I", b"Q"),),
"SSLContextGetTypeID": (sel32or64(b"I", b"Q"),),
"SessionCreate": (b"iII",),
"sec_identity_access_certificates": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
}
}
}
},
),
"SecAccessCopyOwnerAndACL": (
b"i@o^Io^Io^Io^@",
"",
{"arguments": {4: {"already_cfretained": True}}},
),
"SecPolicyCreateWithProperties": (
b"@@@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeychainLockAll": (b"i",),
"SSLGetPeerDomainName": (
b"i@o^tN^L",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"sec_protocol_metadata_access_ocsp_response": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
}
}
}
},
),
"SecACLCopyContents": (
b"i@o^@o^@o^S",
"",
{
"arguments": {
1: {"already_cfretained": True},
2: {"already_cfretained": True},
}
},
),
"SecCodeMapMemory": (b"i@I",),
"CMSDecoderCopyContent": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecAccessCreate": (
b"i@@o^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTransformExecute": (b"@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecCertificateCopyEmailAddresses": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"AuthorizationMakeExternalForm": (
b"i^{AuthorizationOpaqueRef=}o^{_AuthorizationExternalForm=[32C]}",
),
"SecCodeCheckValidityWithErrors": (
b"i@I@o^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SecCodeCopyDesignatedRequirement": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTransformNoData": (b"@",),
"SecTransformRegister": (
b"Z@^?o^@",
"",
{
"arguments": {
1: {
"callable": {
"retval": {
"callable": {
"retval": {"type": "@"},
"arguments": {0: {"type": "^v"}},
},
"type": b"@?",
},
"arguments": {
0: {"type": "@"},
1: {"type": "@"},
2: {"type": "^{OpaqueSecTransformImplementation=}"},
},
},
"callable_retained": True,
}
}
},
),
"SecCodeCopyStaticCode": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"CMSEncoderAddSignedAttributes": (b"i@I",),
"SecIdentityCopySystemIdentity": (
b"i@o^@o^@",
"",
{
"arguments": {
1: {"already_cfretained": True},
2: {"already_cfretained": True},
}
},
),
"SecureDownloadGetDownloadSize": (b"i^{OpaqueSecureDownload=}o^q",),
"SecKeychainItemDelete": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecStaticCodeCreateWithPathAndAttributes": (
b"i@I@o^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"sec_identity_create_with_certificates": (
b"@@@",
"",
{"retval": {"already_retained": True}},
),
"SSLCopyPeerTrust": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeyVerifySignature": (b"Z@@@@o^@",),
"AuthorizationRightGet": (
b"i^t^@",
"",
{
"arguments": {
0: {"c_array_delimited_by_null": True, "type_modifier": "n"},
1: {"already_retained": True, "type_modifier": "o"},
}
},
),
"SecDecryptTransformCreate": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecCertificateCopyNormalizedIssuerContent": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecPolicyCreateBasicX509": (b"@", "", {"retval": {"already_cfretained": True}}),
"sec_protocol_options_set_tls_false_start_enabled": (b"vB",),
"SecKeychainLock": (b"i@",),
"SecTrustGetNetworkFetchAllowed": (b"i@o^Z",),
"SecureDownloadCreateWithTicket": (
b"i@^?^v^?^vo^^{OpaqueSecureDownload=}",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"i"},
"arguments": {0: {"type": "@"}, 1: {"type": "^v"}},
},
"callable_retained": True,
},
3: {
"callable": {
"retval": {"type": b"i"},
"arguments": {
0: {"type": "@"},
1: {"type": "i"},
2: {"type": "^v"},
},
},
"callable_retained": True,
},
}
},
),
"CMSEncoderCopySupportingCerts": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainItemSetAccess": (b"i@@",),
"sec_protocol_options_set_tls_resumption_enabled": (b"vB",),
"SSLHandshake": (b"i@",),
"SecKeychainAddCallback": (
b"i^?I^v",
"",
{
"arguments": {
0: {
"callable": {
"retval": {"type": b"i"},
"arguments": {
0: {"type": "I"},
1: {"type": "n^{SecKeychainCallbackInfo=I@@i}"},
2: {"type": "^v"},
},
},
"callable_retained": True,
}
}
},
),
"SecureDownloadCopyURLs": (
b"i^{OpaqueSecureDownload=}o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"CMSEncoderAddRecipients": (b"i@@",),
"sec_protocol_options_set_tls_is_fallback_attempt": (b"vB",),
"SecTrustCopyPublicKey": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecKeyCreateWithData": (b"@@@o^@", "", {"retval": {"already_cfretained": True}}),
"sec_protocol_metadata_get_negotiated_tls_protocol_version": (b"S@",),
"SecKeychainDelete": (b"i@",),
"sec_identity_copy_certificates_ref": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"AuthorizationRightSet": (
b"i^{AuthorizationOpaqueRef=}^t@@@@",
"",
{"arguments": {1: {"c_array_delimited_by_null": True, "type_modifier": "n"}}},
),
"SecACLRemove": (b"i@",),
"CMSDecoderCopyDetachedContent": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainCreate": (
b"in^tIn^vZ@o^@",
"",
{
"arguments": {
0: {"c_array_delimited_by_null": True},
2: {"c_array_length_in_arg": 1},
5: {"already_cfretained": True},
}
},
),
"SecEncryptTransformGetTypeID": (sel32or64(b"I", b"Q"),),
"SSLGetDiffieHellmanParams": (
b"i@o^vN^L",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SSLSetSessionConfig": (b"i@@",),
"SecCertificateCreateWithData": (
b"@@@",
"",
{"retval": {"already_cfretained": True}},
),
"SecRequirementCopyData": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"CMSDecoderCopyEncapsulatedContentType": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecRequirementCreateWithStringAndErrors": (
b"i@Io^@o^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SSLReHandshake": (b"i@",),
"sec_protocol_metadata_get_negotiated_ciphersuite": (b"I@",),
"SSLCopyRequestedPeerName": (
b"i@o^tN^L",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"CMSDecoderCopySignerCert": (
b"i@Lo^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTrustCopyExceptions": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecPolicyCreateSSL": (b"@Z@", "", {"retval": {"already_cfretained": True}}),
"SecKeychainItemCreatePersistentReference": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SSLCopyCertificateAuthorities": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecTransformConnectTransforms": (b"@@@@@@o^@",),
"SecAccessCopyACLList": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecHostSelectedGuest": (b"iIo^I",),
"sec_protocol_options_get_default_min_tls_protocol_version": (b"i@",),
"SecAccessCopyMatchingACLList": (
b"@@@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeychainItemCreateCopy": (
b"i@@@o^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SecKeyGeneratePairAsync": (
b"v@^{dispatch_queue_s=}@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
3: {"type": "@"},
},
}
}
}
},
),
"SecCertificateCopyData": (b"@@", "", {"retval": {"already_cfretained": True}}),
"sec_protocol_options_set_tls_max_version": (b"v@i",),
"SecKeyGenerateSymmetric": (b"@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecHostSetGuestStatus": (b"iII@I",),
"CMSDecoderCopySignerStatus": (b"i@L@Zo^Io^i",),
"SecTrustCopyCustomAnchorCertificates": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"CMSEncoderCopySignerTimestampWithPolicy": (b"i@@Lo^d",),
"SecTrustSettingsImportExternalRepresentation": (b"iI@",),
"SecTrustCreateWithCertificates": (
b"i@@o^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecHostSetHostingPort": (b"iII",),
"sec_protocol_options_set_tls_sct_enabled": (b"vB",),
"SecDecryptTransformGetTypeID": (sel32or64(b"I", b"Q"),),
"CMSEncoderUpdateContent": (
b"i@n^vL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SSLGetNegotiatedCipher": (b"i@o^i",),
"SecTrustCopyProperties": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecKeyCopyKeyExchangeResult": (
b"@@@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecTrustEvaluateWithError": (b"B@o^@",),
"SecPolicyGetTypeID": (sel32or64(b"I", b"Q"),),
"SessionGetInfo": (
b"iI^I^I",
"",
{"arguments": {1: {"type_modifier": "o"}, 2: {"type_modifier": "o"}}},
),
"sec_protocol_metadata_access_supported_signature_algorithms": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "S"}},
}
}
}
},
),
"SecTransformFindByName": (b"@@@",),
"SecIdentityCreateWithCertificate": (
b"i@@o^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTaskCreateWithAuditToken": (
b"@@{?=[8I]}",
"",
{"retval": {"already_cfretained": True}},
),
"SecCertificateCopyValues": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"AuthorizationCreateFromExternalForm": (
b"in^{_AuthorizationExternalForm=[32C]}o^^{AuthorizationOpaqueRef=}",
),
"CMSDecoderUpdateMessage": (
b"i@n^vL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"CMSEncoderCopyEncapsulatedContentType": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SSLGetConnection": (b"i@o^@",),
"SecKeychainSetUserInteractionAllowed": (b"iZ",),
"SecTrustSetAnchorCertificatesOnly": (b"i@Z",),
"SSLGetPeerID": (b"i@o^vN^L", "", {"arguments": {1: {"c_array_length_in_arg": 2}}}),
"SecTransformCreateReadTransformWithReadStream": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_options_set_max_tls_protocol_version": (b"v@i",),
"SecRequirementGetTypeID": (sel32or64(b"I", b"Q"),),
"SSLCreateContext": (b"@@ii", "", {"retval": {"already_cfretained": True}}),
"sec_protocol_options_set_challenge_block": (
b"v@@?^{dispatch_queue_s}",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {
"callable": {
"retval": {"type": "v"},
"arguments": {0: {"type": "^v"}},
},
"type": "@?",
},
},
}
}
}
},
),
"CMSEncoderGetHasDetachedContent": (b"i@o^Z",),
"SSLSetConnection": (b"i@@",),
"SecKeychainRemoveCallback": (
b"i^?",
"",
{
"arguments": {
0: {
"callable": {
"retval": {"type": b"i"},
"arguments": {
0: {"type": "I"},
1: {"type": "n^{SecKeychainCallbackInfo=I@@i}"},
2: {"type": "^v"},
},
},
"callable_retained": True,
}
}
},
),
"SecCertificateCopyPublicKey": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecPolicyCopyProperties": (b"@@", "", {"retval": {"already_cfretained": True}}),
"CMSEncoderSetEncapsulatedContentTypeOID": (b"i@@",),
"SecDecodeTransformCreate": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_metadata_get_negotiated_protocol": (
b"^t@",
"",
{"retval": {"c_array_delimited_by_null": True}},
),
"SSLSetALPNProtocols": (b"i@@",),
"SSLGetProtocolVersionMin": (b"i@o^i",),
"SSLSetPeerDomainName": (
b"i@n^tL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SecSignTransformCreate": (b"@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecTransformGetTypeID": (sel32or64(b"I", b"Q"),),
"SecKeychainGetPath": (
b"i@N^Io^t",
"",
{"arguments": {2: {"c_array_length_in_arg": 1}}},
),
"SecCertificateCopySerialNumberData": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecCertificateCopyCommonName": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SSLGetSupportedCiphers": (
b"i@o^iN^L",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"sec_trust_create": (b"@@", "", {"retval": {"already_retained": True}}),
"CMSEncoderSetSignerAlgorithm": (b"i@@",),
"SecCertificateAddToKeychain": (b"i@@",),
"SecKeyGetBlockSize": (b"L@",),
"SecIdentityCopyPrivateKey": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"CMSEncoderAddSupportingCerts": (b"i@@",),
"sec_certificate_create": (b"@@@", "", {"retval": {"already_retained": True}}),
"SSLSetMaxDatagramRecordSize": (b"i@L",),
"CMSDecoderCopySignerTimestamp": (
b"i@Lo^d",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SSLSetDatagramHelloCookie": (
b"i@n^vL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"sec_identity_copy_ref": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecTaskGetTypeID": (sel32or64(b"I", b"Q"),),
"sec_protocol_options_get_default_max_tls_protocol_version": (b"i@",),
"SSLSetIOFuncs": (
b"i@^?^?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"i"},
"arguments": {
0: {"type": "@"},
1: {"type": "o^v", "c_array_length_in_arg": 2},
2: {"type": "N^L"},
},
},
"callable_retained": True,
},
2: {
"callable": {
"retval": {"type": b"i"},
"arguments": {
0: {"type": "@"},
1: {"type": "n^v", "c_array_length_in_arg": 2},
2: {"type": "N^L"},
},
},
"callable_retained": True,
},
}
},
),
"SecKeychainOpen": (
b"in^to^@",
"",
{
"arguments": {
0: {"c_array_delimited_by_null": True},
1: {"already_cfretained": True},
}
},
),
"SecCodeGetTypeID": (sel32or64(b"I", b"Q"),),
"SecRequirementCreateWithData": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"sec_protocol_options_set_min_tls_protocol_version": (b"v@i",),
"SecCodeCopySigningInformation": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SSLSetProtocolVersionMax": (b"i@i",),
"sec_protocol_metadata_access_pre_shared_keys": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
},
}
}
}
},
),
"SecKeychainGetStatus": (b"i@o^I",),
"SSLGetClientCertificateState": (b"i@o^i",),
"CMSDecoderCopySignerTimestampCertificates": (
b"i@Lo^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecureDownloadFinished": (b"i^{OpaqueSecureDownload=}",),
"CMSEncoderSetCertificateChainMode": (b"i@I",),
"SecAccessControlCreateWithFlags": (
sel32or64(b"@@@Io^@", b"@@@Qo^@"),
"",
{"retval": {"already_cfretained": True}},
),
"SecTrustEvaluateAsync": (
b"i@^{dispatch_queue_s}@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "I"},
},
}
}
}
},
),
"SecureDownloadCopyName": (
b"i^{OpaqueSecureDownload=}o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SSLCopyALPNProtocols": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainCopySearchList": (
b"io^@",
"",
{"arguments": {0: {"already_cfretained": True}}},
),
"SecDigestTransformGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTrustSetOptions": (b"i@I",),
"SSLGetNumberEnabledCiphers": (b"i@o^L",),
"SecIdentityGetTypeID": (sel32or64(b"I", b"Q"),),
"SecKeychainCopyAccess": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"sec_protocol_options_set_tls_pre_shared_key_identity_hint": (b"v@@",),
"CMSEncoderGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTransformGetAttribute": (b"@@@",),
"CMSDecoderGetNumSigners": (b"i@o^L",),
"SecCertificateCopyPreferred": (
b"@@@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_options_set_local_identity": (b"v@@",),
"sec_protocol_options_set_tls_min_version": (b"v@i",),
"SecRandomCopyBytes": (
b"i^{__SecRandom=}L^v",
"",
{"arguments": {2: {"type_modifier": "o", "c_array_length_in_arg": 1}}},
),
"CMSDecoderFinalizeMessage": (b"i@",),
"SecKeyWrapSymmetric": (b"@@@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecVerifyTransformCreate": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecCodeCheckValidity": (b"i@I@",),
"CMSEncoderCopyEncodedContent": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"CMSEncoderAddSigners": (b"i@@",),
"sec_protocol_metadata_copy_peer_public_key": (
b"@@@",
"",
{"retval": {"already_retained": True}},
),
"AuthorizationFree": (b"i^{AuthorizationOpaqueRef=}I",),
"SecCopyErrorMessageString": (
b"@i^v",
"",
{"retval": {"already_cfretained": True}},
),
"CMSEncoderCopySigners": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecureDownloadRelease": (b"i^{OpaqueSecureDownload=}",),
"SecTrustSettingsCopyModificationDate": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecureDownloadUpdateWithData": (b"i^{OpaqueSecureDownload=}@",),
"SecKeychainCopyDomainDefault": (
b"iio^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecItemDelete": (b"i@",),
"SecStaticCodeCheckValidity": (b"i@I@",),
"sec_protocol_metadata_create_secret_with_context": (
b"@@L^tL^vL",
"",
{
"retval": {"already_retained": True},
"arguments": {
2: {"type_modifier": "n", "c_array_length_in_arg": 1},
4: {"type_modifier": "n", "c_array_length_in_arg": 3},
},
},
),
"SecCodeCopyHost": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"sec_protocol_metadata_challenge_parameters_are_equal": (b"B@@",),
"SecTrustedApplicationGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTransformSetDataAction": (
b"@^{OpaqueSecTransformImplementation=}@@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"@"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
}
}
}
},
),
"SecKeychainAddGenericPassword": (
b"i@In^tIn^tIn^vo^@",
"",
{
"arguments": {
2: {"c_array_length_in_arg": 1},
4: {"c_array_length_in_arg": 3},
6: {"c_array_length_in_arg": 5},
7: {"already_cfretained": True},
}
},
),
"sec_protocol_options_add_tls_application_protocol": (
b"v@^t",
"",
{"arguments": {1: {"c_array_delimited_by_null": True, "type_modifier": "n"}}},
),
"CMSDecoderCopySignerTimestampWithPolicy": (
b"i@@Lo^d",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SSLRead": (
b"i@o^vLo^L",
"",
{"arguments": {1: {"c_array_length_in_arg": (2, 3)}}},
),
"SecTaskCopyValueForEntitlement": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_metadata_get_negotiated_tls_ciphersuite": (b"I@",),
"SSLGetDatagramWriteSize": (b"i@o^L",),
"SecIdentitySetPreferred": (b"i@@@",),
"SecTrustCopyAnchorCertificates": (
b"io^@",
"",
{"arguments": {0: {"already_cfretained": True}}},
),
"sec_protocol_options_add_tls_ciphersuite": (b"v@i",),
"SecKeychainSetDomainDefault": (b"ii@",),
"sec_protocol_options_get_default_max_dtls_protocol_version": (b"i@",),
"SecCertificateGetTypeID": (sel32or64(b"I", b"Q"),),
"SecCertificateCopyShortDescription": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SSLCopyRequestedPeerNameLength": (b"i@o^L",),
"CMSEncoderCopySignerTimestamp": (b"i@Lo^d",),
"SSLSetClientSideAuthenticate": (b"i@i",),
"sec_protocol_metadata_get_early_data_accepted": (b"B@",),
"SecCodeCopySelf": (b"iIo^@", "", {"arguments": {1: {"already_cfretained": True}}}),
"SSLGetNumberSupportedCiphers": (b"i@o^L",),
"SecIdentityCopyPreferred": (b"@@@@", "", {"retval": {"already_cfretained": True}}),
"SecACLUpdateAuthorizations": (b"i@@",),
"SecAccessGetTypeID": (sel32or64(b"I", b"Q"),),
"SecKeychainItemGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTrustSetKeychains": (b"i@@",),
"SSLGetProtocolVersionMax": (b"i@o^i",),
"SecKeyCreateDecryptedData": (
b"@@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeyCopyPublicKey": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecGroupTransformGetTypeID": (sel32or64(b"I", b"Q"),),
"SecKeychainCopySettings": (b"i@o^{SecKeychainSettings=IZZI}",),
"SecTrustSettingsCreateExternalRepresentation": (
b"iIo^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecCertificateCopyKey": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecTrustGetTrustResult": (b"i@o^I",),
"SSLSetDiffieHellmanParams": (
b"i@n^vL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SecTrustSettingsRemoveTrustSettings": (b"i@I",),
"SecRequirementCreateWithString": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecKeychainSetDomainSearchList": (b"ii@",),
"sec_protocol_options_set_tls_diffie_hellman_parameters": (b"v@@",),
"SecRequirementCopyString": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SSLWrite": (b"i@n^vLo^L", "", {"arguments": {1: {"c_array_length_in_arg": 2}}}),
"sec_protocol_metadata_peers_are_equal": (b"B@@",),
"SSLSetPeerID": (b"i@n^vL", "", {"arguments": {1: {"c_array_length_in_arg": 2}}}),
"sec_protocol_options_get_default_min_dtls_protocol_version": (b"i@",),
"SecTransformCreate": (b"@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecCertificateCopyNormalizedSubjectSequence": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeychainCopyDomainSearchList": (
b"iio^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeyCreateFromData": (b"@@@o^@", "", {"retval": {"already_cfretained": True}}),
"SecTaskCopyValuesForEntitlements": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecStaticCodeGetTypeID": (sel32or64(b"I", b"Q"),),
"SecItemExport": (
b"i@IIn^{_SecItemImportExportKeyParameters=II@@@@@@}o^@",
"",
{"arguments": {4: {"already_cfretained": True}}},
),
"SSLSetProtocolVersionMin": (b"i@i",),
"SecCertificateCopyLongDescription": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeychainSetAccess": (b"i@@",),
"sec_protocol_options_set_pre_shared_key_selection_block": (
b"v@@?^{dispatch_queue_s}",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
3: {
"callable": {
"retval": {"type": "v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
},
"type": "@?",
},
},
}
}
}
},
),
"sec_protocol_options_add_pre_shared_key": (b"v@",),
"SecKeychainSetSearchList": (b"i@",),
"CMSEncoderCopyRecipients": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"sec_protocol_options_set_key_update_block": (
b"v@@?^{dispatch_queue_s}",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {
"callable": {
"retval": {"type": "v"},
"arguments": {0: {"type": "^v"}},
},
"type": "@?",
},
},
}
}
}
},
),
"SecACLCopyAuthorizations": (b"@@", "", {"retval": {"already_cfretained": True}}),
"SecTrustEvaluateAsyncWithError": (
b"i@^{dispatch_queue_s}@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "B"},
3: {"type": "@"},
},
}
}
}
},
),
"SSLGetEnabledCiphers": (
b"i@o^iN^L",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"SecKeychainGetPreferenceDomain": (b"io^i",),
"SecKeychainGetVersion": (b"io^I",),
"SecKeyCreateRandomKey": (b"@@o^@", "", {"retval": {"already_cfretained": True}}),
"sec_protocol_options_set_tls_renegotiation_enabled": (b"vB",),
"SSLGetPeerDomainNameLength": (b"i@o^L",),
"sec_protocol_options_are_equal": (b"B@@",),
"SecCertificateCopySubjectSummary": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"CMSDecoderCopySignerEmailAddress": (
b"i@Lo^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecTrustSetSignedCertificateTimestamps": (b"i@@",),
"SecTrustSetOCSPResponse": (b"i@@",),
"SSLSetCertificateAuthorities": (b"i@@Z",),
"SecACLCreateWithSimpleContents": (
b"i@@@So^@",
"",
{"arguments": {4: {"already_cfretained": True}}},
),
"SecTrustGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTrustedApplicationCreateFromPath": (
b"i^to^@",
"",
{
"arguments": {
0: {"c_array_delimited_by_null": True, "type_modifier": "n"},
1: {"already_cfretained": True},
}
},
),
"SSLSetOCSPResponse": (b"i@@",),
"SecTrustGetVerifyTime": (b"d@",),
"SecTransformExecuteAsync": (
b"v@^{dispatch_queue_s=}@?",
"",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
3: {"type": "Z"},
},
}
}
}
},
),
"SecIdentityCopyCertificate": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainAddInternetPassword": (
b"i@In^tIn^tIn^tIn^tSIIIn^vo^@",
"",
{
"arguments": {
2: {"c_array_length_in_arg": 1},
4: {"c_array_length_in_arg": 3},
6: {"c_array_length_in_arg": 5},
8: {"c_array_length_in_arg": 7},
13: {"c_array_length_in_arg": 12},
14: {"already_cfretained": True},
}
},
),
"SecKeychainSetSettings": (b"i@n^{SecKeychainSettings=IZZI}",),
"SecIdentitySetSystemIdentity": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"CMSEncoderSetHasDetachedContent": (b"i@Z",),
"SecEncodeTransformCreate": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecKeychainGetUserInteractionAllowed": (b"io^Z",),
"sec_protocol_metadata_create_secret": (
b"@@L^tL",
"",
{
"retval": {"already_retained": True},
"arguments": {2: {"type_modifier": "n", "c_array_length_in_arg": 1}},
},
),
"SecTrustCopyPolicies": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainSetDefault": (b"i@",),
"SecCodeCopyGuestWithAttributes": (
b"i@@Io^@",
"",
{"arguments": {3: {"already_cfretained": True}}},
),
"SecTrustSetAnchorCertificates": (b"i@@",),
"SecKeychainGetTypeID": (sel32or64(b"I", b"Q"),),
"SecCertificateSetPreferred": (b"i@@@",),
"SecCertificateCopyNormalizedIssuerSequence": (
b"@@",
"",
{"retval": {"already_cfretained": True}},
),
"sec_protocol_options_set_tls_server_name": (
b"v@^t",
"",
{"arguments": {1: {"c_array_delimited_by_null": True, "type_modifier": "n"}}},
),
"SecKeyGetTypeID": (sel32or64(b"I", b"Q"),),
"sec_protocol_set_local_identity": (b"v@@",),
"sec_protocol_metadata_get_negotiated_protocol_version": (b"i@",),
"SecItemCopyMatching": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecItemUpdate": (b"i@@",),
"CMSDecoderGetTypeID": (sel32or64(b"I", b"Q"),),
"SecTrustedApplicationCopyData": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeychainItemCopyAccess": (
b"i@o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"SecKeyUnwrapSymmetric": (
b"@n^@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecTaskCopySigningIdentifier": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecureDownloadCopyTicketLocation": (
b"i^{OpaqueSecureDownload=}o^@",
"",
{"arguments": {1: {"already_cfretained": True}}},
),
"sec_protocol_options_set_verify_block": (
b"v@@?^{dispatch_queue_s}",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {
0: {"type": "^v"},
1: {"type": "@"},
2: {"type": "@"},
3: {
"callable": {
"retval": {"type": "v"},
"arguments": {0: {"type": "^v"}},
},
"type": "@?",
},
},
}
}
}
},
),
"SecTranformCustomGetAttribute": (
sel32or64(
b"@^{OpaqueSecTransformImplementation=}@i",
b"@^{OpaqueSecTransformImplementation=}@q",
),
),
"SecKeychainCopyDefault": (
b"io^@",
"",
{"arguments": {0: {"already_cfretained": True}}},
),
"SSLSetEnabledCiphers": (
b"i@n^iL",
"",
{"arguments": {1: {"c_array_length_in_arg": 2}}},
),
"sec_protocol_metadata_access_peer_certificate_chain": (
b"B@@?",
"",
{
"arguments": {
1: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": "^v"}, 1: {"type": "@"}},
}
}
}
},
),
"SecHostRemoveGuest": (b"iIII",),
"SSLClose": (b"i@",),
"SecKeyDeriveFromPassword": (
b"@@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"SecAccessControlGetTypeID": (sel32or64(b"I", b"Q"),),
"SecKeyCopyAttributes": (b"@@", "", {"retval": {"already_cfretained": True}}),
"AuthorizationRightRemove": (
b"i^{AuthorizationOpaqueRef=}^t",
"",
{"arguments": {1: {"c_array_delimited_by_null": True, "type_modifier": "n"}}},
),
"SecTrustSettingsCopyTrustSettings": (
b"i@Io^@",
"",
{"arguments": {2: {"already_cfretained": True}}},
),
"SecEncryptTransformCreate": (
b"@@o^@",
"",
{"retval": {"already_cfretained": True}},
),
"CMSEncoderCreate": (b"io^@", "", {"arguments": {0: {"already_cfretained": True}}}),
"CMSEncodeContent": (
b"i@@@ZIn^vLo^@",
"",
{
"arguments": {
5: {"c_array_length_in_arg": 6},
7: {"already_cfretained": True},
}
},
),
}
# Alternate/renamed symbols: each key is exported as an alias whose value is
# the canonical constant or function name it equals in the Security headers.
# NOTE(review): "SecTranformCustomGetAttribute" (sic) appears to mirror a
# misspelling in the framework itself, with the alias supplying the correctly
# spelled name — verify against the SDK headers before "fixing" it.
aliases = {
    "SecTransformCustomGetAttribute": "SecTranformCustomGetAttribute",
    "errSessionInvalidFlags": "errAuthorizationInvalidFlags",
    "errSecCSSigDBAccess": "errSecCSDBAccess",
    "errSSLServerAuthCompleted": "errSSLPeerAuthCompleted",
    "errSSLLast": "errSSLUnexpectedRecord",
    "errSessionInternal": "errAuthorizationInternal",
    "kSecRequirementTypeCount": "kSecInvalidRequirementType",
    "errSSLClientAuthCompleted": "errSSLPeerAuthCompleted",
    "errSecCSSigDBDenied": "errSecCSDBDenied",
}
# CoreFoundation object types exported by the Security framework.  Each entry
# is (Python-visible name, Objective-C type encoding, name of the
# CFGetTypeID-style function used to register the type, toll-free-bridged
# Objective-C class or None).
cftypes = [
    ("SecKeyRef", b"^{OpaqueSecKeyRef=}", "SecKeyGetTypeID", None),
    ("SecPasswordRef", b"^{OpaqueSecPasswordRef=}", "SecPasswordGetTypeID", None),
    (
        "SecKeychainItemRef",
        b"^{OpaqueSecKeychainItemRef=}",
        # BUG FIX: was "SecKeyChainItemGetTypeID"; the exported symbol (and the
        # entry in the ``functions`` table above) is "SecKeychainItemGetTypeID".
        "SecKeychainItemGetTypeID",
        None,
    ),
    ("SecTaskRef", b"^{__SecTask=}", "SecTaskGetTypeID", None),
    (
        "SecCertificateRef",
        b"^{OpaqueSecCertificateRef=}",
        "SecCertificateGetTypeID",
        None,
    ),
    (
        "SecTrustedApplicationRef",
        b"^{OpaqueSecTrustedApplicationRef=}",
        "SecTrustedApplicationGetTypeID",
        None,
    ),
    ("CMSEncoderRef", b"^{_CMSEncoder=}", "CMSEncoderGetTypeID", None),
    (
        "SecAccessControlRef",
        b"^{OpaqueSecAccessControlRef=}",
        "SecAccessControlGetTypeID",
        None,
    ),
    ("SecCodeRef", b"^{__SecCode=}", "SecCodeGetTypeID", None),
    ("CMSDecoderRef", b"^{_CMSDecoder=}", "CMSDecoderGetTypeID", None),
    ("SecAccessRef", b"^{OpaqueSecAccessRef=}", "SecAccessGetTypeID", None),
    ("SecIdentityRef", b"^{OpaqueSecIdentityRef=}", "SecIdentityGetTypeID", None),
    ("SSLContextRef", b"^{SSLContext=}", "SSLContextGetTypeID", None),
    ("SecRequirementRef", b"^{__SecRequirement=}", "SecRequirementGetTypeID", None),
    ("SecPolicyRef", b"^{OpaqueSecPolicyRef=}", "SecPolicyGetTypeID", None),
    ("SecTrustRef", b"^{__SecTrust=}", "SecTrustGetTypeID", None),
    # NOTE(review): SecACLRef reusing OpaqueSecTrustRef looks odd but appears
    # to match the typedef in Apple's SecBase.h — confirm before changing.
    ("SecACLRef", b"^{OpaqueSecTrustRef=}", "SecACLGetTypeID", None),
    # BUG FIX: was "SecKeyChainGetTypeID"; the exported symbol (and the entry
    # in the ``functions`` table above) is "SecKeychainGetTypeID".
    ("SecKeychainRef", b"^{OpaqueSecKeychainRef=}", "SecKeychainGetTypeID", None),
]
# Opaque (non-CFType) pointer types used by the Security framework APIs above;
# each is registered once under its header name with its type encoding.
misc.update(
    {
        pointer_name: objc.createOpaquePointerType(pointer_name, encoding)
        for pointer_name, encoding in (
            ("AuthorizationRef", b"^{AuthorizationOpaqueRef=}"),
            ("SecureDownloadRef", b"^{OpaqueSecureDownload=}"),
            ("SecRandomRef", b"^{__SecRandom=}"),
            (
                "SecTransformImplementationRef",
                b"^{OpaqueSecTransformImplementation=}",
            ),
        )
    }
)
# Constants whose value is supplied as a Python expression to be evaluated
# lazily (here: kAuthorizationEmptyEnvironment is simply None).
expressions = {"kAuthorizationEmptyEnvironment": "None"}
# END OF FILE
| [
"shanatmail@gmail.com"
] | shanatmail@gmail.com |
53958bdf8d98274e02100244da10842fb11fe873 | dd1ceeb7e7ad5e49d901daf2bc697feb80d5f9c7 | /11b.py | 9b128c7132c9faa897c460c6e29af8d35560b810 | [] | no_license | MrVineetPatil/unix | d30ecccdfa9ba8a1d4431a76cb7a7c2b4a09d041 | 0dee7563baa0a18627a0db1a743bd14f5d3aa84b | refs/heads/master | 2020-05-27T19:20:54.091297 | 2019-05-27T02:47:13 | 2019-05-27T02:47:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | def remove(l,r):
k=[]
k+=l
if r in l:
k.remove(r)
return k
def subsetsum(l, t):
    """Return True if any three elements of l, taken at distinct positions,
    sum to t; False otherwise (including when l has fewer than 3 elements).

    The original implementation built every matching triple via repeated
    list copies (remove) before testing the result's length; this version
    enumerates position-distinct triples directly and early-exits on the
    first match, with no intermediate copies.
    """
    from itertools import combinations

    return any(a + b + c == t for a, b, c in combinations(l, 3))
# Driver: read the list elements and the target value from stdin; the result
# of subsetsum(l, t) is printed by the following line.
print("Enter the list elements:",end='')
l=[int(x) for x in input().split()]
t=int(input("Enter the target\n"))
print(subsetsum(l,t)) | [
"noreply@github.com"
] | noreply@github.com |
c394f35d81a2eb6ac4c455dd44b7add384a8b18b | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/84/usersdata/203/57032/submittedfiles/lista1.py | 4fe9d85ddd31f1891fa346e0bba2e39623b993ce | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 360 | py | # -*- coding: utf-8 -*-
# Read n integers into l, then accumulate the sum and count of the odd
# values (somai, qi) and of the even values (somap, qp); the four results
# are printed at the end (qp on the following line).
n = int(input('tamanho da lista: '))
l = []
somai = 0  # sum of odd elements
qi = 0     # count of odd elements
somap = 0  # sum of even elements
qp = 0     # count of even elements
for i in range(n):
    a = int(input('elemento da lista: '))
    l.append(a)
# BUG FIX: the original loop was `for i in range(1, len(lista), 1)` — the name
# `lista` is undefined (NameError), `l(i)` called the list instead of
# subscripting it (TypeError), and starting at 1 would have skipped index 0.
for v in l:
    if v % 2 == 0:
        somap += v
        qp += 1
    else:
        somai += v
        qi += 1
print(somai)
print(somap)
print(qi)
print(qp) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.