blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32b78eb1f1df9810ef6d6a8e263d2c892c314d86 | 8eb81737683f25f3cacf4319807ae75ed506f8fc | /entrega/codigo/competencia/spam_filter.py | ce089a429547180342743c533f4f8a7c4d34994e | [] | no_license | manucosta/aa-tp1 | 269e884662574067f691d83df8bc767601be0bce | af59d0b6b41f376617346e2df6ebd06c4fb94bca | refs/heads/master | 2021-04-30T22:22:24.194361 | 2017-07-09T23:15:30 | 2017-07-09T23:15:30 | 66,563,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | from utilities import *
from scipy.sparse import coo_matrix, hstack
from sklearn.cross_validation import cross_val_score, KFold
from sklearn import ensemble
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import fbeta_score, make_scorer
import numpy as np
import pickle
import sys
# --- Spam/ham command-line classifier (Python 2 script) ---
# Usage: python spam_filter.py <mails.json>
# Path to the JSON file holding the raw mail texts (first CLI argument).
json_mails = sys.argv[1]
# Read the mails (set the correct paths).
# NOTE(review): `json` is not imported in this file; presumably it is
# re-exported by `from utilities import *` -- confirm.
mails_txt= json.load(open(json_mails))
# Lowercase every mail before feature extraction (Python 2: `map` returns a list).
mails_txt = map(lambda x: x.lower(), mails_txt)
#print "Lei json y arme data frame"
# Extract simple attributes.
# Feature: whether each mail contains HTML (as a column vector).
HTML = coo_matrix(map(hasHTML, mails_txt)).transpose()
# Feature: whether each mail has a subject line.
SUBJ = coo_matrix(map(hasSubject, mails_txt)).transpose()
# Feature: length of the mail.
LEN = coo_matrix(map(len, mails_txt)).transpose()
# Feature: number of spaces in the mail.
SPACES = coo_matrix(map(count_spaces, mails_txt)).transpose()
#print "Clasifique por atributos simples"
# Bag-of-words frequencies from the pre-fitted vectorizer (see utilities).
vectorizer = obtenerVectorizer()
word_freq_matrix = vectorizer.transform(mails_txt)
#print "Arme matriz"
# Final dense feature matrix: simple attributes + word frequencies.
X = hstack([HTML, SUBJ, LEN, SPACES, word_freq_matrix]).toarray()
# Pre-trained random-forest classifier, pickled alongside this script.
clf = pickle.load( open('ranfor.pickle') )
y_predic = clf.predict(X)
# Emit one label per mail on stdout: 1 -> spam, anything else -> ham.
for p in y_predic:
    if p == 1:
        print 'spam'
    else:
        print 'ham'
| [
"manucos94@gmail.com"
] | manucos94@gmail.com |
5bbb358a632d9bba20e2078a0a95695607f33fff | 1a87d286396a2c6f6b6ac7c53495f80690836c7b | /LC/LC_testJustification.py | e1b9c5fe604fe74fbcb2713c10b062f9b244c481 | [] | no_license | kickbean/LeetCode | 14d33eea9dd70821114ca6d7e1a32111d4d64bf0 | 92e4de152e2aae297ef0e93c9eea61d7ad718f4e | refs/heads/master | 2016-09-10T14:38:33.692759 | 2014-04-08T00:26:51 | 2014-04-08T00:26:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,565 | py | '''
Given an array of words and a length L, format the text such that each line has exactly L characters and is fully (left and right) justified.
You should pack your words in a greedy approach; that is, pack as many words as you can in each line. Pad extra spaces ' ' when necessary so that each line has exactly L characters.
Extra spaces between words should be distributed as evenly as possible. If the number of spaces on a line do not divide evenly between words, the empty slots on the left will be assigned more spaces than the slots on the right.
For the last line of text, it should be left justified and no extra space is inserted between words.
For example,
words: ["This", "is", "an", "example", "of", "text", "justification."]
L: 16.
Return the formatted lines as:
[
"This is an",
"example of text",
"justification. "
]
Note: Each word is guaranteed not to exceed L in length.
click to show corner cases.
Corner Cases:
A line other than the last line might contain only one word. What should you do in this case?
In this case, that line should be left-justified.
Created on Feb 3, 2014
@author: Songfan
'''
def solution(words, L):
    """Fully justify `words` into lines of exactly `L` characters.

    Greedy packing: put as many words as fit on each line, padding interior
    lines with left-biased extra spaces; the last line is left-justified
    with single spaces.

    Fixes over the original:
      * a word that exactly fit the remaining budget was wrongly pushed to
        the next line (`<` instead of `<=`);
      * the last line emitted only the final word instead of joining every
        word collected for it.
    """
    n = len(words)
    if n == 0:
        return words
    res = []
    currWords = []          # words collected for the line being built
    availableSpace = L      # chars still usable on this line (incl. one separator)
    for w in words:
        wLen = len(w)
        if wLen <= availableSpace:
            # The word (plus a separating space) still fits on this line.
            currWords.append(w)
            availableSpace -= wLen + 1
        else:
            # Flush the current line fully justified; start a new one with w.
            res.append(combineWords(currWords, L))
            currWords = [w]
            availableSpace = L - wLen - 1
    if currWords:
        # Last line: left-justified, single spaces, padded on the right to L.
        last = ' '.join(currWords)
        res.append(last + ' ' * (L - len(last)))
    return res

def combineWords(words, L):
    """Join `words` into one fully justified line of width `L`.

    Extra spaces are spread as evenly as possible; when they do not divide
    evenly, the leftmost gaps each receive one additional space.
    """
    wordNum = len(words)
    wordLen = sum(len(w) for w in words)
    spaceNumTotal = L - wordLen
    if wordNum == 1:
        # A single word is left-justified and padded on the right.
        return words[0] + ' ' * spaceNumTotal
    spaceNum = spaceNumTotal // (wordNum - 1)        # base width of each gap
    additionalSpace = spaceNumTotal % (wordNum - 1)  # leftmost gaps get +1
    res = ''
    for wi in range(wordNum):
        if wi == wordNum - 1:
            res += words[wi]
        elif additionalSpace > 0:
            res += words[wi] + ' ' * (spaceNum + 1)
            additionalSpace -= 1
        else:
            res += words[wi] + ' ' * spaceNum
    return res
# Ad-hoc smoke tests (Python 2 `print` statements), run on import.
words = ["This", "is", "an", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
# A long word forces it onto its own (shorter) line.
words = ["This", "is", "an", "vervverycrazy", "example", "of", "text", "justification."]
L = 16
print solution(words, L)
| [
"songfan.yang@gmail.com"
] | songfan.yang@gmail.com |
1030b34272f32e34932e1f87a1940f711b6194bb | d3c4dd428f7d73b75e59668257b1f56e3b7f9c04 | /practice_package_distrubtion/Lib/site-packages/pylint/checkers/similar.py | d87a2b132358472fdda70db24559fb562e1499c1 | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-python-cwi",
"GPL-1.0-or-later",
"LicenseRef-scancode-newlib-historical",
"OpenSSL",
"bzip2-1.0.6",
"Python-2.0",
"TCL",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-other-copyl... | permissive | chrismigut/python_packagingPythonProjects | d67dc2d7d9222337f52704c4563b24dcf0bd8d0c | e8bc9eec075b77413d31d4640a7feb6c82ddc96c | refs/heads/master | 2022-10-25T23:05:53.882654 | 2019-09-29T18:47:52 | 2019-09-29T18:47:52 | 211,695,918 | 0 | 1 | MIT | 2022-10-14T22:21:44 | 2019-09-29T16:58:15 | Python | UTF-8 | Python | false | false | 15,015 | py | # Copyright (c) 2006, 2008-2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2012 Ry4an Brase <ry4an-hg@ry4an.org>
# Copyright (c) 2012 Google, Inc.
# Copyright (c) 2012 Anthony VEREZ <anthony.verez.external@cassidian.com>
# Copyright (c) 2014-2018 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2017 Anthony Sottile <asottile@umich.edu>
# Copyright (c) 2017 Mikhail Fesenko <proggga@gmail.com>
# Copyright (c) 2018 ssolanki <sushobhitsolanki@gmail.com>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
# pylint: disable=redefined-builtin
"""a similarities / code duplication command line tool and pylint checker
"""
import sys
from collections import defaultdict
from itertools import groupby
import astroid
from pylint.checkers import BaseChecker, table_lines_from_stats
from pylint.interfaces import IRawChecker
from pylint.reporters.ureports.nodes import Table
from pylint.utils import decoding_stream
class Similar:
    """finds copy-pasted lines of code in a project"""

    def __init__(
        self,
        min_lines=4,
        ignore_comments=False,
        ignore_docstrings=False,
        ignore_imports=False,
    ):
        # min_lines: minimum number of consecutive equal (stripped) lines
        # required for two chunks to be reported as similar.
        self.min_lines = min_lines
        self.ignore_comments = ignore_comments
        self.ignore_docstrings = ignore_docstrings
        self.ignore_imports = ignore_imports
        # One LineSet per appended stream/file.
        self.linesets = []

    def append_stream(self, streamid, stream, encoding=None):
        """append a file to search for similarities"""
        if encoding is None:
            readlines = stream.readlines
        else:
            readlines = decoding_stream(stream, encoding).readlines
        try:
            self.linesets.append(
                LineSet(
                    streamid,
                    readlines(),
                    self.ignore_comments,
                    self.ignore_docstrings,
                    self.ignore_imports,
                )
            )
        except UnicodeDecodeError:
            # Files that cannot be decoded are silently skipped.
            pass

    def run(self):
        """start looking for similarities and display results on stdout"""
        self._display_sims(self._compute_sims())

    def _compute_sims(self):
        """compute similarities in appended files"""
        # Map: chunk length -> list of sets of (lineset, start-index) couples.
        # Overlapping reports of the same chunk are merged into one group.
        no_duplicates = defaultdict(list)
        for num, lineset1, idx1, lineset2, idx2 in self._iter_sims():
            duplicate = no_duplicates[num]
            for couples in duplicate:
                # Merge with an existing group sharing one of the locations.
                if (lineset1, idx1) in couples or (lineset2, idx2) in couples:
                    couples.add((lineset1, idx1))
                    couples.add((lineset2, idx2))
                    break
            else:
                duplicate.append({(lineset1, idx1), (lineset2, idx2)})
        sims = []
        for num, ensembles in no_duplicates.items():
            for couples in ensembles:
                sims.append((num, couples))
        # Report the longest duplicated chunks first.
        sims.sort()
        sims.reverse()
        return sims

    def _display_sims(self, sims):
        """display computed similarities on stdout"""
        nb_lignes_dupliquees = 0  # running total of duplicated lines
        for num, couples in sims:
            print()
            print(num, "similar lines in", len(couples), "files")
            couples = sorted(couples)
            lineset = idx = None
            for lineset, idx in couples:
                print("==%s:%s" % (lineset.name, idx))
            if lineset:
                # Show the duplicated chunk once, taken from the last location.
                for line in lineset._real_lines[idx : idx + num]:
                    print(" ", line.rstrip())
            # Count each extra occurrence beyond the first as duplication.
            nb_lignes_dupliquees += num * (len(couples) - 1)
        nb_total_lignes = sum([len(lineset) for lineset in self.linesets])
        print(
            "TOTAL lines=%s duplicates=%s percent=%.2f"
            % (
                nb_total_lignes,
                nb_lignes_dupliquees,
                nb_lignes_dupliquees * 100.0 / nb_total_lignes,
            )
        )

    def _find_common(self, lineset1, lineset2):
        """find similarities in the two given linesets"""
        lines1 = lineset1.enumerate_stripped
        lines2 = lineset2.enumerate_stripped
        find = lineset2.find
        index1 = 0
        min_lines = self.min_lines
        while index1 < len(lineset1):
            skip = 1
            num = 0
            # Try every occurrence in lineset2 of the current lineset1 line,
            # then extend the match forward line by line.
            for index2 in find(lineset1[index1]):
                non_blank = 0
                for num, ((_, line1), (_, line2)) in enumerate(
                    zip(lines1(index1), lines2(index2))
                ):
                    if line1 != line2:
                        # Only report when enough non-blank lines matched.
                        if non_blank > min_lines:
                            yield num, lineset1, index1, lineset2, index2
                        skip = max(skip, num)
                        break
                    if line1:
                        non_blank += 1
                else:
                    # we may have reach the end
                    num += 1
                    if non_blank > min_lines:
                        yield num, lineset1, index1, lineset2, index2
                    skip = max(skip, num)
            # Jump past the longest match found to avoid re-reporting it.
            index1 += skip

    def _iter_sims(self):
        """iterate on similarities among all files, by making a cartesian
        product
        """
        for idx, lineset in enumerate(self.linesets[:-1]):
            for lineset2 in self.linesets[idx + 1 :]:
                for sim in self._find_common(lineset, lineset2):
                    yield sim
def stripped_lines(lines, ignore_comments, ignore_docstrings, ignore_imports):
    """return lines with leading/trailing whitespace and any ignored code
    features removed

    Fix over the original: a docstring opened with a raw-string prefix
    (r-triple-quote) recorded ``line[:3]`` (i.e. ``r""``) as the closing
    marker, so the real terminator never matched and every following line
    of the file was blanked out.
    """
    if ignore_imports:
        tree = astroid.parse("".join(lines))
        # For every line holding top-level nodes: True when all are imports.
        node_is_import_by_lineno = (
            (node.lineno, isinstance(node, (astroid.Import, astroid.ImportFrom)))
            for node in tree.body
        )
        line_begins_import = {
            lineno: all(is_import for _, is_import in node_is_import_group)
            for lineno, node_is_import_group in groupby(
                node_is_import_by_lineno, key=lambda x: x[0]
            )
        }
        current_line_is_import = False
    strippedlines = []
    docstring = None  # closing quote marker while inside a docstring
    for lineno, line in enumerate(lines, start=1):
        line = line.strip()
        if ignore_docstrings:
            if not docstring and any(
                line.startswith(i) for i in ['"""', "'''", 'r"""', "r'''"]
            ):
                if line.startswith("r"):
                    # Skip the raw prefix: the closer is the bare quotes.
                    docstring = line[1:4]
                    line = line[4:]
                else:
                    docstring = line[:3]
                    line = line[3:]
            if docstring:
                if line.endswith(docstring):
                    docstring = None
                # Lines inside (or closing) a docstring contribute nothing.
                line = ""
        if ignore_imports:
            # A line keeps the import flag of the last line that set it,
            # so import continuations are blanked as well.
            current_line_is_import = line_begins_import.get(
                lineno, current_line_is_import
            )
            if current_line_is_import:
                line = ""
        if ignore_comments:
            line = line.split("#", 1)[0].strip()
        strippedlines.append(line)
    return strippedlines
class LineSet:
    """Holds and indexes all the lines of a single source file."""

    def __init__(
        self,
        name,
        lines,
        ignore_comments=False,
        ignore_docstrings=False,
        ignore_imports=False,
    ):
        self.name = name
        self._real_lines = lines
        # Normalized lines used for comparison; blanks mark ignored content.
        self._stripped_lines = stripped_lines(
            lines, ignore_comments, ignore_docstrings, ignore_imports
        )
        self._index = self._mk_index()

    def __str__(self):
        return "<Lineset for {}>".format(self.name)

    def __len__(self):
        return len(self._real_lines)

    def __getitem__(self, index):
        return self._stripped_lines[index]

    def __lt__(self, other):
        return self.name < other.name

    def __hash__(self):
        return id(self)

    def find(self, stripped_line):
        """Return the positions of `stripped_line` in this set (or ())."""
        return self._index.get(stripped_line, ())

    def enumerate_stripped(self, start_at=0):
        """Yield (index, stripped_line) pairs, starting at `start_at`."""
        tail = self._stripped_lines[start_at:] if start_at else self._stripped_lines
        yield from enumerate(tail, start=start_at)

    def _mk_index(self):
        """Build the stripped-line -> [positions] lookup index."""
        index = defaultdict(list)
        for position, text in enumerate(self._stripped_lines):
            if text:
                index[text].append(position)
        return index
# Message definitions registered by SimilarChecker:
# pylint message id -> (message template, symbolic name, description).
MSGS = {
    "R0801": (
        "Similar lines in %s files\n%s",
        "duplicate-code",
        "Indicates that a set of similar lines has been detected "
        "among multiple file. This usually means that the code should "
        "be refactored to avoid this duplication.",
    )
}
def report_similarities(sect, stats, old_stats):
    """Append a small duplication-statistics table to the report section."""
    cells = ["", "now", "previous", "difference"]
    cells += table_lines_from_stats(
        stats, old_stats, ("nb_duplicated_lines", "percent_duplicated_lines")
    )
    sect.append(Table(children=cells, cols=4, rheaders=1, cheaders=1))
# wrapper to get a pylint checker from the similar class
class SimilarChecker(BaseChecker, Similar):
    """checks for similarities and duplicated code. This computation may be
    memory / CPU intensive, so you should disable it if you experiment some
    problems.
    """

    __implements__ = (IRawChecker,)
    # configuration section name
    name = "similarities"
    # messages
    msgs = MSGS
    # configuration options
    # for available dict keys/values see the optik parser 'add_option' method
    options = (
        (
            "min-similarity-lines",  # type: ignore
            {
                "default": 4,
                "type": "int",
                "metavar": "<int>",
                "help": "Minimum lines number of a similarity.",
            },
        ),
        (
            "ignore-comments",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Ignore comments when computing similarities.",
            },
        ),
        (
            "ignore-docstrings",
            {
                "default": True,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Ignore docstrings when computing similarities.",
            },
        ),
        (
            "ignore-imports",
            {
                "default": False,
                "type": "yn",
                "metavar": "<y or n>",
                "help": "Ignore imports when computing similarities.",
            },
        ),
    )
    # reports
    reports = (("RP0801", "Duplication", report_similarities),)  # type: ignore

    def __init__(self, linter=None):
        BaseChecker.__init__(self, linter)
        # Similar starts from its own defaults; set_option() below keeps
        # these attributes in sync with the registered pylint options.
        Similar.__init__(
            self, min_lines=4, ignore_comments=True, ignore_docstrings=True
        )
        self.stats = None

    def set_option(self, optname, value, action=None, optdict=None):
        """method called to set an option (registered in the options list)

        overridden to mirror option changes onto the Similar attributes
        """
        BaseChecker.set_option(self, optname, value, action, optdict)
        if optname == "min-similarity-lines":
            self.min_lines = self.config.min_similarity_lines
        elif optname == "ignore-comments":
            self.ignore_comments = self.config.ignore_comments
        elif optname == "ignore-docstrings":
            self.ignore_docstrings = self.config.ignore_docstrings
        elif optname == "ignore-imports":
            self.ignore_imports = self.config.ignore_imports

    def open(self):
        """init the checkers: reset linesets and statistics information"""
        self.linesets = []
        self.stats = self.linter.add_stats(
            nb_duplicated_lines=0, percent_duplicated_lines=0
        )

    def process_module(self, node):
        """process a module

        the module's content is accessible via the stream object

        stream must implement the readlines method
        """
        with node.stream() as stream:
            self.append_stream(self.linter.current_name, stream, node.file_encoding)

    def close(self):
        """compute and display similarities on closing (i.e. end of parsing)"""
        total = sum(len(lineset) for lineset in self.linesets)
        duplicated = 0
        stats = self.stats
        for num, couples in self._compute_sims():
            msg = []
            lineset = idx = None
            for lineset, idx in couples:
                msg.append("==%s:%s" % (lineset.name, idx))
            msg.sort()
            if lineset:
                # Append the duplicated chunk itself after the location list.
                for line in lineset._real_lines[idx : idx + num]:
                    msg.append(line.rstrip())
            self.add_message("R0801", args=(len(couples), "\n".join(msg)))
            duplicated += num * (len(couples) - 1)
        stats["nb_duplicated_lines"] = duplicated
        # `total and ...` avoids ZeroDivisionError when no lines were seen.
        stats["percent_duplicated_lines"] = total and duplicated * 100.0 / total
def register(linter):
    """required method to auto register this checker """
    # Plugin entry point: pylint calls this when loading the module.
    linter.register_checker(SimilarChecker(linter))
def usage(status=0):
    """Print command-line usage information, then exit with `status`."""
    message = (
        "finds copy pasted blocks in a set of files\n"
        "\n"
        "Usage: symilar [-d|--duplicates min_duplicated_lines] "
        "[-i|--ignore-comments] [--ignore-docstrings] [--ignore-imports] file1..."
    )
    print(message)
    sys.exit(status)
def Run(argv=None):
    """standalone command line access point"""
    if argv is None:
        argv = sys.argv[1:]

    from getopt import getopt

    # NOTE(review): "-d" is declared without a trailing ":" although
    # --duplicates takes a value; with getopt semantics "-d 4" therefore
    # yields an empty value and int('') raises ValueError -- confirm
    # whether "hd:i" was intended.
    s_opts = "hdi"
    l_opts = (
        "help",
        "duplicates=",
        "ignore-comments",
        "ignore-imports",
        "ignore-docstrings",
    )
    # Defaults mirror the Similar() constructor.
    min_lines = 4
    ignore_comments = False
    ignore_docstrings = False
    ignore_imports = False
    opts, args = getopt(argv, s_opts, l_opts)
    for opt, val in opts:
        if opt in ("-d", "--duplicates"):
            min_lines = int(val)
        elif opt in ("-h", "--help"):
            usage()
        elif opt in ("-i", "--ignore-comments"):
            ignore_comments = True
        elif opt in ("--ignore-docstrings",):
            ignore_docstrings = True
        elif opt in ("--ignore-imports",):
            ignore_imports = True
    if not args:
        # No files given: show usage and exit with an error status.
        usage(1)
    sim = Similar(min_lines, ignore_comments, ignore_docstrings, ignore_imports)
    for filename in args:
        with open(filename) as stream:
            sim.append_stream(filename, stream)
    sim.run()
    sys.exit(0)
# Allow invoking the checker directly as a script (see also `symilar`).
if __name__ == "__main__":
    Run()
| [
"cmiguthere@yahoo.com"
] | cmiguthere@yahoo.com |
336fca8c8867658fc290a050e6d3a2aebefbdc44 | 4bd84ccf165322003c79ae7239b212b7d03fe43f | /account/migrations/0001_initial.py | 8eeff2d110c502d2264e1f3527c2c8ee45c2ddf2 | [] | no_license | sanudatta11/Project_PUR | 5fb1b1cac123f2ed3c26138d0757752279efd050 | d5a8d4f202347715dd948158c4463611dee11e08 | refs/heads/master | 2021-01-19T00:27:45.226577 | 2017-08-23T09:24:57 | 2017-08-23T09:24:57 | 100,568,543 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,508 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-17 10:08
from __future__ import unicode_literals
import datetime
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Initial schema for the `account` app: creates the `children`,
    # `operations` and `profile` tables (auto-generated by Django 1.11.3).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='children',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('age', models.IntegerField()),
            ],
        ),
        migrations.CreateModel(
            name='operations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('pur', models.CharField(max_length=14, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z@.]*$', 'Only alphanumeric characters are allowed.')])),
                ('aadharhof', models.CharField(max_length=50, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z@.]*$', 'Only alphanumeric characters are allowed.')])),
                # NOTE(review): MinLengthValidator on an IntegerField is
                # dubious (length validators operate on len(value)) -- confirm.
                ('mobile', models.IntegerField(validators=[django.core.validators.MinLengthValidator(10)])),
                # Default frozen at makemigrations time (2017-08-17): the
                # evaluated datetime was serialized rather than a callable.
                ('date', models.DateField(default=datetime.datetime(2017, 8, 17, 10, 8, 5, 75472, tzinfo=utc))),
            ],
        ),
        migrations.CreateModel(
            name='profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=30)),
                ('last_name', models.CharField(max_length=30)),
                ('aadharid', models.CharField(max_length=50, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z@.]*$', 'Only alphanumeric characters are allowed.')])),
                ('email', models.CharField(max_length=20, null=True, validators=[django.core.validators.RegexValidator('^[0-9a-zA-Z@.]*$', 'Only Email characters are allowed.')])),
            ],
        ),
        migrations.AddField(
            model_name='children',
            name='parent',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='account.profile'),
        ),
    ]
| [
"sanudatta11@gmail.com"
] | sanudatta11@gmail.com |
5947d1f33371f5d3309ccd9cad4b3241edda9364 | 1203b1506cc296a3f83984ffbffc122418f6b04b | /libs/incomplete/preproces.py | ad830b5b4368c39bc4433f6ed1709ce67e905b26 | [] | no_license | mak12776/py-libs | ca7c4ad46e4d501ae7a65aedddb5cd452e010eb5 | 6575d03d6d5c5541368134e45b90679892cdc3e8 | refs/heads/master | 2020-09-24T18:28:02.395151 | 2020-01-03T04:15:18 | 2020-01-03T04:15:18 | 225,816,844 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,325 | py |
import sys
import collections
class dotdict(dict):
    """A dict whose items can also be accessed as attributes (d.key).

    Missing attributes raise KeyError, mirroring item access — same as the
    original alias-based implementation.
    """

    def __getattr__(self, name):
        return self[name]

    def __setattr__(self, name, value):
        self[name] = value

    def __delattr__(self, name):
        del self[name]
def starts_ends_with(string, prefix, suffix):
    """Return `string` with `prefix`/`suffix` removed when it has both, else None."""
    if not (string.startswith(prefix) and string.endswith(suffix)):
        return None
    return string[len(prefix): len(string) - len(suffix)]
# Raised by macro compilers on invalid macro usage; args[0] is an error
# code string consumed by process_file's error handler.
class CompilerError(Exception):
    pass
def initial_compiler(vars):
    """Set up fresh compiler state on `vars` before processing a file."""
    vars.tab = ''                     # current indentation prefix
    vars.stack = collections.deque()  # currently open macro blocks
def if_compiler(args, outfile, vars):
    """Compile an `if <expr>` macro into a Python `if` statement.

    Writes at the current indentation and records the open block on the
    macro stack. (No trailing newline is written, as in the original.)
    """
    outfile.write('%sif %s:' % (vars.tab, args))
    vars.stack.append('if')
def end_compiler(args, outfile, vars):
    """Compile an `end` macro: close the most recently opened block.

    Bug fix: the original used `popleft()`, which removed the *oldest*
    open block; blocks nest and are pushed with `append()`, so `end`
    must close the newest one (LIFO `pop()`).
    """
    try:
        vars.stack.pop()
    except IndexError:
        # `end` with no open block is a user error.
        raise CompilerError('end_without_preceding')
def finalize_compiler(vars):
    # Placeholder: final checks (e.g. reporting unclosed blocks) are not
    # implemented yet in this incomplete module.
    pass
# Default mapping of macro names to their compiler callbacks.
# NOTE(review): `end_compiler` is defined above but not registered here --
# confirm whether an 'end' entry is still to be added.
_default_macro_compilers = {
    'if': if_compiler,
}
def _default_error_handler(error, info, *args):
    """Report a compile error and its (line-number, line) context on stdout."""
    print('compile error: %s: %s' % (error, args))
    print(info)
def process_file(
    infile,
    outfile,
    # NOTE: these defaults are evaluated once at import time; the copied
    # dict is shared by every call that omits the argument, so mutating it
    # affects later calls.
    macro_compilers = _default_macro_compilers.copy(),
    error_handler = _default_error_handler,
    macro_prefix = '#',
    macro_suffix = '',
    # NOTE(review): statement_prefix/suffix are accepted but never used --
    # presumably a planned feature of this incomplete module; confirm.
    statement_prefix = '@',
    statement_suffix = '',
    ):
    """Read `infile` line by line, compiling macro lines into `outfile`."""
    vars = dotdict()
    initial_compiler(vars)
    lnum = 0
    line = infile.readline()
    while line:
        lnum += 1
        striped_line = line.strip()
        # A macro line is one wrapped in macro_prefix/macro_suffix.
        macro_line = starts_ends_with(striped_line, macro_prefix, macro_suffix)
        if macro_line is not None:
            # Split into macro name and (optional) argument string.
            # NOTE(review): a line that is only the bare prefix yields an
            # empty list here, so macro_args[0] below raises IndexError --
            # confirm intended behavior.
            macro_args = macro_line.split(maxsplit = 1)
            try:
                macro, args = macro_args
            except ValueError:
                macro = macro_args[0]
                args = ''
            try:
                macro = macro_compilers[macro]
            except KeyError:
                error_handler('unknown_macro', (lnum, line), macro)
                return
            try:
                macro(args, outfile, vars)
            except CompilerError as e:
                # e.args[0] is the error code; the rest is extra context.
                error_handler(e.args[0], (lnum, line), *e.args[1:])
                return
        line = infile.readline()
    finalize_compiler(vars)
def main(argv):
    """Preprocess every file named in `argv[1:]`, writing output to stdout."""
    for name in argv[1:]:
        with open(name) as infile:
            process_file(infile, sys.stdout)

# Script entry point (main returns None, so the exit status is 0).
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| [
"mak12776@gmail.com"
] | mak12776@gmail.com |
1d5da1d2a4909417e66239a19f9f1020df5f7380 | ddc73535eb55d212387e609f7f49e889ca8f6c70 | /src/core/data_utils.py | 8de33e5bf24b194c965e8dc4eba1b01436976918 | [
"MIT"
] | permissive | erdl/legacy-scrape-util | b1a0aac18c1d7c6c196d9d8b9f7b84f63f3c271e | c2d777a222d842690b37532a984844768bba23b7 | refs/heads/master | 2021-01-15T17:49:57.785803 | 2018-05-22T18:58:38 | 2018-05-22T18:58:38 | 99,758,670 | 2 | 3 | MIT | 2018-05-21T23:18:56 | 2017-08-09T02:54:37 | Python | UTF-8 | Python | false | false | 8,076 | py | #!/usr/bin/env python3
import src.core.file_utils as file_utils
import src.core.error_utils as error_utils
from collections import namedtuple
import time
import toml
import os.path as path
import os
import sys
import csv
# Default data type to be returned by all data-acquisition
# scripts. Key requirement for interoperability between
# various steps/components ( esp. acquisition & reshaping ).
# Fields: node/name/unit identify the sensor stream; timestamp and value
# are floats by convention (see row_generator below).
Row = namedtuple('row',['node','name','unit','timestamp','value'])

# error template for errors relating to `data_utils`
data_error = error_utils.error_template('`data_utils`')
def fmt_string(target):
    """Normalize `target`: collapse space runs into single dashes, lowercase."""
    tokens = str(target).strip().split(' ')
    return '-'.join(tok for tok in tokens if tok).lower()
def row_generator(node, name, unit):
    """Return a factory producing Rows with fixed, normalized node/name/unit."""
    node, name, unit = (fmt_string(part) for part in (node, name, unit))

    def gen(t, v):
        return Row(node, name, unit, float(t), float(v))

    return gen
def custom_row_generator(fields):
    """Return a factory building namedtuple rows with the given `fields`."""
    rowtype = namedtuple('row', fields)
    return lambda vals: rowtype(*vals)
# get a uid generator based on an ordered mapping of fields.
def get_uid_generator(key=None):
    """Return fn(row) -> uid string joining the `key` fields of a Row.

    Defaults to the node-name-unit triple when no key ordering is given.
    """
    selected = key if key else ['node', 'name', 'unit']
    fields = Row._fields
    positions = [fields.index(item) for item in selected]

    def mkuid(row):
        return '-'.join(row[i] for i in positions).lower()

    return mkuid
# check a configuration file against a prototype
# of its expected fields and types. `ident` must be
# the enclosing field name of the configuration value,
# `proto` and `data` represent the expected and actual
# data respectively. `mkerr` may optionally be supplied
# as an error message template with `section` and `context`
# previously supplied.
def check_config(ident,proto,config,mkerr=None):
    # if no template is supplied for error messages,
    # use the `data_utils` default template with a generic
    # context description.
    if not mkerr:
        context = 'checking configuration value for expected structure'
        mkerr = data_error(context)
    # ensure that we are using a single-step template.
    # NOTE: `assert` is stripped under `python -O`; this is a dev-time check.
    assert isinstance(mkerr('testing...'),str)
    # start the recursive field check.
    field_check(mkerr,ident,proto,config)
# Recursive worker behind check_config: walks the prototype spec.
def field_check(mkerr, ident, proto, data, stack=[]):
    """Verify that `data` matches the `proto` field/type specification.

    `stack` tracks the nesting path for error messages; it is never
    mutated (recursion builds fresh lists), so the shared default is safe.
    """
    def trace():
        # Dotted path for clarity during recursion, e.g. "cfg.db.host".
        return '.'.join([ident, *stack])

    for field in proto:
        # Presence check.
        if field not in data:
            msg = "expected to find field `{}` in `{}`"
            raise Exception(mkerr(msg.format(field, trace())))
        # Type check; a dict-valued prototype entry means "nested spec",
        # so the expected runtime type of the data is `dict` itself.
        spec = proto[field]
        expect = dict if isinstance(spec, dict) else spec
        actual = type(data[field])
        if expect != actual:
            msg = "expected field `{}` in `{}` to be `{}` but it is `{}`"
            raise Exception(mkerr(msg.format(field, trace(), expect, actual)))
        # Recurse into nested prototype dictionaries.
        if isinstance(spec, dict):
            field_check(mkerr, ident, spec, data[field], stack=[*stack, field])
# create a new row object, replacing the value of one or more fields
# based on a dict of the form { fieldname : newval }.
def update_row(mapping, row, constructor=Row):
    """Return a new row with fields replaced per `mapping` {field: newval}."""
    fields = constructor._fields
    current = dict(zip(fields, row))
    # Reject unknown field names before applying any update.
    for field in mapping:
        if field not in current:
            raise Exception('unrecognized field: ' + field)
    current.update(mapping)
    return constructor(*(current[f] for f in fields))
# sort rows into matching and non-matching list
# based upon a dict of form { field: matchstring }.
def match_rows(spec, rows, rowtype=Row):
    """Return (kept, removed): rows matching every {field: pattern} entry,
    plus the rows filtered out along the way."""
    fields = rowtype._fields
    targets = list(rows)
    removed = []
    for field, pattern in spec.items():
        if field not in fields:
            raise Exception('unrecognized field in matcher: ' + field)
        fltr = make_row_matcher(pattern, fields.index(field))
        targets, dropped = split_rows(fltr, targets)
        removed += dropped
    return targets, removed
# generate a row filter based upon a match-string and
# an index. Ex: the args `(0,"*foo")` would generate
# a filter that returns true for any row whose first
# element ends with `foo`.
def make_row_matcher(target, index):
    """Build a predicate testing row[index] against the pattern `target`.

    A `*` at the start/end of `target` selects suffix/prefix/substring
    matching; no `*` means equality. All comparisons are case-insensitive.
    A `*` anywhere else is rejected.
    """
    needle = target.replace('*', '')
    if '*' not in target:
        return lambda row: needle.lower() == row[index].lower()
    if target.startswith('*') and target.endswith('*'):
        return lambda row: needle.lower() in row[index].lower()
    if target.startswith('*'):
        return lambda row: row[index].lower().endswith(needle.lower())
    if target.endswith('*'):
        return lambda row: row[index].lower().startswith(needle.lower())
    raise Exception('invalid match string: ' + target)
# map a function across a specific field of
# a list of rows.
def map_rows(fn, target, rows, constructor=Row):
    """Apply `fn` to the `target` field of every row; return the new rows."""
    fields = constructor._fields
    if target not in fields:
        raise Exception('unrecognized field: ' + target)
    index = fields.index(target)
    mapped = []
    for row in rows:
        values = list(row)
        values[index] = fn(values[index])
        mapped.append(constructor(*values))
    return mapped
# split rows by a pass/fail function.
def split_rows(fn, rows, target=None, rowtype=Row):
    """Partition `rows` into (passed, failed) by the predicate `fn`.

    When `target` names a field, `fn` receives only that field's value;
    otherwise it receives the whole row.
    """
    if target:
        fields = rowtype._fields
        if target not in fields:
            raise Exception('unrecognized field: ' + target)
        index = fields.index(target)
        test = lambda row: fn(row[index])
    else:
        test = fn
    passed, failed = [], []
    for row in rows:
        (passed if test(row) else failed).append(row)
    return passed, failed
# generic nonce updater/generator. This is an experimental
# attempt to provide a single standardized handler for nonce
# values. Consider this an unstable API feature.
# requires, at a minimum, a `targets` dict of the form
# `{ 'some-uid': {}, ... }`.
# Generic nonce updater/generator (experimental, unstable API): produces a
# standardized {'init', 'step'} time window per scrape target uid.
def make_time_specs(targets, settings={}, nonce={}):
    """Build {uid: {'init': ..., 'step': ...}} windows for each target.

    `nonce` values override per-target and global init times; the step is
    clamped so that init + step never passes the current time. The caller
    is responsible for updating the actual nonce after the scrape attempt.
    """
    now = int(time.time())
    # Defaults: start two weeks in the past, advance at most two weeks.
    default_init = settings.get('init-time', now - 1209600)
    default_step = settings.get('step-time', 1209600)
    times = {}
    for uid, spec in targets.items():
        # Entries set to `true` / "default" mean "use all defaults".
        if not isinstance(spec, dict):
            spec = {}
        begin = int(nonce.get(uid, spec.get('init', default_init)))
        span = int(spec.get('step', default_step))
        times[uid] = {'init': begin, 'step': min(now, begin + span) - begin}
    # Quick sanity check: one spec per target.
    assert len(times) == len(targets)
    return times
| [
"fspmarshall@gmail.com"
] | fspmarshall@gmail.com |
19d91b4694cc3976e5f8967c38c3a97745a77613 | ee300f9ca140da45165bb633fe25e2b6b3689354 | /function_global.py | 44454d76358b40211731b1c52aad2715f6cb21b5 | [] | no_license | zhengknight/pyExise | b20c64e4fdd64178d9e99b3bf9622b0a93943a73 | d5261f5025b8603385dcbb0d4042ef631b002119 | refs/heads/master | 2022-11-15T00:25:46.716871 | 2019-06-27T07:40:44 | 2019-06-27T07:40:44 | 192,630,777 | 0 | 1 | null | 2022-10-26T21:49:02 | 2019-06-19T00:35:43 | Python | UTF-8 | Python | false | false | 112 | py | x=50
# Demo: rebinding a module-level name from inside a function.
def func():
    # `global` makes the assignment below rebind the module-level `x`.
    global x
    print('x is',x)
    x=2
    print('x changed to',x)
func()
# The change made inside func() is visible at module level: prints 2.
print('x is ',x)
"zhengknight@qq.com"
] | zhengknight@qq.com |
7b32668abbb4ee08d72874dd26afb1570d2575f9 | 72fa9e96e9eeae6ac213e9355407168483f1a447 | /lecture2.py | 1c8186e0fc0694f8a369ba921be13081247e4b51 | [
"MIT"
] | permissive | kendallsmith327/IA-241 | d65dca1773692730836192725520642c3e1e0c1b | 7c1492ff635249849c20c083b5d7fc0518c1ab8a | refs/heads/main | 2023-05-02T11:50:35.945176 | 2021-05-04T14:40:08 | 2021-05-04T14:40:08 | 332,523,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | """
this is a regional comment
"""
# print('hello world') # this is a single line comment
# print( type('123') )
# print("it's our second python class")
#print('Hello' + 'World')
# Strings: the same name can be rebound to a new string value.
my_str = "hello world"
print(my_str)
my_str = 'second str'
print(my_str)
# Numbers: int and float literals.
my_int = 2
my_float = 2.0
print(my_int + 3)         # addition -> 5
print(my_int * 3)         # multiplication -> 6
print(my_int ** 3)        # exponentiation -> 8
print(my_int + my_float)  # int + float promotes to float -> 4.0
| [
"noreply@github.com"
] | kendallsmith327.noreply@github.com |
d4fe23b89890cf6d0dbee3005299fab053b48463 | 57b7c55b9732ce7d2b0eb30eae2ba71557f29333 | /noticias/pipelines.py | 04eed63ac60a57e61a6527be5db4e4cf2c6bdbd8 | [] | no_license | raianyrufino/WebCrawler-Tecnoblog | ef59aa35c96282caaf73c5e6101c806606858a4b | c7cb239aa66855f002fbcd76fdc12b0aacbffa40 | refs/heads/master | 2020-07-06T15:43:52.641654 | 2019-08-19T00:29:18 | 2019-08-19T00:29:18 | 203,070,948 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
class NoticiasPipeline(object):
    """Scrapy item pipeline that stores every scraped item in notices.txt,
    one JSON object per line."""

    def open_spider(self, spider):
        # Called once when the spider starts: open the output file.
        self.file = open('notices.txt', 'w')

    def close_spider(self, spider):
        # Called once when the spider finishes: release the file handle.
        self.file.close()

    def process_item(self, item, spider):
        # Serialize the item as JSON and append it on its own line,
        # then hand the item on unchanged to the next pipeline stage.
        self.file.write(json.dumps(dict(item)) + '\n')
        return item
| [
"raiany.paz@ccc.ufcg.edu.br"
] | raiany.paz@ccc.ufcg.edu.br |
4d3d41431710f0190e8a8cfd9f2adc2e4f2ee89c | 7e0ea1a29084f9536e02f6d7dcf9a0fb80babf58 | /api/migrations/0004_auto_20160111_1311.py | a94f2bd5eabfbc507e888914844666d8c97079e5 | [] | no_license | ABYARTH/mywallet | a5b0bdbd0d08d22eb55fbc55e61147b92fcc5805 | 8eb0ce84422b55d0211e391269a7716b4f9c90a7 | refs/heads/master | 2021-01-16T20:00:05.470714 | 2016-01-14T15:17:26 | 2016-01-14T15:17:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations. It introduces the central
    # Mywallet model and strips the wallet/contact fields off Biller,
    # Customer and Transaction, re-pointing them at Mywallet instead.
    # Do not hand-edit the operations once this migration has been applied.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('api', '0003_auto_20160110_2120'),
    ]
    operations = [
        # New wallet model, linked one-to-one with the auth user.
        migrations.CreateModel(
            name='Mywallet',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('wallet', models.FloatField(default=0.0)),
                ('contact_number', models.CharField(max_length=15)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Fields now superseded by Mywallet are removed from the old models.
        migrations.RemoveField(
            model_name='biller',
            name='contact_number',
        ),
        migrations.RemoveField(
            model_name='biller',
            name='name',
        ),
        migrations.RemoveField(
            model_name='biller',
            name='wallet',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='contact_number',
        ),
        migrations.RemoveField(
            model_name='customer',
            name='wallet',
        ),
        migrations.RemoveField(
            model_name='transaction',
            name='amount_involved',
        ),
        migrations.RemoveField(
            model_name='transaction',
            name='content_type',
        ),
        migrations.RemoveField(
            model_name='transaction',
            name='object_id',
        ),
        # Transaction amounts become a plain float field.
        migrations.AddField(
            model_name='transaction',
            name='amount',
            field=models.FloatField(default=0.0),
            preserve_default=True,
        ),
        # Remaining references are re-pointed at the new Mywallet model.
        migrations.AlterField(
            model_name='biller',
            name='biller',
            field=models.ForeignKey(to='api.Mywallet'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='biller',
            name='commission',
            field=models.FloatField(default=0.0),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='biller',
            name='unloaded_amount',
            field=models.FloatField(default=0.0),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='customer',
            name='customer',
            field=models.ForeignKey(to='api.Mywallet'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='transaction',
            name='from_user',
            field=models.ForeignKey(related_name='txn_from', to='api.Mywallet'),
            preserve_default=True,
        ),
        migrations.AlterField(
            model_name='transaction',
            name='to_user',
            field=models.ForeignKey(related_name='txn_towards', to='api.Mywallet'),
            preserve_default=True,
        ),
    ]
| [
"s.mohanty.006@gmail.com"
] | s.mohanty.006@gmail.com |
b0294d968ec5dd4977c310aa196900c015299619 | f678f5a4882a6f1988ecacbcece487f782ac1fec | /mybook/mybook/urls.py | 64f277c984dfe033e863e6c2733e9eda19e69f7e | [] | no_license | a3636tako/django-test | bbad1667895bd441d77b50b0f100fa7b93570a1a | ea213b22eb704134e5ca6d38ff827179380c189b | refs/heads/master | 2020-04-18T02:38:02.608459 | 2016-09-07T06:42:37 | 2016-09-07T06:42:37 | 67,034,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | """mybook URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    url(r'^cms/', include('cms.urls', namespace='cms')), # <- added here: delegate /cms/ URLs to the cms app
]
| [
"a3636tako@gmail.com"
] | a3636tako@gmail.com |
015f23d3858690ee7470909983c15dd848b5709a | 46f91363f5cc43b1644a7da93938aef3c0de29c5 | /leonardo/module/media/__init__.py | 233a0f5b0e426c65d5e8688c40baf9bf33e3e777 | [
"BSD-2-Clause"
] | permissive | shinichi81/django-leonardo | 55e1f7492813b8a877dac92aadb114785ea2eb83 | 152ad02ba23b8bc94f676a7221c15338181c67b7 | refs/heads/master | 2021-01-14T12:45:14.400206 | 2015-11-01T09:38:55 | 2015-11-01T09:38:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,222 | py |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
from .widget import *
default_app_config = 'leonardo.module.media.MediaConfig'
class Default(object):
optgroup = 'Media'
@property
def apps(self):
return [
'leonardo.module',
'leonardo.module.media',
]
@property
def widgets(self):
return [
DownloadListWidget,
DownloadItemWidget,
InternetVideoWidget,
MediaGalleryWidget,
SimpleImageWidget,
VectorGraphicsWidget,
PdfDocumentWidget,
FlashObjectWidget,
]
plugins = [
('leonardo.module.media.apps.category_nested', 'List of directories'),
('leonardo.module.media.apps.category_simple', 'Simple list of directories'),
]
config = {
'MEDIA_PAGINATE_BY': (25, _('Pagination count for media files')),
'MEDIA_PUBLIC_UPLOAD_TO': ('public', _('Prefix for public files from MEDIA_ROOT')),
'MEDIA_PRIVATE_UPLOAD_TO': ('private', _('Prefix for private files from MEDIA_ROOT')),
'MEDIA_IS_PUBLIC_DEFAULT': (True, _('Set uploaded files to public automatically')),
'MEDIA_ENABLE_PERMISSIONS': (True, _(
'Permissions for downloadable items. Experimental feature.')),
'MEDIA_ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS': (False, _('ALLOW_REGULAR_USERS_TO_ADD_ROOT_FOLDERS')),
'MEDIA_THUMB_SMALL_GEOM': ('64x64', _('MEDIA_THUMB_SMALL_GEOM')),
'MEDIA_THUMB_SMALL_OPT': ('', _('Another options for small thumnails')),
'MEDIA_THUMB_MEDIUM_GEOM': ('256x256', _('MEDIA_THUMB_MEDIUM_GEOM')),
'MEDIA_THUMB_MEDIUM_OPT': ('', _('Another options for medium thumnails')),
'MEDIA_THUMB_LARGE_GEOM': ('768x768', _('MEDIA_THUMB_LARGE_GEOM')),
'MEDIA_THUMB_LARGE_OPT': ('', _('Another options for large thumnails')),
'MEDIA_LOGICAL_STRUCTURE': (False, _('If is True all folders and files will has same path in the OS')),
}
page_actions = ['media/_actions.html']
class MediaConfig(AppConfig, Default):
name = 'leonardo.module.media'
verbose_name = "Media"
default = Default()
| [
"6du1ro.n@gmail.com"
] | 6du1ro.n@gmail.com |
47652a71e3b9b2b701a573c654088f48cdd6007c | b54ed58e5a6e9d8f468c1f36544d6782b276f3be | /tag_11.py | 18d0352a503bcc41c6d6699d07184ae0183713f6 | [] | no_license | JensGutow/AdventOfCode2020 | c69ff3d1be5ff6cf399c4a3ecb14fa1c70323d74 | 9e116175f0042dacdde182424f1286801e7da131 | refs/heads/main | 2023-02-08T10:00:43.584837 | 2020-12-31T06:57:18 | 2020-12-31T06:57:18 | 322,389,060 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | import time
def get_puzzle(file_name):
d = {}
z = 0
with open(file_name) as f:
for zeile in f:
for s, c in enumerate(zeile.strip()):
d[z,s] = c
z += 1
return d
def get_number_occ_seats(d):
return list(d.values()).count("#")
def get_number_occ_neighbors1(d, z, s):
deltas = [[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]]
n = 0
for dx,dy in deltas:
if d.get((z+dx, s+dy),".") == "#":
n += 1
return n
def get_number_occ_neighbors2(d, z, s):
deltas = [[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]]
n = 0
for dx,dy in deltas:
i = 1
while True:
c = d.get((z + (dx*i), s + (dy*i)),"E")
if c in "EL": break
if c == "#":
n += 1
break
i+=1
return n
def iteration1(d):
result = {}
c_new = ""
for (x,y), c in d.items():
n_occ = get_number_occ_neighbors1(d, x, y)
if c == "#" and n_occ >= 4: result[x,y] ="L"
elif c=="L" and n_occ == 0: result[x,y] = "#"
else: result[x,y] = c
return result
def iteration2(d):
result = {}
c_new = ""
for (x,y), c in d.items():
n_occ = get_number_occ_neighbors2(d, x, y)
if c == "#" and n_occ >= 5: result[x,y] ="L"
elif c=="L" and n_occ == 0: result[x,y] = "#"
else: result[x,y] = c
return result
def task(p, it_fct):
its = 0
abbruch = False
n_occ_seats = get_number_occ_seats(p)
while not abbruch:
p = it_fct(p)
n_occ_seats_new = get_number_occ_seats(p)
#print(its, n_occ_seats_new)
if n_occ_seats_new != n_occ_seats:
its +=1
n_occ_seats = n_occ_seats_new
else:
abbruch = True
return n_occ_seats
p = get_puzzle("tag_11.txt")
p2 = p.copy()
print("Task 1")
start =time.perf_counter()
n_occ_seats = task(p,iteration1)
print(time.perf_counter() - start)
print(n_occ_seats)
print("Task 2")
p = p2
start =time.perf_counter()
n_occ_seats = task(p,iteration2)
print(time.perf_counter() - start)
print(n_occ_seats) | [
"jens_gutow@web.de"
] | jens_gutow@web.de |
0ea35b60098989cbad8bece1f505638fa7a685d2 | 01ed217a3c3c028e6cf4e3675cb86f4eef992e13 | /SimG4Core/PrintGeomInfo/test/python/runPrintSolid_cfg.py | bb9e7a06455f3f00c6cc1a434b1f718f2240c745 | [
"Apache-2.0"
] | permissive | dtp2-tpg-am/cmssw | ae318d154779c311e2e93cdffe0c7bc24d6d2593 | 7a32f48e079f78b501deee6cc9d19caba269e7fb | refs/heads/AM_12_0_2_dev | 2022-11-04T12:05:05.822865 | 2021-10-28T07:25:28 | 2021-10-28T07:25:28 | 185,209,257 | 2 | 1 | Apache-2.0 | 2022-04-26T07:18:06 | 2019-05-06T14:07:10 | C++ | UTF-8 | Python | false | false | 1,897 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run3_cff import Run3
process = cms.Process('G4PrintGeometry',Run3)
process.load('Configuration.Geometry.GeometryExtended2021Reco_cff')
#from Configuration.Eras.Era_Run3_dd4hep_cff import Run3_dd4hep
#process = cms.Process('G4PrintGeometry',Run3_dd4hep)
#process.load('Configuration.Geometry.GeometryDD4hepExtended2021Reco_cff')
process.load('SimGeneral.HepPDTESSource.pdt_cfi')
process.load('IOMC.RandomEngine.IOMC_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedFlat_cfi')
process.load('GeneratorInterface.Core.generatorSmeared_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('SimG4Core.Application.g4SimHits_cfi')
process.load('SimG4Core.PrintGeomInfo.printGeomSolids_cff')
if hasattr(process,'MessageLogger'):
process.MessageLogger.G4cout=dict()
process.MessageLogger.G4cerr=dict()
process.MessageLogger.PrintGeom=dict()
process.source = cms.Source("EmptySource")
process.generator = cms.EDProducer("FlatRandomEGunProducer",
PGunParameters = cms.PSet(
PartID = cms.vint32(14),
MinEta = cms.double(-3.5),
MaxEta = cms.double(3.5),
MinPhi = cms.double(-3.14159265359),
MaxPhi = cms.double(3.14159265359),
MinE = cms.double(9.99),
MaxE = cms.double(10.01)
),
AddAntiParticle = cms.bool(False),
Verbosity = cms.untracked.int32(0),
firstRun = cms.untracked.uint32(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.g4SimHits.UseMagneticField = False
process.g4SimHits.Physics.type = 'SimG4Core/Physics/DummyPhysics'
process.g4SimHits.Physics.DummyEMPhysics = True
process.g4SimHits.Physics.DefaultCutValue = 10.
process.p1 = cms.Path(process.generator*process.VtxSmeared*process.generatorSmeared*process.g4SimHits*process.printGeomSolids)
| [
"sunanda.banerjee@cern.ch"
] | sunanda.banerjee@cern.ch |
e2a0f1050ad9f87f032cba917ce82b55116395fc | a39dd95321b26e464d103981440b6721f0b8ade9 | /Proxy/DB/__init__.py | 5fac9315feb12c294920c375485a11e13b024367 | [] | no_license | willame/Myproxy | 351b4b640238f17cce172267b3294283d3a5f09d | 952d71b19b8b5270573d97aa9366fc0bd3ce926f | refs/heads/master | 2020-12-30T12:23:21.034547 | 2017-05-10T15:34:55 | 2017-05-10T15:34:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | from Proxy.DB.mongodb import Mongo
# Default Mongo database and collection used for the proxy pool.
DBNAME = "database"
COLLECTION = "proxy"
# Module-level Mongo wrapper shared by the package.
db = Mongo(DBNAME, COLLECTION)
"706543191@qq.com"
] | 706543191@qq.com |
9188d34a74f7a78c4794794f5866b1a927835e9a | e8291b4582453879d856b75ba0caf8d9328119a4 | /src/config/network.py | 6b3948623ad0c3447c43c3397f7d5b99c00640da | [] | no_license | Blito/ESP32-DHT22-temperature-humidity | a17971c090c71a7238f029104713008ce8cb6d6b | 6c77424e8d37a9a6f66f58a52c84aea00011f413 | refs/heads/master | 2022-08-27T16:17:54.787393 | 2020-05-29T04:04:35 | 2020-05-29T04:55:02 | 267,769,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 78 | py | SERVER='your.server.ip.here'
# Wi-Fi credentials the board uses to join the local network.
# NOTE(review): placeholders — fill in before flashing; avoid committing real secrets.
SSID='Your SSID here'
PASSWORD='yourpasswordhere'
"pabloarubi@gmail.com"
] | pabloarubi@gmail.com |
1fecbec5373dd163ef54b1223cc735a39bcb3d4d | b99863dc391f0b959cbea12bce26eb7021a3c594 | /Crawler/splitSqlToFile.py | 37195911c3c24a3137ca2802cea9a9f51dd5d403 | [] | no_license | choakai/thesis | 3c180b2740866fb31c5cff69c2a9eb28e3ef9814 | f456b5ed28e5cc06da4a1a04bd9b9f47768f2fdc | refs/heads/master | 2020-05-16T14:21:31.254755 | 2015-01-27T18:01:43 | 2015-01-27T18:01:43 | 26,497,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | # -*- coding: utf-8 -*-
#coding=utf-8
# Python 2 script: reads the crawled articles from SQL Server and splits each
# one into <=3000-character text files (CKIP segmenter input), cutting only at
# sentence punctuation so no sentence is broken across files.
import sys
import pyodbc
import codecs
import win32com.client
connStr = 'DRIVER={SQL Server};SERVER=localhost;DATABASE=thesis;UID=sa;PWD=P@ssw0rd'
conn = pyodbc.connect(connStr)
# Output directory for the chunk files (named <urlid>_<chunk>.txt below).
FilePath = 'D:\\Crawler\\CKIPClient\\thesis\\in\\'
#conn = win32com.client.Dispatch(r'ADODB.Connection')
#DSN = 'Provider=SQLNCLI11.1;Integrated Security="";Persist Security Info=False;User ID=sa;Password=P@ssw0rd;Initial Catalog=THESIS;Data Source=(local);'
#conn.Open(DSN)
strSQL ="select * from data_src order by urlid"
cursor = conn.cursor()
cursor.execute(strSQL)
#row = cursor.fetchone()
#if row:
#	print row
for row in cursor:
    try:
        #print row.context_data
        strContext = unicode(row.context_data)
        # Chunk counter: suffixes the output file name per article.
        intFlag = 1
        while len(strContext) > 0:
            if len(strContext) <= 3000:
                # Remainder fits in one file: write it and stop.
                f = codecs.open(FilePath + str(row.urlid) +'_'+str(intFlag) + ".txt", "w+", "utf-8")
                f.writelines(unicode(strContext))
                f.close()
                strContext = ''
                continue
            else:
                # Find the last sentence-ending punctuation (ASCII and
                # full-width variants) inside the first 3000 characters.
                flag = [0]
                flag[len(flag):] = [strContext[:3000].rfind(u',')]
                flag[len(flag):] = [strContext[:3000].rfind(u'.')]
                flag[len(flag):] = [strContext[:3000].rfind(u'!')]
                flag[len(flag):] = [strContext[:3000].rfind(u'?')]
                flag[len(flag):] = [strContext[:3000].rfind(u',')]
                flag[len(flag):] = [strContext[:3000].rfind(u'。')]
                flag[len(flag):] = [strContext[:3000].rfind(u'!')]
                flag[len(flag):] = [strContext[:3000].rfind(u'?')]
                maxflag = max(flag)
                # No punctuation found at all: give up on this article to
                # avoid an infinite loop (rfind misses return -1, not 0).
                if maxflag == 0:
                    break
                f = codecs.open(FilePath + str(row.urlid) +'_'+str(intFlag) + ".txt", "w+", "utf-8")
                f.writelines(unicode(strContext[:maxflag]))
                f.close()
                # Continue with the text after the cut point.
                strContext = strContext[maxflag:]
                intFlag += 1
    except:
        # NOTE(review): bare except swallows everything, and the unpacking
        # below shadows the builtin `type` — consider `except Exception as e`.
        type, value, tb = sys.exc_info()
        print "Unexpected error:", type
        print "Unexpected error:", value.message
| [
"choakai@gmail.com"
] | choakai@gmail.com |
823203975452d074cb1a81ae7f37b18b0a3fbb53 | 35844c887d6da13d5b72e297183991aa0cea1b52 | /experiments/I-FGSM-eval.py | 8330e159fd2609e8d8b6f5596e7385f416e42842 | [] | no_license | soarlab/AAQNN | 5c5f87cf594ddb6f6c800907fa11d452bc88b4dc | ea6627ad9f0d55196d0dde90d7dbe5472be99d66 | refs/heads/master | 2022-01-21T08:11:44.616642 | 2019-06-24T08:42:45 | 2019-06-24T08:42:45 | 178,188,010 | 0 | 0 | null | 2022-01-13T01:08:45 | 2019-03-28T11:17:56 | Python | UTF-8 | Python | false | false | 16,650 | py | '''
This experiment is structured as follows:
1. Train QNNs for all quantization levels
2. Load samples that are correctly classified by all the QNNs from step 1 (accuracies are 100% on these samples)
3. Run the iterative FGSM attack for different Q levels
4. Evaluate the QNNs on new adversarial samples
Original paper: https://arxiv.org/pdf/1607.02533.pdf
'''
import tensorflow as tf
from cleverhans.attacks import ProjectedGradientDescent
from cleverhans.utils_keras import KerasModelWrapper
from keras import backend as K
from experiments.utils import get_fashion_mnist, filter_correctly_classified_samples, get_QNN, get_vanilla_NN, get_stats
import matplotlib.pyplot as plt
import numpy as np
EPOCHS = 2
EPS = 0.06
FGSM_PARAMS = {'clip_min': 0.,
'clip_max': 1.,
'eps': EPS,
# as in the original paper
'nb_iter': int(min(EPS * 255 + 4, 1.25 * EPS * 255)),
'rand_init': 0.
}
# initialize keras/tf session
sess = tf.Session(graph=tf.get_default_graph())
K.set_session(sess)
# get dataset
(train_images, train_labels), (test_images, test_labels) = get_fashion_mnist()
# load models
model_4bits_1 = get_QNN(4)
model_8bits_1 = get_QNN(8)
model_16bits_1 = get_QNN(16)
model_32bits_1 = get_QNN(32)
model_vanilla_nn_1 = get_vanilla_NN()
model_4bits_2 = get_QNN(4)
model_8bits_2 = get_QNN(8)
model_16bits_2 = get_QNN(16)
model_32bits_2 = get_QNN(32)
model_vanilla_nn_2 = get_vanilla_NN()
# train models
print("Training models...")
model_vanilla_nn_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_4bits_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_8bits_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_16bits_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_32bits_1.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
# plot weights distribution
print ("Vanilla weights")
min_value = None
max_value = None
weights_vanilla = []
for layer in model_vanilla_nn_1.get_weights():
for neuron in layer:
if isinstance(neuron, np.float32):
# bias
weights_vanilla.append(neuron)
continue
for weight in neuron:
weights_vanilla.append(weight)
if min_value is None or weight < min_value:
min_value = weight
if max_value is None or weight > max_value:
max_value = weight
ids = [x for x in range(0, len(weights_vanilla))]
plt.scatter(ids, weights_vanilla, marker=',', s=0.52)
axes = plt.gca()
axes.set_ylim([-1.1,1.1])
plt.title("Vanilla NN")
plt.xlabel('Weight "ids"', fontsize=18)
plt.ylabel('Weight value', fontsize=16)
plt.show()
mean, std, min, max = get_stats(np.array(weights_vanilla))
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
print ("QNN weights")
min_value = None
max_value = None
weights_qnn = []
for layer in model_8bits_1.get_weights():
for neuron in layer:
if isinstance(neuron, np.float32):
# bias
weights_qnn.append(neuron)
continue
for weight in neuron:
weights_qnn.append(weight)
if min_value is None or weight < min_value:
min_value = weight
if max_value is None or weight > max_value:
max_value = weight
plt.scatter(ids, weights_qnn, marker=',', s=0.52)
axes = plt.gca()
axes.set_ylim([-1.1,1.1])
plt.title("QNN")
plt.xlabel('Weight "ids"', fontsize=18)
plt.ylabel('Weight value', fontsize=16)
plt.show()
mean, std, min, max = get_stats(np.array(weights_qnn))
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
model_4bits_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_8bits_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_16bits_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_32bits_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
model_vanilla_nn_2.fit(train_images, train_labels, epochs=EPOCHS, verbose=0)
print("Training finished.")
# evaluate models on the test set
_, test_acc = model_4bits_1.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(test_images, test_labels, verbose=0)
print("Test accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
#filter samples correctly classified by all models
all_models = [model_4bits_1, model_4bits_2,
model_8bits_1, model_8bits_2,
model_16bits_1, model_16bits_2,
model_32bits_1, model_32bits_2,
model_vanilla_nn_1, model_vanilla_nn_2]
test_images, test_labels = filter_correctly_classified_samples(test_images, test_labels, all_models)
print("From now on using " + str(test_images.shape[0]) + " samples that are correctly classified by all " + str(len(all_models)) + " networks.")
print("All neural networks now have 100% accuracy.")
print()
# perform attack on 4 bits QNN
print("Generating adversarial samples for QNN_1 with 4 bits..")
wrap = KerasModelWrapper(model_4bits_1)
iterative_fgsm = ProjectedGradientDescent(wrap, sess)
adv = iterative_fgsm.generate_np(test_images, **FGSM_PARAMS)
print("Finished generating adversarial samples")
# quantify perturbation
mean, std, min, max = get_stats(np.array([np.linalg.norm(x - y) for x, y in zip(test_images, adv)]))
print("Information about L2 distances between adversarial and original samples:")
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
# evaluate models on adv samples
print("Evaluating accuracy of all neural networks on adversarial samples crafted for 4 bits QNN_1..")
_, test_acc = model_4bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
print()
# perform attack on 8 bits QNN
print("Generating adversarial samples for QNN_1 with 8 bits..")
wrap = KerasModelWrapper(model_8bits_1)
iterative_fgsm = ProjectedGradientDescent(wrap, sess)
adv = iterative_fgsm.generate_np(test_images, **FGSM_PARAMS)
print("Finished generating adversarial samples")
# quantify perturbation
mean, std, min, max = get_stats(np.array([np.linalg.norm(x - y) for x, y in zip(test_images, adv)]))
print("Information about L2 distances between adversarial and original samples:")
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
# evaluate models on adv samples
print("Evaluating accuracy of all neural networks on adversarial samples crafted for 8 bits QNN_1..")
_, test_acc = model_4bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
print()
# perform attack on 16 bits QNN
print("Generating adversarial samples for QNN_1 with 16 bits..")
wrap = KerasModelWrapper(model_16bits_1)
iterative_fgsm = ProjectedGradientDescent(wrap, sess)
adv = iterative_fgsm.generate_np(test_images, **FGSM_PARAMS)
print("Finished generating adversarial samples")
# quantify perturbation
mean, std, min, max = get_stats(np.array([np.linalg.norm(x - y) for x, y in zip(test_images, adv)]))
print("Information about L2 distances between adversarial and original samples:")
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
# evaluate models on adv samples
print("Evaluating accuracy of all neural networks on adversarial samples crafted for 16 bits QNN_1..")
_, test_acc = model_4bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
print()
# perform attack on 32 bits QNN
print("Generating adversarial samples for QNN with 32 bits..")
wrap = KerasModelWrapper(model_32bits_1)
iterative_fgsm = ProjectedGradientDescent(wrap, sess)
adv = iterative_fgsm.generate_np(test_images, **FGSM_PARAMS)
print("Finished generating adversarial samples")
# quantify perturbation
mean, std, min, max = get_stats(np.array([np.linalg.norm(x - y) for x, y in zip(test_images, adv)]))
print("Information about L2 distances between adversarial and original samples:")
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
# evaluate models on adv samples
print("Evaluating accuracy of all neural networks on adversarial samples crafted for 32 bits QNN_1..")
_, test_acc = model_4bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
print()
# perform attack on (32 bits) vanilla NN
print("Generating adversarial samples for vanilla NN_1 (with 32 bits)..")
wrap = KerasModelWrapper(model_vanilla_nn_1)
iterative_fgsm = ProjectedGradientDescent(wrap, sess)
adv = iterative_fgsm.generate_np(test_images, **FGSM_PARAMS)
print("Finished generating adversarial samples")
# quantify perturbation
mean, std, min, max = get_stats(np.array([np.linalg.norm(x - y) for x, y in zip(test_images, adv)]))
print("Information about L2 distances between adversarial and original samples:")
print("mean: " + str(mean))
print("std dev: " + str(std))
print("min: " + str(min))
print("max: " + str(max))
# evaluate models on adv samples
print("Evaluating accuracy of all neural networks on adversarial samples crafted for vanilla NN_1 (32 bits)..")
_, test_acc = model_4bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 4 bits: " + str(test_acc))
_, test_acc = model_4bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 4 bits: " + str(test_acc))
_, test_acc = model_8bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 8 bits: " + str(test_acc))
_, test_acc = model_8bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 8 bits: " + str(test_acc))
_, test_acc = model_16bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 16 bits: " + str(test_acc))
_, test_acc = model_16bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 16 bits: " + str(test_acc))
_, test_acc = model_32bits_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_1 with 32 bits: " + str(test_acc))
_, test_acc = model_32bits_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of QNN_2 with 32 bits: " + str(test_acc))
_, test_acc = model_vanilla_nn_1.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_1 (with 32 bits): " + str(test_acc))
_, test_acc = model_vanilla_nn_2.evaluate(adv, test_labels, verbose=0)
print("Accuracy of vanilla NN_2 (with 32 bits): " + str(test_acc))
print()
plt.figure(figsize=(5, 5))
for i in range(1, 26):
plt.subplot(5, 5, i)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(adv[i], cmap='gray')
plt.savefig("i-fgsm-vanilla-NN-adv" + str(EPS) + ".png")
| [
"martin.matak@gmail.com"
] | martin.matak@gmail.com |
677564fe5565b9383265cc420c7714182563d206 | fe1f0631ee492dca4ec4485f66c8b40f05f3178c | /anagramofpalindrome.py | eb34ee2de25a8dca039ffcddecda1adaa7dce558 | [] | no_license | ashleyabrooks/code-challenges | ad92d23eb98e2889609df79d7a5b107da12fbc67 | 7123ae9b2d2a3098f1678a6ea11acb8d917bc562 | refs/heads/master | 2020-05-23T08:27:24.735700 | 2017-05-04T18:45:10 | 2017-05-04T18:45:10 | 84,757,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,397 | py | """Is the word an anagram of a palindrome?
A palindrome is a word that reads the same forward and backwards
(eg, "racecar", "tacocat"). An anagram is a rescrambling of a word
(eg for "racecar", you could rescramble this as "arceace").
Determine if the given word is a re-scrambling of a palindrome.
The word will only contain lowercase letters, a-z.
Examples::
>>> is_anagram_of_palindrome("a")
True
>>> is_anagram_of_palindrome("ab")
False
>>> is_anagram_of_palindrome("aab")
True
>>> is_anagram_of_palindrome("arceace")
True
>>> is_anagram_of_palindrome("arceaceb")
False
"""
def is_anagram_of_palindrome(word):
    """Is the word an anagram of a palindrome?

    A word can be rearranged into a palindrome exactly when at most one
    letter occurs an odd number of times (that letter, if present, takes
    the single middle position).

    1. Count each letter's frequency.
    2. Count how many letters have an odd frequency.
    3. Anagram of a palindrome iff that count is 0 or 1.
    """
    letter_freq = {}
    for letter in word:
        # dict.get with a default replaces the separate membership test.
        letter_freq[letter] = letter_freq.get(letter, 0) + 1

    odd_letters = sum(1 for count in letter_freq.values() if count % 2 != 0)
    return odd_letters <= 1
if __name__ == '__main__':
    # Run the doctest examples embedded in the module docstring.
    # NOTE: Python 2 print statement — this module is Python 2 only.
    import doctest
    if doctest.testmod().failed == 0:
        print "\n*** ALL TEST PASSED!\n"
"ashley.brooks.a@gmail.com"
] | ashley.brooks.a@gmail.com |
3a72acdecca2753879d1c90b4a2dd713327a6573 | 74081fd60cea91ef2153c54559c2bba1ef494d18 | /task_4/src/calculate_5sma.py | 78a01108cdd0b1f348873e1ccf559e2555e3426e | [] | no_license | Avvallack/ML_Engineering | a59c8a5872263bf0b88e2b7d0aa25bf2c2270e70 | 8247a677b36874207bdf94f92aa43a23fe5faac0 | refs/heads/master | 2023-06-20T08:53:22.984821 | 2021-07-19T13:10:28 | 2021-07-19T13:10:28 | 366,710,222 | 0 | 1 | null | 2021-06-09T07:37:05 | 2021-05-12T12:37:07 | Python | UTF-8 | Python | false | false | 987 | py | import os
import pandas as pd
import datetime as dt
from argparse import ArgumentParser
def calculate_5sma(tick_name, date):
    """Compute the 5-period simple moving average for the previous hour.

    Reads the averaged prices written by the upstream task from
    ``/opt/airflow/data/<tick>/average/<date>/<hour>.csv``, adds a
    ``5SMA`` column (rolling mean of the ``Mean`` column over 5 rows;
    the first 4 rows are NaN) and writes the result under the ticker's
    ``moving_averages`` directory.

    :param tick_name: ticker symbol, e.g. ``'AAPL'``.
    :param date: timestamp string in ``%Y-%m-%d-%H`` format; the hour
        *before* this timestamp is the one processed.
    """
    ts = dt.datetime.strptime(date, "%Y-%m-%d-%H") - dt.timedelta(hours=1)
    hour = str(ts.hour)
    str_date = ts.strftime("%Y-%m-%d")

    source = '/opt/airflow/data/' + tick_name + '/average/' + str_date + '/' + hour + '.csv'
    df = pd.read_csv(source, index_col=0)
    df['5SMA'] = df['Mean'].rolling(window=5).mean()

    path = os.path.abspath('/opt/airflow/data/' + tick_name + '/moving_averages/' + str_date)
    if not os.path.exists(path):
        # exist_ok=True closes the check-then-create race: another Airflow
        # worker may create the directory between the exists() test and
        # makedirs(), which previously raised FileExistsError.
        os.makedirs(path, exist_ok=True)
        print("Directory ", path, " Created ")
    df.to_csv(path + '/5SMA_' + hour + '.csv')
if __name__ == '__main__':
    # CLI entry point: defaults let the script run standalone (AAPL, current
    # hour) or be driven by an Airflow task passing explicit arguments.
    parser = ArgumentParser()
    parser.add_argument('--tick_name', type=str, default='AAPL')
    parser.add_argument('--date', type=str, default=dt.datetime.now().strftime("%Y-%m-%d-%H"))
    args = parser.parse_args()
    calculate_5sma(**vars(args))
| [
"avvallack@gmail.com"
] | avvallack@gmail.com |
f5b462ddc5e915ef1e194c4b624b990cb536d53b | 673b2f10e156e1cb4c351b27e7ce582ba0646caa | /views.py | 6edecbd37afe25cf2e09c99fe424be09fbec9312 | [] | no_license | NegativeDearc/PartsChangeInformation | eef38d6b21efd62a34a68b095091ff01862bed0f | 526223b5600564f04a364cff76f7ce8d54bed4a3 | refs/heads/master | 2021-01-21T04:30:55.708843 | 2016-07-21T07:13:56 | 2016-07-21T07:13:56 | 49,067,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,369 | py | from flask import Flask,render_template,request,redirect,url_for,session,abort
from xlrd_extra_info import extra_info
from get_schedule import get_schedule
from AddDataToDataBase import add_data_VMI,db_to_dat,add_data_MAXX
from os import urandom
import datetime
import pytz
app = Flask(__name__)
app.secret_key = 'UITJMNAGNAUIGKL'  # NOTE(review): hard-coded secret key committed to source — move to config/env
@app.before_request
def csrf_protect():
    """Reject any POST whose form CSRF token is missing or stale.

    The session token is consumed (popped) on every POST, so each token
    is single-use; GET and other methods pass through untouched.
    """
    if request.method != 'POST':
        return
    expected = session.pop('_csrf_token', None)
    submitted = request.form.get('_csrf_token')
    if not expected or submitted != expected:
        abort(403)
def generate_csrf_token():
    # Lazily create a per-session CSRF token: 15 random bytes, hex-encoded.
    # NOTE: str.encode('hex') is Python 2 only (this app uses py2 print
    # statements elsewhere); py3 would need binascii.hexlify/secrets.
    if '_csrf_token' not in session:
        session['_csrf_token'] = urandom(15).encode('hex')
    return session['_csrf_token']
# Expose the helper to all Jinja templates as {{ csrf_token() }}.
app.jinja_env.globals['csrf_token'] = generate_csrf_token
@app.before_request
def conn_db():
    # Placeholder hook: presumably intended to open a DB connection before
    # each request; currently a deliberate no-op.
    pass
@app.route('/index',methods = ['GET','POST'])
@app.route('/',methods = ['GET','POST'])
def index():
    # Main page: render the day/night shift schedules as HTML tables and,
    # on a POST of the 'go' button with a 'spec' value, pull VMI and MAXX
    # data for that spec into the database, regenerate the .dat export and
    # redirect back (POST/redirect/GET pattern).
    df,df0 = get_schedule()
    print df,df0  # NOTE(review): leftover debug output — consider removing or logging
    print df.SPEC
    day = df.to_html(classes = "dayshift table-hover")
    night = df0.to_html(classes = "nightshift table-hover")
    tz = pytz.timezone('Asia/Shanghai')
    time = format(datetime.datetime.now(tz),'')
    # request.form reads values by the HTML 'name' attribute; the button's
    # 'value' attribute distinguishes which submit was clicked.
    if request.form.get('go') == 'go':
        if request.form.get('spec') is not None:
            session['spec'] = request.form.get('spec')
            add_data_VMI(session.get('spec'))
            add_data_MAXX(session.get('spec'))
            db_to_dat()
        return redirect(url_for('index'))
    return render_template('index.html',day = day,night = night,time = time)
@app.route('/api/<int:SPEC>')
def api(SPEC):
    """Detail endpoint: render the extra information for one SPEC number."""
    return render_template('api.html', data=extra_info(SPEC))
# @app.errorhandler(404)
# def page_not_found(e):
# return render_template('404.html'),404
#
# @app.errorhandler(500)
# def internal_server_error(e):
# return render_template('500.html'),500
if __name__ == '__main__':
    # Tornado deployment alternative, kept commented out for reference:
    # from tornado.wsgi import WSGIContainer
    # from tornado.httpserver import HTTPServer
    # from tornado.ioloop import IOLoop
    #
    # http_server = HTTPServer(WSGIContainer(app))
    # http_server.listen(5000)
    # IOLoop.instance().start()
    # NOTE(review): binding 0.0.0.0 exposes Flask's dev server on all
    # interfaces — acceptable only on a trusted network.
    app.run(threaded = True,host='0.0.0.0')
| [
"datingwithme@live.cn"
] | datingwithme@live.cn |
b829831b94ca8a1f3262021ef1aab5dcd77a1e7a | e57d7785276053332c633b57f6925c90ad660580 | /sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2019_08_01/aio/operations/_managed_clusters_operations.py | 56d3e44113621eb06dcba8abc584742b0bad79cf | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | adriananeci/azure-sdk-for-python | 0d560308497616a563b6afecbb494a88535da4c5 | b2bdfe659210998d6d479e73b133b6c51eb2c009 | refs/heads/main | 2023-08-18T11:12:21.271042 | 2021-09-10T18:48:44 | 2021-09-10T18:48:44 | 405,684,423 | 1 | 0 | MIT | 2021-09-12T15:51:51 | 2021-09-12T15:51:50 | null | UTF-8 | Python | false | false | 62,898 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ManagedClustersOperations:
"""ManagedClustersOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Thin container: all four collaborators are owned and supplied by
        # the generated service client; this class builds no pipeline itself.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        **kwargs: Any
    ) -> AsyncIterable["_models.ManagedClusterListResult"]:
        """Gets a list of managed clusters in the specified subscription.
        Gets a list of managed clusters in the specified subscription. The operation returns properties
        of each managed cluster.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
        # Default HTTP-status -> exception mapping; callers may extend or
        # override it via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # First page: build the templated URL plus api-version; subsequent
        # pages: follow the service-supplied next_link verbatim.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Split one deserialized page into (continuation token, items), the
        # shape AsyncItemPaged expects.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch a single page; any non-200 status becomes an azure-core error.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ManagedClusterListResult"]:
        """Lists managed clusters in the specified subscription and resource group.
        Lists managed clusters in the specified subscription and resource group. The operation returns
        properties of each managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ManagedClusterListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Same paging protocol as list(): templated URL for page one,
        # opaque next_link for the rest.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # One page -> (continuation token, items) for AsyncItemPaged.
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('ManagedClusterListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetch one page; non-200 statuses are raised as azure-core errors.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters'} # type: ignore
    async def get_upgrade_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.ManagedClusterUpgradeProfile":
        """Gets upgrade profile for a managed cluster.
        Gets the details of the upgrade profile for a managed cluster with a specified resource group
        and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterUpgradeProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterUpgradeProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterUpgradeProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get_upgrade_profile.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            # AKS cluster-name constraints (1-63 chars, alphanumeric ends,
            # '-'/'_' allowed in the middle) are enforced client-side here.
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Single GET, no paging/polling: anything but 200 is surfaced as an
        # ARM-formatted HttpResponseError (after error_map consultation).
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedClusterUpgradeProfile', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_upgrade_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/upgradeProfiles/default'} # type: ignore
    async def get_access_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        role_name: str,
        **kwargs: Any
    ) -> "_models.ManagedClusterAccessProfile":
        """Gets an access profile of a managed cluster.
        Gets the accessProfile for the specified role name of the managed cluster with a specified
        resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param role_name: The name of the role for managed cluster accessProfile resource.
        :type role_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedClusterAccessProfile, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterAccessProfile
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedClusterAccessProfile"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get_access_profile.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
            'roleName': self._serialize.url("role_name", role_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Despite the "get" name this is a POST: the URL targets the
        # .../listCredential ARM action, which requires POST.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedClusterAccessProfile', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_access_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/accessProfiles/{roleName}/listCredential'} # type: ignore
    async def list_cluster_admin_credentials(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.CredentialResults":
        """Gets cluster admin credential of a managed cluster.
        Gets cluster admin credential of the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2019_08_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.list_cluster_admin_credentials.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # POST because credential retrieval is an ARM action
        # (.../listClusterAdminCredential), not a readable sub-resource.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('CredentialResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_cluster_admin_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterAdminCredential'} # type: ignore
    async def list_cluster_user_credentials(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.CredentialResults":
        """Gets cluster user credential of a managed cluster.
        Gets cluster user credential of the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: CredentialResults, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2019_08_01.models.CredentialResults
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.CredentialResults"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.list_cluster_user_credentials.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # POST: .../listClusterUserCredential is an ARM action endpoint.
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('CredentialResults', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    list_cluster_user_credentials.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/listClusterUserCredential'} # type: ignore
    async def get(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> "_models.ManagedCluster":
        """Gets a managed cluster.
        Gets the details of the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ManagedCluster, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Plain resource GET: only 200 is a success here.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedCluster",
        **kwargs: Any
    ) -> "_models.ManagedCluster":
        # Initial PUT of the create-or-update long-running operation; called
        # once by begin_create_or_update, which then polls to completion.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedCluster')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 = existing cluster updated, 201 = cluster created; both carry a
        # ManagedCluster body.
        if response.status_code == 200:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedCluster",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ManagedCluster"]:
        """Creates or updates a managed cluster.
        Creates or updates a managed cluster with the specified configuration for agents and Kubernetes
        version.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Create or Update a Managed Cluster operation.
        :type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # Only issue the initial PUT when starting fresh; with a continuation
        # token the poller below resumes a previously started operation.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs only applied to the initial request; drop them so they
        # are not replayed on every polling request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        # polling=True -> standard ARM polling; False -> return immediately
        # after the initial response; otherwise treat as a custom strategy.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'} # type: ignore
    async def _update_tags_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> "_models.ManagedCluster":
        # Issues the single PATCH request that starts the Update Tags
        # long-running operation; polling/resume is handled by begin_update_tags.
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the default status-code -> exception mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_tags_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'TagsObject')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a valid initial response here; anything else is an ARM error.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ManagedCluster', pipeline_response)
        # cls, when supplied, post-processes (response, model, headers) into a custom type.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def begin_update_tags(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.TagsObject",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.ManagedCluster"]:
        """Updates tags on a managed cluster.
        Updates a managed cluster with the specified tags.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Update Managed Cluster Tags operation.
        :type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.TagsObject
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either ManagedCluster or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.containerservice.v2019_08_01.models.ManagedCluster]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ManagedCluster"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial PATCH only when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._update_tags_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # Drop request-only kwargs so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response body once polling completes.
            deserialized = self._deserialize('ManagedCluster', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def _delete_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the DELETE request that starts the long-running delete;
        # begin_delete wraps this coroutine with a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 = delete accepted (async), 204 = nothing to delete; no response body either way.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deletes a managed cluster.
        Deletes the managed cluster with a specified resource group and name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial DELETE only when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # Drop request-only kwargs so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no response body: returns None unless cls overrides.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}'}  # type: ignore
    async def _reset_service_principal_profile_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterServicePrincipalProfile",
        **kwargs: Any
    ) -> None:
        # Issues the POST that starts the Reset Service Principal Profile LRO;
        # begin_reset_service_principal_profile wraps this coroutine with a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_service_principal_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterServicePrincipalProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing; no body either way.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_service_principal_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    async def begin_reset_service_principal_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterServicePrincipalProfile",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Reset Service Principal Profile of a managed cluster.
        Update the service principal Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset Service Principal Profile operation for a
         Managed Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterServicePrincipalProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial POST only when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._reset_service_principal_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # Drop request-only kwargs so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No response body: returns None unless cls overrides.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_service_principal_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetServicePrincipalProfile'}  # type: ignore
    async def _reset_aad_profile_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterAADProfile",
        **kwargs: Any
    ) -> None:
        # Issues the POST that starts the Reset AAD Profile LRO;
        # begin_reset_aad_profile wraps this coroutine with a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._reset_aad_profile_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'ManagedClusterAADProfile')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing; no body either way.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reset_aad_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    async def begin_reset_aad_profile(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.ManagedClusterAADProfile",
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Reset AAD Profile of a managed cluster.
        Update the AAD Profile for a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters supplied to the Reset AAD Profile operation for a Managed
         Cluster.
        :type parameters: ~azure.mgmt.containerservice.v2019_08_01.models.ManagedClusterAADProfile
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial POST only when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._reset_aad_profile_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                parameters=parameters,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # Drop request-only kwargs so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No response body: returns None unless cls overrides.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reset_aad_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resetAADProfile'}  # type: ignore
    async def _rotate_cluster_certificates_initial(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> None:
        # Issues the POST that starts the Rotate Cluster Certificates LRO;
        # begin_rotate_cluster_certificates wraps this coroutine with a poller.
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-08-01"
        accept = "application/json"
        # Construct URL
        url = self._rotate_cluster_certificates_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 202 = accepted for async processing, 204 = no content; no response body either way.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _rotate_cluster_certificates_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
    async def begin_rotate_cluster_certificates(
        self,
        resource_group_name: str,
        resource_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Rotate certificates of a managed cluster.
        Rotate certificates of a managed cluster.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Fire the initial POST only when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._rotate_cluster_certificates_initial(
                resource_group_name=resource_group_name,
                resource_name=resource_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        # Drop request-only kwargs so they are not forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No response body: returns None unless cls overrides.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_rotate_cluster_certificates.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/rotateClusterCertificates'}  # type: ignore
| [
"noreply@github.com"
] | adriananeci.noreply@github.com |
2a9646791ee6802bbac9b128a86e9e6c4b356ad7 | 38613a48d1dbef6859189b539937e75ff6c5c9e9 | /Kivy/imagepane.py | 734f5946b13b1e1ef6bc44652fe860d30b103bfd | [] | no_license | denim5409/covid-19 | 4925254436abb08a3b83e9bb639c8f5d25704b8f | c6ca5c112d7796a6f317bc6160316cf688ff1177 | refs/heads/master | 2022-11-19T23:00:49.315233 | 2020-07-22T06:22:31 | 2020-07-22T06:22:31 | 280,999,243 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,843 | py |
from kivy.uix.image import Image
from kivy.lang import Builder
from kivy.app import App
from selectionbox import SelectionBox
# NOTE(review): App is imported but not referenced in this module — confirm
# whether it is needed (e.g. by the kv file) before removing.
# Load the companion kv layout at import time so ImagePane's rules apply.
Builder.load_file("imagepane.kv")
class ImagePane(Image):
    """Image widget the user can draw selection rectangles on by dragging.

    Completed rectangles are kept in ``self.rectangles`` and broadcast to
    listeners through the custom ``on_store_rectangles`` event.
    """

    # Rectangle currently being dragged out; None when no drag is in progress.
    drawing_rectangle = None

    def __init__(self, **kwargs):
        super(ImagePane, self).__init__(**kwargs)
        # Bug fix: ``rectangles`` used to be a class-level list, so every
        # ImagePane instance shared the same rectangles. Make it per-instance.
        self.rectangles = []
        self.register_event_type('on_store_rectangles')

    def on_store_rectangles(self, *args, **kwargs):
        # Default handler for the custom event; listeners bind to override it.
        pass

    def on_touch_move(self, touch):
        """Start or resize the selection box spanning touch origin -> current pos."""
        if self.collide_point(*touch.pos):
            # Normalize so pos is the bottom-left corner regardless of drag direction.
            pos = [min(touch.pos[n], touch.opos[n]) for n in [0, 1]]
            size = [abs(touch.pos[n] - touch.opos[n]) for n in [0, 1]]
            if self.drawing_rectangle is None:
                self.drawing_rectangle = SelectionBox(pos=pos, size=size, image_pane=self)
                self.add_new_rectangle(self.drawing_rectangle)
            else:
                self.drawing_rectangle.pos = pos
                self.drawing_rectangle.size = size

    def on_touch_up(self, touch):
        """Finish the current drag: freeze its coordinates and persist."""
        if self.drawing_rectangle:
            self.drawing_rectangle.compute_unit_coordinates()
            self.drawing_rectangle = None
            self.store_rectangles()

    def add_new_rectangle(self, rect):
        """Attach *rect* as a child widget and track it."""
        self.add_widget(rect)
        self.rectangles.append(rect)

    def delete_last_rectangle(self):
        """Undo: remove the most recently drawn rectangle, if any."""
        if self.rectangles:
            bad_rectangle = self.rectangles.pop()
            self.remove_widget(bad_rectangle)
            self.store_rectangles()

    def clear_rectangles(self):
        """Remove every rectangle from the pane."""
        self.rectangles = []
        self.clear_widgets()

    def store_rectangles(self):
        """Notify listeners that the rectangle set changed."""
        self.dispatch('on_store_rectangles', rectangles=self.rectangles)

    def redraw_rectangles(self):
        """Re-project stored unit coordinates after a resize/relayout."""
        for rect in self.rectangles:
            rect.compute_screen_coordinates()
| [
"denim3@hanmail.net"
] | denim3@hanmail.net |
1897d9ce65665335394d0b57ff2ccf5a2082d7f6 | 5f2608d4a06e96c3a032ddb66a6d7e160080b5b0 | /week6/homework_w6_q_c1.py | 406a821246f24f931111b8aadf5a01215a8e8aea | [] | no_license | sheikhusmanshakeel/statistical-mechanics-ens | f3e150030073f3ca106a072b4774502b02b8f1d0 | ba483dc9ba291cbd6cd757edf5fc2ae362ff3df7 | refs/heads/master | 2020-04-08T21:40:33.580142 | 2014-04-28T21:10:19 | 2014-04-28T21:10:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,542 | py | import math, random, pylab
def rho_free(x, y, beta):
    """Unnormalized free-particle density matrix element between x and y."""
    displacement = x - y
    return math.exp(-displacement ** 2 / (2.0 * beta))
def levy_free_path(xstart, xend, dtau, N):
    """Sample an N-slice free-particle path from xstart towards xend.

    Lévy construction: each new slice is Gaussian, conditioned on the
    previous slice and the fixed endpoint xend.
    """
    path = [xstart]
    for step in range(1, N):
        tau_right = (N - step) * dtau
        mean = (tau_right * path[-1] + dtau * xend) / (dtau + tau_right)
        std_dev = math.sqrt(1.0 / (1.0 / dtau + 1.0 / tau_right))
        path.append(random.gauss(mean, std_dev))
    return path
beta = 20.0        # inverse temperature
N = 80             # number of imaginary-time slices
dtau = beta / N
n_steps = 100000   # Monte Carlo sweeps
x = [0.0] * N      # current path, one position per slice
data = []
# Trotter weight of a harmonic-oscillator path: exp(-dtau * sum_k x_k^2 / 2).
Weight_trott = lambda y: math.exp(sum(-a **2/ 2.0 * dtau for a in y))
for step in range(n_steps):
    Ncut = random.randint(0, N-1)
    # Propose: resample slices [0, Ncut) with a free-particle Lévy bridge
    # pinned at x[0] and x[Ncut], keep the tail unchanged.
    # Bug fix: for Ncut == 0, levy_free_path returns one element ([x[0]]),
    # so the old concatenation produced N + 1 slices and the path grew over
    # time. Slicing to [:Ncut] makes Ncut == 0 a harmless no-op proposal.
    x_new = levy_free_path(x[0], x[Ncut], dtau, Ncut)[:Ncut] + x[Ncut:]
    # Metropolis acceptance on the potential-energy part of the weight
    # (the kinetic part is sampled exactly by the Lévy construction).
    if random.uniform(0, 1) < min(1, Weight_trott(x_new) / Weight_trott(x)):
        x = x_new[:]
    k = random.randint(0, N - 1)
    data.append(x[k])
print(len(data))  # fixed: Python 2 print statement broke under Python 3
pylab.hist(data, bins=50, normed=True, label='QMC')
x_values = [0.1 * a for a in range (-30, 30)]
# Exact ground/thermal-state density of the harmonic oscillator for comparison.
y_values = [math.sqrt(math.tanh(beta / 2.0)) / math.sqrt(math.pi) * \
            math.exp( - xx **2 * math.tanh( beta / 2.0)) for xx in x_values]
pylab.plot(x_values, y_values, label='exact')
pylab.xlabel('$x$')
pylab.ylabel('$\\pi(x)$ (normalized)')
pylab.axis([-3.0, 3.0, 0.0, 0.8])
pylab.legend()
ProgType = 'Levy_free_path'
pylab.title(ProgType + ' beta = ' + str(beta) + ', dtau = ' + str(dtau) +
            ', Nsteps = '+ str(n_steps))
pylab.savefig(ProgType + str(beta) + '.png')
pylab.show()
| [
"noelevans@gmail.com"
] | noelevans@gmail.com |
06ffea8d37e7baecbc877318ae07f0960176aa71 | 1255cedc3b8c486f07fb12b90b75b8773b4714be | /xnote/app/migrations/0002_auto_20210704_1851.py | ab7cafc76b864f0fe4f3aa7f3cbd0fcd44849f6c | [
"Apache-2.0"
] | permissive | sebastianczech/Xnote | 81c4cd00b2759037b2e538172ca70abdfba2740c | 6b6785f5d1db37322b74818aa355eddad3a7a8a9 | refs/heads/main | 2023-07-19T14:22:43.026363 | 2021-09-18T14:15:54 | 2021-09-18T14:15:54 | 376,524,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,100 | py | # Generated by Django 3.2.4 on 2021-07-04 18:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Restrict the ``month`` field of every wallet model to calendar months.

    Each field becomes an IntegerField with choices 1-12 and a default of 7
    (July). The alteration is identical for all seven wallet models, so the
    operations list is generated rather than spelled out seven times.
    """

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name=wallet_model,
            name='month',
            field=models.IntegerField(
                choices=[(m, m) for m in range(1, 13)],
                default=7,
            ),
        )
        for wallet_model in (
            'walletaccount',
            'walletcar',
            'walletcredit',
            'walletdeposit',
            'walletexpense',
            'wallethouse',
            'walletincome',
        )
    ]
| [
"sebaczech@gmail.com"
] | sebaczech@gmail.com |
70e34f850771c4cfeaa578be02d172c9455bbe17 | b1445fff58b40103cf689721992315a6631c2c28 | /telegrasp.py | 7fc7c07304cdee826a2e47dbe1d5be51508f86b9 | [] | no_license | pratyush19919/Raspberry-Pi-and-Telegram-App-Control-GPIO-s | dc0ad14f0e3612922d203d6f810995b05bf86bef | f560c9c5a69da477f6f01d1c99ab7e99b4f9a340 | refs/heads/master | 2022-12-15T04:02:24.287637 | 2020-09-10T19:08:30 | 2020-09-10T19:08:30 | 289,351,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,223 | py | #import required libraries & packages
import telepot #for telegram
import time,datetime
import RPi.GPIO as GPIO
import requests #for web-scraping
import random
from bs4 import BeautifulSoup # for parsing
from telepot.loop import MessageLoop
GPIO.setmode(GPIO.BOARD)
#setting & initializing pins
# Physical (BOARD-numbered) pins for the two relays and the status LED.
Relay1=16
led =10
Relay2=12
GPIO.setwarnings(False)
# Configure all three pins as outputs and drive them low initially.
GPIO.setup(led,GPIO.OUT)
GPIO.output(led,0)
GPIO.setup(Relay1,GPIO.OUT)
GPIO.output(Relay1,0)
GPIO.setup(Relay2,GPIO.OUT)
GPIO.output(Relay2,0)
def action(msg):
    """Handle one incoming Telegram message: info queries and GPIO control."""
    chat_id = msg["chat"]["id"]  # chat to reply to
    command = msg["text"]        # raw text the user sent
    print('Received: %s' %command)
    print(msg["chat"]["id"])
    # --- informational commands --------------------------------------
    if "date" in command:
        telegram_bot.sendMessage(
            chat_id,
            "The Date is " + str(datetime.datetime.now().strftime("%d/%m/%Y")))
    if "time" in command:
        telegram_bot.sendMessage(
            chat_id,
            "The Time is " + str(datetime.datetime.now().strftime("%H:%M:%S")))
    if "news" in command:
        # Reply with the scraped headlines, numbered and separated.
        parts = ["<==:::: Today's headlines are ::::==> \n"]
        for idx, headline in enumerate(scrape()):
            parts.append("----------------------------\n")
            parts.append(str(idx + 1) + " " + headline + "\n")
        telegram_bot.sendMessage(chat_id, "".join(parts))
    # --- GPIO control -------------------------------------------------
    # The relays are driven with 0 to switch on and 1 to switch off
    # (the opposite of the LED).
    if "on" in command:
        message = "Turned On"
        if "Led" in command:
            GPIO.output(led,1)
            message = message + " Led"
        if "Relay1" in command:
            GPIO.output(Relay1,0)
            message = message + " Relay1"
        if "Relay2" in command:
            GPIO.output(Relay2,0)
            message = message + " Relay2"
        telegram_bot.sendMessage(chat_id, message)
    if "off" in command:
        message = "Turned Off"
        if "Led" in command:
            GPIO.output(led,0)
            message = message + " Led"
        if "Relay1" in command:
            GPIO.output(Relay1,1)
            message = message + " Relay1"
        if "Relay2" in command:
            GPIO.output(Relay2,1)
            message = message + " Relay2"
        telegram_bot.sendMessage(chat_id, message)
def scrape():
    """Scrape up to 10 random headline strings from the India Today news page.

    Returns:
        list[str]: headline texts picked at random from the page's
        ``<p class="story">`` elements (duplicates possible, as each pick
        is independent). Empty list when no headlines are found.
    """
    url = "https://www.indiatoday.in/news.html"  # page we scrape
    res = requests.get(url)
    soup = BeautifulSoup(res.text, "lxml")
    stories = soup.find_all("p", class_="story")
    if not stories:
        # Layout change or empty response: the original crashed here.
        return []
    # random.choice picks each element uniformly. The original expression,
    # head[random.randint(0, len(head)) - 1], could yield index -1 (randint's
    # upper bound is inclusive), double-weighting the last headline.
    return [str(random.choice(stories).text) for _ in range(10)]
# Create the bot with the (redacted) BotFather API key and verify it works.
telegram_bot=telepot.Bot("******************************************")#API Key you get from the bot-father in telegram app
print(telegram_bot.getMe())
# Dispatch every incoming message to action() on a background thread.
MessageLoop(telegram_bot,action).run_as_thread()
print("started and running")
while True:
time.sleep(1000)
| [
"noreply@github.com"
] | pratyush19919.noreply@github.com |
eba0cd90799ab695a36c1fe7f44805e350c2d266 | 45da48ae0a87f4bb27409bfe2e947b29a2d4a0d0 | /znake/systest/data/fails/systest/tests/test_systest.py | b8cd8024868589412413ece7cb15171aecabc6bf | [
"Apache-2.0"
] | permissive | per-bohlin/opensourcelib | 3923165982ae1b2c78602a3485684ded75c28c36 | e48427fd0b5d87ea21484e85d2575c8b8879b9a3 | refs/heads/master | 2020-05-21T21:34:15.112527 | 2019-05-11T16:57:58 | 2019-05-11T16:57:58 | 186,156,987 | 0 | 0 | NOASSERTION | 2019-05-11T16:34:39 | 2019-05-11T16:34:39 | null | UTF-8 | Python | false | false | 37 | py | def test_systest():
assert False
| [
"per.bohlin@zenterio.com"
] | per.bohlin@zenterio.com |
57c678204bd439cd4206439a9a1b42192f35babd | 356d9ac141206f98f991bd3d136b35485b228d21 | /advance_python_class_3/Homework1/misha_textgame.py | 76231f641c5858197ec327a7e7246173e5f64a6e | [
"MIT"
] | permissive | mishka28/NYU-Python | bd86eaa8096e487d9639fb16e426074d6593630a | 7309ac6890ddaa86a6e2d0113e99d8633477e503 | refs/heads/master | 2021-01-25T09:10:48.320431 | 2019-02-24T18:32:02 | 2019-02-24T18:32:02 | 93,797,349 | 0 | 0 | null | 2017-06-24T14:43:09 | 2017-06-08T22:38:49 | Shell | UTF-8 | Python | false | false | 3,838 | py | #!/usr/bin/env python3
from random import randint
class Character:
    """Base class for all combatants.

    Subclasses are expected to define the stats used by the combat methods:
    ``attack``, ``defence`` (fraction in [0, 1]), ``health``, ``health_max``
    and ``healpower``.
    """

    def __init__(self):
        # Stats are intentionally left to subclasses; the base class only
        # provides shared combat behaviour. (Removed the dead commented-out
        # defaults and the bare "return" of the original.)
        pass

    def do_damage(self, target):
        """Deal this character's attack to *target*, reduced by its defence."""
        damage = self.attack * (1 - target.defence)
        target.health -= damage

    def heal(self, heal_target):
        """Restore ``healpower`` health to *heal_target*, capped at its max."""
        heal_target.health = min(heal_target.health + self.healpower,
                                 heal_target.health_max)
class Boss(Character):
    """The raid boss: huge health pool, heavy hits, no healing of its own."""

    def __init__(self):
        self.name, self.type = "boss", "Boss"
        self.health = self.health_max = 500
        self.mana = self.mana_man = 1
        self.defence = 0.60  # fraction of incoming damage absorbed
        self.attack = 55
        self.healpower = 0
class Tank(Character):
    """Durable front-liner: high health and defence, modest damage."""
    def __init__(self):
        # Character.__init__(self)
        self.name = "tank"
        self.type = "Tank"
        self.health = 200
        self.health_max = 200
        self.mana = 1
        self.mana_man = 1  # NOTE(review): likely a typo for "mana_max" (same in siblings) -- confirm
        self.defence = 0.5 # fraction of incoming damage absorbed (old "percentage" note was misleading)
        self.attack = 10
        self.healpower = 0
        # Boss(self)
    # def attack(self, target):
    #     self.target = target
    #     self.do_damage(self.target)
class Healer(Character):
    """Support role: frail, weak attack, trades mana for party healing."""

    def __init__(self):
        Character.__init__(self)
        self.name, self.type = "healer", "Healer"
        self.health = self.health_max = 70
        self.mana = self.mana_man = 90
        self.defence = 0.1     # fraction of incoming damage absorbed
        self.attack = 5
        self.healpower = 20    # health restored per cast
        self.healmanacost = 7  # mana spent per cast
        self.manaregen = 3     # mana regained each round
class Archer(Character):
    """Glass cannon: low defence but the party's biggest damage dealer."""

    def __init__(self):
        Character.__init__(self)
        self.name, self.type = "archer", "Archer"
        self.health = self.health_max = 90
        self.mana = 30
        self.mana_man = 10
        self.defence = 0.15  # fraction of incoming damage absorbed
        self.attack = 30
        self.healpower = 0
if __name__ == "__main__":
    # Build one raid party plus the boss, then simulate up to 30 rounds.
    boss = Boss()
    tank = Tank()
    healer = Healer()
    archer = Archer()
    rounds = 30
    print("Tank`s current health {}".format(tank.health))
    print("Boss`s current health {}".format(boss.health))
    for turn in range(rounds):
        if tank.health >= 0:
            # Tank phase: boss hits the tank; tank and archer hit back.
            boss.do_damage(tank)
            tank.do_damage(boss)
            archer.do_damage(boss)
            if boss.health <= 0:
                boss.health = max(boss.health , 0)
                print("{} is dead in {} rounds".format(boss.name, turn))
                break
            print("Boss`s current health {} round {}".format(boss.health, turn))
            if healer.mana >= healer.healmanacost:
                healer.heal(tank)
                print("Tank`s current health after heal {}".format(tank.health))
                healer.mana = healer.mana - healer.healmanacost
            else:
                print("healer has no nough mana {}".format(healer.mana))
        else:
            # Archer phase: with the tank down, the boss targets the archer.
            print("Tank is dead, boss is killing the archer")
            if archer.health >= 0:
                boss.do_damage(archer)
                archer.do_damage(boss)
                if boss.health <= 0:
                    boss.health = max(boss.health , 0)
                    print("{} is dead in {} rounds".format(boss.name, turn))
                    break
                if healer.mana >= healer.healmanacost:
                    healer.heal(archer)
                    healer.mana = healer.mana - healer.healmanacost
                else:
                    print("healer has no nough mana {}".format(healer.mana))
            else:
                print("you failed the Raid")
                break
        healer.mana = healer.mana + healer.manaregen
        print("Tank`s current health {}".format(tank.health))
        print("Boss`s current health {}".format(boss.health))
        print("Healer`s current mana {}".format(healer.mana))
        print("Archer`s current health {}".format(archer.health))
| [
"mishiko28_chigo@yahoo.com"
] | mishiko28_chigo@yahoo.com |
45a096453041251fe1c13b08d4e0f339ccb45baf | 469cb03e5e88da9abdca3802081b1814259bdb46 | /pysstv/__main__.py | ac1e7f1a09689b8dad809ae451fd65cb25e2c11a | [
"MIT"
] | permissive | omkolhe/pySSTV | 089e9f3ed46385a58abde5e5392513a4a84b23aa | da8d8f16ba61bab4fc3c35754c81687f76365b01 | refs/heads/master | 2021-09-01T18:41:42.525613 | 2017-12-28T08:49:33 | 2017-12-28T08:49:33 | 115,403,676 | 0 | 0 | null | 2017-12-26T08:43:16 | 2017-12-26T08:43:16 | null | UTF-8 | Python | false | false | 2,185 | py | #!/usr/bin/env python
from __future__ import print_function
from PIL import Image
from argparse import ArgumentParser
from sys import stderr
from pysstv import color, grayscale
SSTV_MODULES = [color, grayscale]
def main():
    """Command-line entry point: convert an image file to an SSTV WAV file."""
    module_map = build_module_map()
    parser = ArgumentParser(
        description='Converts an image to an SSTV modulated WAV file.')
    parser.add_argument('img_file', metavar='image.png',
                        help='input image file name')
    parser.add_argument('wav_file', metavar='output.wav',
                        help='output WAV file name')
    parser.add_argument(
        '--mode', dest='mode', default='MartinM1', choices=module_map,
        help='image mode (default: Martin M1)')
    parser.add_argument('--rate', dest='rate', type=int, default=48000,
                        help='sampling rate (default: 48000)')
    parser.add_argument('--bits', dest='bits', type=int, default=16,
                        help='bits per sample (default: 16)')
    parser.add_argument('--vox', dest='vox', action='store_true',
                        help='add VOX tones at the beginning')
    parser.add_argument('--fskid', dest='fskid',
                        help='add FSKID at the end')
    parser.add_argument('--chan', dest='chan', type=int,
                        help='number of channels (default: mono)')
    args = parser.parse_args()

    # The image must be at least as large as the chosen mode's raster.
    image = Image.open(args.img_file)
    mode = module_map[args.mode]
    big_enough = all(actual >= needed for actual, needed
                     in zip(image.size, (mode.WIDTH, mode.HEIGHT)))
    if not big_enough:
        print(('Image must be at least {m.WIDTH} x {m.HEIGHT} pixels '
               'for mode {m.__name__}').format(m=mode), file=stderr)
        raise SystemExit(1)

    # Build the SSTV object, apply the optional features, write the WAV.
    sstv = mode(image, args.rate, args.bits)
    sstv.vox_enabled = args.vox
    if args.fskid:
        sstv.add_fskid_text(args.fskid)
    if args.chan:
        sstv.nchannels = args.chan
    sstv.write_wav(args.wav_file)
def build_module_map():
    """Map each SSTV mode class name to the class, across all mode modules."""
    return {mode.__name__: mode
            for module in SSTV_MODULES
            for mode in module.MODES}
# Entry point for "python -m pysstv" (this module is pysstv/__main__.py).
if __name__ == '__main__':
    main()
| [
"omkolhe026@gmail.com"
] | omkolhe026@gmail.com |
a4f391c12ed7d15a453b0b814ea0c2e443125c85 | 02b26f97f268c9b52d0680373d116ca375985f0e | /button.py | 0f0262f1fd43e30f1fa84e2d31a17ed44d27d588 | [] | no_license | JacekWajdzik/alien-invasion | 0883c0b9bfcd99a8092dc289d596d7b5380ac931 | c04fb2fd3ccf0b3603dc41b80f1ecf40c1803519 | refs/heads/master | 2022-10-28T10:47:57.329306 | 2020-06-11T19:40:53 | 2020-06-11T19:40:53 | 271,631,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | import pygame.font
class Button():
    """Clickable on-screen button rendered with pygame."""

    def __init__(self, ai_settings, screen, msg):
        """Set up the button geometry and prepare its label.

        ``ai_settings`` is accepted for interface parity with the caller
        but is not used here.
        """
        self.screen = screen
        self.screen_rect = screen.get_rect()
        # Fixed button dimensions and colours.
        self.width, self.height = 200, 50
        self.button_color = (230, 230, 250)
        self.text_color = (72, 61, 139)
        self.font = pygame.font.SysFont(None, 48)
        # Build the button rectangle and centre it on the screen.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect.center = self.screen_rect.center
        # The label only needs to be rendered once.
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Render *msg* onto an image and centre it inside the button."""
        rendered = self.font.render(msg, True, self.text_color,
                                    self.button_color)
        self.msg_image = rendered
        self.msg_image_rect = rendered.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        """Draw the button background, then blit its label on top."""
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
| [
"66798436+JacekWajdzik@users.noreply.github.com"
] | 66798436+JacekWajdzik@users.noreply.github.com |
ef1c3842e4def65a489bb02d1b5e6ceffb8692bf | e56214188faae8ebfb36a463e34fc8324935b3c2 | /test/test_appliance_upgrade_ref.py | 1ae1a993fdadb83c88a412f85ab4532318492641 | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 1,923 | py | # coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.appliance_upgrade_ref import ApplianceUpgradeRef # noqa: E501
from intersight.rest import ApiException
class TestApplianceUpgradeRef(unittest.TestCase):
    """ApplianceUpgradeRef unit test stubs"""
    # Auto-generated scaffold (OpenAPI Generator): fixtures intentionally empty.
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def testApplianceUpgradeRef(self):
        """Test ApplianceUpgradeRef"""
        # FIXME: construct object with mandatory attributes with example values
        # model = intersight.models.appliance_upgrade_ref.ApplianceUpgradeRef() # noqa: E501
        pass
# Allow running this stub file directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"ucs-build@github.com"
] | ucs-build@github.com |
62dd346e363b9f7c5eec5996953aa62a4899e307 | bc958f72cb3d385001e1952b853f34f341c908e8 | /dvwa_bruteforce.py | a628b8c189666df2ca94fe5c0d3c98490772ded0 | [] | no_license | AndreMessi/security_tools | 458cf12e2e7aa21afb68b193e2f76388b53ff45f | 506071198640e2502fe9ec31d21b7eb3c3cbc1c8 | refs/heads/master | 2020-04-21T13:32:20.596892 | 2019-04-22T06:15:26 | 2019-04-22T06:15:26 | 169,602,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,450 | py | #!/usr/bin/python
# Quick PoC template for brute force HTTP GET form
# Target: DVWA v1.10 (Brute Force - Low)
# Date: 2015-10-25
# Author: g0tmi1k ~ https://blog.g0tmi1k.com/
# Source: https://blog.g0tmi1k.com/2015/10/dvwa-bruteforce-low/
import requests
import sys
import re
from BeautifulSoup import BeautifulSoup
# Variables
target = 'http://192.168.1.44/DVWA'
sec_level = 'low'
dvwa_user = 'admin'
dvwa_pass = 'password'
user_list = '/usr/share/seclists/Usernames/top_shortlist.txt'
pass_list = '/usr/share/seclists/Passwords/rockyou.txt'
# Value to look for in response header (Whitelisting)
success = 'Welcome to the password protected area'
# Get the anti-CSRF token
def csrf_token():
    """Fetch /login.php and return (session_id, user_token).

    The anti-CSRF token is read from the hidden "user_token" input; the
    session id from the Set-Cookie header. Exits the process on failure.
    """
    try:
        # Make the request to the URL
        print "\n[i] URL: %s/login.php" % target
        r = requests.get("{0}/login.php".format(target), allow_redirects=False)
    except:
        # Feedback for the user (there was an error) & Stop execution of our request
        print "\n[!] csrf_token: Failed to connect (URL: %s/login.php).\n[i] Quitting." % (target)
        sys.exit(-1)
    # Extract anti-CSRF token
    soup = BeautifulSoup(r.text)
    user_token = soup("input", {"name": "user_token"})[0]["value"]
    print "[i] user_token: %s" % user_token
    # Extract session information
    session_id = re.match("PHPSESSID=(.*?);", r.headers["set-cookie"])
    session_id = session_id.group(1)
    print "[i] session_id: %s" % session_id
    return session_id, user_token
# Login to DVWA core
def dvwa_login(session_id, user_token):
    """Log into the DVWA app using the module-level admin credentials.

    Success is a 301/302 redirect to index.php; the process exits on any
    connection, status or login failure. Returns True on success.
    """
    # POST data
    data = {
        "username": dvwa_user,
        "password": dvwa_pass,
        "user_token": user_token,
        "Login": "Login"
    }
    # Cookie data
    cookie = {
        "PHPSESSID": session_id,
        "security": sec_level
    }
    try:
        # Make the request to the URL
        print "\n[i] URL: %s/login.php" % target
        print "[i] Data: %s" % data
        print "[i] Cookie: %s" % cookie
        r = requests.post("{0}/login.php".format(target), data=data, cookies=cookie, allow_redirects=False)
    except:
        # Feedback for the user (there was an error) & Stop execution of our request
        print "\n\n[!] dvwa_login: Failed to connect (URL: %s/login.php).\n[i] Quitting." % (target)
        sys.exit(-1)
    # Wasn't it a redirect?
    if r.status_code != 301 and r.status_code != 302:
        # Feedback for the user (there was an error again) & Stop execution of our request
        print "\n\n[!] dvwa_login: Page didn't response correctly (Response: %s).\n[i] Quitting." % (r.status_code)
        sys.exit(-1)
    # Did we log in successfully?
    if r.headers["Location"] != 'index.php':
        # Feedback for the user (there was an error) & Stop execution of our request
        print "\n\n[!] dvwa_login: Didn't login (Header: %s user: %s password: %s user_token: %s session_id: %s).\n[i] Quitting." % (
            r.headers["Location"], dvwa_user, dvwa_pass, user_token, session_id)
        sys.exit(-1)
    # If we got to here, everything should be okay!
    print "\n[i] Logged in! (%s/%s)\n" % (dvwa_user, dvwa_pass)
    return True
# Make the request to-do the brute force
def url_request(username, password, session_id):
    """Submit one username/password guess to the Brute Force page (HTTP GET).

    Returns the response body text; exits the process on connection failure
    or any non-200 status.
    """
    # GET data
    data = {
        "username": username,
        "password": password,
        "Login": "Login"
    }
    # Cookie data
    cookie = {
        "PHPSESSID": session_id,
        "security": sec_level
    }
    try:
        # Make the request to the URL
        #print "\n[i] URL: %s/vulnerabilities/brute/" % target
        #print "[i] Data: %s" % data
        #print "[i] Cookie: %s" % cookie
        r = requests.get("{0}/vulnerabilities/brute/".format(target), params=data, cookies=cookie, allow_redirects=False)
    except:
        # Feedback for the user (there was an error) & Stop execution of our request
        print "\n\n[!] url_request: Failed to connect (URL: %s/vulnerabilities/brute/).\n[i] Quitting." % (target)
        sys.exit(-1)
    # Was it a ok response?
    if r.status_code != 200:
        # Feedback for the user (there was an error again) & Stop execution of our request
        print "\n\n[!] url_request: Page didn't response correctly (Response: %s).\n[i] Quitting." % (r.status_code)
        sys.exit(-1)
    # We have what we need
    return r.text
# Main brute force loop
def brute_force(session_id):
    """Try every username for every password until `success` is seen.

    Both wordlists are read fully into memory. Note the loop order:
    passwords are the OUTER loop, so all usernames are tried per password.
    Returns True on a hit, False when the lists are exhausted.
    """
    # Load in wordlists files
    with open(pass_list) as password:
        password = password.readlines()
    with open(user_list) as username:
        username = username.readlines()
    # Counter
    i = 0
    # Loop around
    for PASS in password:
        for USER in username:
            USER = USER.rstrip('\n')
            PASS = PASS.rstrip('\n')
            # Increase counter
            i += 1
            # Feedback for the user
            print ("[i] Try %s: %s // %s" % (i, USER, PASS))
            # Make request
            attempt = url_request(USER, PASS, session_id)
            #print attempt
            # Check response
            if success in attempt:
                print ("\n\n[i] Found!")
                print "[i] Username: %s" % (USER)
                print "[i] Password: %s" % (PASS)
                return True
    return False
# NOTE(review): this runs at import time; a __main__ guard would make the
# helpers importable without immediately firing requests.
# Get initial CSRF token
session_id, user_token = csrf_token()
# Login to web app
dvwa_login(session_id, user_token)
# Start brute forcing
brute_force(session_id) | [
"hninja049@gmail.com"
] | hninja049@gmail.com |
1950ca67c91e388304f292a61ab1f1f8d45060a8 | 85690414e489c1f3473c261c25a53cb888b58a52 | /exercises/ex2_factorial_given_number.py | 76517172b1c2ef15b455d27bf61b294255025c8a | [
"MIT"
] | permissive | ivanleoncz/PythonEggs | eb95c16b8632fd7782f707defca2295c871c06ff | 540843dcf6fba4b3fe0d6d57dd19654f33cccb74 | refs/heads/master | 2022-01-13T06:15:04.315564 | 2019-06-06T05:40:11 | 2019-06-06T05:40:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py |
""" Write a program which can compute the factorial of a given numbers.
The results should be printed in a comma-separated sequence on a single line.
Suppose the following input is supplied to the program:
8
Then, the output should be:
40320
"""
# Read the number whose factorial we print; int() raises on non-numeric input.
number = int(input("\nProvide a number for Factorial calculation, please: "))
def factorial(n):
    """Return n! for a non-negative integer n.

    Iterative implementation: the original recursed with a base case of
    n == 1 only, so factorial(0) recursed forever (until RecursionError)
    and large n could exhaust the recursion limit.

    Raises:
        ValueError: if n is negative.
    """
    if n < 0:
        raise ValueError("factorial is undefined for negative numbers")
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
print("\nAnswer:\n",factorial(number))  # print inserts a space between the two arguments
| [
"ivanlmj@gmail.com"
] | ivanlmj@gmail.com |
4d28d031c27a0637460b632a9b19cba410228c5b | ebe29aa1cc69cd4de540f1310086bac47f3bbc38 | /fakturo/billingstack/auth.py | 637df96d8fe8f8a7f0b3a14ac9b442e3569ba857 | [
"Apache-2.0"
] | permissive | billingstack/python-fakturo-billingstack | b352262adc5c7046c46ff464290abafd709e8049 | fb641b43ee0ab2a92aea64cc010c989bfbfe5436 | refs/heads/master | 2021-01-10T21:39:35.998727 | 2013-04-05T22:01:15 | 2013-04-05T22:01:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,879 | py | import logging
import simplejson as json
from requests.auth import AuthBase
from fakturo.core import client
LOG = logging.getLogger(__name__)
class AuthHelper(AuthBase, client.BaseClient):
    """requests auth hook that logs into BillingStack and injects the token.

    Instances are passed as ``auth=`` to requests; ``__call__`` attaches the
    ``X-Auth-Token`` header, re-authenticating lazily when needed.
    """

    def __init__(self, url, username=None, password=None,
                 account_name=None):
        """
        :param url: Base API url.
        :param username: Login username.
        :param password: Login password.
        :param account_name: Merchant / account name (required).
        :raises ValueError: if no account_name is given.
        """
        super(AuthHelper, self).__init__(url)
        self.auth_info = {}

        if not account_name:
            raise ValueError('No account given.')

        cred_info = {
            'username': username,
            'password': password,
            'merchant': account_name
        }
        self.cred_info = cred_info

        # Authenticate eagerly when full credentials were supplied.
        if self.cred_valid:
            self.refresh_auth()

    @property
    def cred_valid(self):
        """True when both a username and a password are present."""
        c = self.cred_info
        return bool(c.get('username') and c.get('password'))

    def get_token_key(self, key):
        """
        Return *key* from the token info, None if no key or no info is
        there.

        :param key: What to get
        """
        token_info = self.auth_info.get('token')
        # Bug fix: the original ignored ``key`` and always returned the 'id'
        # entry. The only in-tree caller (the ``token`` property) passes
        # 'id', so behaviour is unchanged for existing callers.
        return token_info.get(key) if token_info else token_info

    @property
    def token(self):
        """The current auth token id, or None when not authenticated."""
        return self.get_token_key('id')

    @property
    def endpoint(self):
        """Endpoint URL returned by the auth service, if any."""
        return self.auth_info.get('endpoint')

    @property
    def account(self):
        """Merchant / account info returned by the auth service, if any."""
        return self.auth_info.get('merchant')

    def __call__(self, request):
        """requests hook: ensure a token exists and attach it to *request*."""
        if not self.token and self.cred_valid:
            self.refresh_auth()
        request.headers['X-Auth-Token'] = self.token
        return request

    def refresh_auth(self):
        """POST the credentials to /authenticate and cache the response."""
        # Drop empty credential fields before sending.
        auth_data = dict([(k, v) for k, v in self.cred_info.items() if v])
        LOG.debug('Authenticating on URL %s CREDENTIALS %s' %
                  (self.url, auth_data))
        response = self.post('/authenticate', data=json.dumps(auth_data))
        self.auth_info.update(response.json)
| [
"endre.karlson@gmail.com"
] | endre.karlson@gmail.com |
20e95818e7318e08fa18ba1273624083ab2189e1 | 55cb9e38dac8abb5745fddd17958ad05d69f1aae | /3 if states/3-1/8.py | 35c6e5dbf26cfa05be3991f5f636b84f53321ef7 | [] | no_license | gry-kiu/python-tutorial | 24ef6f47304b96af7b8fcd652565f431fc64f434 | 313c08c1441f64c146da22a6be2732a5869b8609 | refs/heads/master | 2022-11-16T03:10:09.714737 | 2020-07-13T06:07:20 | 2020-07-13T06:07:20 | 272,834,702 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 240 | py | # 입력을 받습니다.
number = input("정수 입력> ")
# Classify by the last character, so arbitrarily long integers work too.
last_character = number[-1]
# Even case: last digit is 0, 2, 4, 6 or 8.
if last_character in "02468":
    print("짝수입니다")
# Odd case: last digit is 1, 3, 5, 7 or 9.
if last_character in "13579":
    print("홀수입니다")
| [
"gry17@kiu.kr"
] | gry17@kiu.kr |
ff6ba23c9da3046c81bff48de8dc5c284b850763 | 0db8df4e153e3ea187847819661de880e196eca6 | /practice22.py | 99a8a60cb5eb9f823234f4039142a9913546a1e5 | [] | no_license | cis-04/primitive-python | 4763e2c74f0f354dd19942bedabb7ddec3a73c13 | 44f0c498b6bdce3eff26fb59ac04cdd04af87f37 | refs/heads/main | 2023-03-21T19:51:24.863405 | 2021-03-21T01:55:20 | 2021-03-21T01:55:20 | 349,877,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | try:
    # Parse the user's input; non-integer text raises ValueError.
    R = int(input("정수를 입력하세요."))
# NOTE(review): a bare "except:" also swallows KeyboardInterrupt/SystemExit;
# "except ValueError:" would be safer here.
except:
    print("정수를 입력하세요!")
else:
    # Report divisibility by 4, or the remainder otherwise.
    if R % 4 == 0:
        print("{}를 4로 나누면 나누어 떨어진다.".format(R))
    elif R % 4 != 0:
        remain = R % 4
        print("{}를 4로 나누면 나머지는 {} 이다.".format(R,remain))
"noreply@github.com"
] | cis-04.noreply@github.com |
b6c702423ffc356145e87486403b518ff17dc23a | 6562a388b69e50c0ff4641dd25724c7c5fd89edd | /python/2018_Edaily/counting_edge01.py | ec03b7b977f89d0b6d8f3db17ee07595da3e0e16 | [] | no_license | jangjooch/Python_study | 84555cd3a200eb07e8ff8557e401d99fe24e4ef5 | d4f85b0446daf150f37ed27510817591c4a0c569 | refs/heads/master | 2021-06-26T07:59:47.976992 | 2019-07-16T02:52:05 | 2019-07-16T02:52:05 | 152,009,370 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py |
init = input()
init = int(init)
array = list()
# NOTE(review): "sum" shadows the built-in of the same name (and is unused).
sum = int(0)
contents = int(1)
# Fill array with the values 1 .. init*init.
for i in range(init):
    for j in range(init):
        array.append(contents)
        contents = contents + 1
print(array)
maxidx = len(array)-1
storage = list()
# Take elements alternately: first the front half by index, then the back.
for i in range((init*2)):
    if i<init :
        storage.append(array[i])
        # NOTE(review): list.remove() removes by VALUE, not index; array holds
        # 1..init*init, so array.remove(0) on the first pass (i == 0) raises
        # ValueError. This looks like it was meant to be "del array[i]".
        array.remove(i)
    else:
        storage.append(array[maxidx])
        # NOTE(review): same value-vs-index confusion ("del array[maxidx]"?).
        array.remove(maxidx)
        maxidx = maxidx - 1
print(storage)
print(array) | [
"37062379+jangjooch@users.noreply.github.com"
] | 37062379+jangjooch@users.noreply.github.com |
81db3d91a8388fbfdc455bfb6d31a7581320f80b | 6222d83f32d24ea742eca7973017e4819a282a8f | /clone_Projet/Project_4/delivery_order.py | 5c585fc122e7f005d0f015fe789da2938d403d0f | [] | no_license | enjoyone8/My_Code | 00d1e7006ed65b4791499af3e383fe65a79a7382 | 8e16fbb4c7f36a6daae8194e41fae70f32ff82ef | refs/heads/master | 2021-01-20T12:58:20.820900 | 2017-08-09T12:08:00 | 2017-08-09T12:10:03 | 90,435,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,405 | py | # -*-coding=utf-8-*-
__author__ = 'Rocky'
#交割单处理
import os,datetime
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('display.max_rows',None)
class Delivery_Order():
    """Loads a year of delivery-order spreadsheets and totals the fees."""
    def __init__(self):
        print "Start"
        # Work inside a local "private" directory (created on first run).
        path=os.path.join(os.getcwd(),'private')
        if os.path.exists(path)==False:
            os.mkdir(path)
        os.chdir(path)
    # Merge the whole year's delivery orders (was: 合并一年的交割单).
    def years(self):
        df_list=[]
        k=[str(i) for i in range(1,13)]
        print k
        j=[i for i in range(1,13)]
        result=[]
        for i in range(1,13):
            # One spreadsheet per month: 2016-01.xls .. 2016-12.xls.
            filename='2016-%s.xls' %str(i).zfill(2)
            #print filename
            t=pd.read_table(filename,encoding='gbk',dtype={u'证券代码':u'str'})
            # Monthly fee = commission + stamp duty + other charges.
            fee=t[u'手续费'].sum()+t[u'印花税'].sum()+t[u'其他杂费'].sum()
            print i," fee: "
            print fee
            df_list.append(t)
            result.append(fee)
        df=pd.concat(df_list,keys=k)
        #print df
        #df.to_excel('2016_delivery_order.xls')
        self.caculation(df)
        # Plot fee per month, then the yearly total via caculation().
        plt.plot(j,result)
        plt.show()
    # NOTE(review): "caculation" is a typo for "calculation"; kept as-is
    # because it is called by name above.
    def caculation(self,df):
        # Total fee across the whole year.
        fee=df[u'手续费'].sum()+df[u'印花税'].sum()+df[u'其他杂费'].sum()
        print fee
    # Per-month fee breakdown (was: 计算每个月的费用) -- not implemented yet.
    def month(self):
        pass
def main():
    # Construct the processor (has a chdir side effect) and run the merge.
    obj=Delivery_Order()
    obj.years()
main()
| [
"enjoyone8@163.com"
] | enjoyone8@163.com |
e8598e0daa782c80295ffe2b35fbb064e4fb5a6b | 6e015b8c884847a812ef5ec51181f7dc7cf5ad4c | /model/eitr/transformer_decoder.py | 6e4ff06a1c9576911c659a35b0c89c92071bb1cb | [] | no_license | tlwzzy/ET-Net | f67b9c9bb0b639a49781c35abb0fa76902a77057 | 3806acdf27d3534498f9e49c38a93b1de12d9b93 | refs/heads/master | 2023-08-13T09:32:41.573687 | 2021-09-29T09:26:58 | 2021-09-29T09:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,197 | py | import torch
from torch import nn
import torch.nn.functional as F
import copy
class transformer_decoder(nn.Module):
def __init__(self, d_model=256, nhead=8, num_decoder_layers=6, dim_feedforward=2048, activation='relu', dropout=0.1):
super().__init__()
self.d_model = d_model
self.nhead = nhead
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, tgt, memory):
output = self.decoder(tgt, memory)
return output
class TransformerDecoder(nn.Module):
def __init__(self, encoder_layer, num_layers):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
def forward(self, tgt, memory):
output = tgt
for layer in self.layers:
output = layer(output, memory)
return output
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu"):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.sattn_dropout = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
self.cross_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.cattn_dropout = nn.Dropout(dropout)
self.norm21 = nn.LayerNorm(d_model)
self.norm22 = nn.LayerNorm(d_model)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.activation = _get_activation_fn(activation)
self.ffn_dropout1 = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.ffn_dropout2 = nn.Dropout(dropout)
self.norm3 = nn.LayerNorm(d_model)
def with_embed(self, tensor, pos):
return tensor if pos is None else tensor + pos
def forward(self, tgt, memory):
# self attention
q = k = v = self.norm1(tgt)
tgt1 = self.self_attn(q, k, v)[0]
tgt2 = tgt + self.sattn_dropout(tgt1)
# cross attention
q = self.norm21(tgt2)
k = v = self.norm22(memory)
tgt3 = self.cross_attn(q, k, v)[0]
tgt4 = tgt2 + self.cattn_dropout(tgt3)
# FFN
tgt5 = self.norm3(tgt4)
tgt6 = self.linear2(self.ffn_dropout1(self.activation(self.linear1(tgt5))))
tgt7 = tgt4 + self.ffn_dropout2(tgt6)
return tgt7
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
def build_transformer(args):
return transformer(**args)
| [
"wengwm419@163.com"
] | wengwm419@163.com |
b0789b65346da9d46568ef7fc745efe52ce14c2c | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_rhetoricians.py | b82e3e2f934329cba730d00cb0c53fa56ef00f97 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py |
from xai.brain.wordbase.nouns._rhetorician import _RHETORICIAN
# class header
class _RHETORICIANS(_RHETORICIAN, ):
    """Auto-generated word entry for the plural noun "rhetoricians"."""

    def __init__(self):
        _RHETORICIAN.__init__(self)
        self.name = "RHETORICIANS"
        self.specie = 'nouns'
        self.basic = "rhetorician"
        self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
47ca804a3d8883b44e584a1195f25679b610f48e | 505cc4a10a4aa7ce6becbff10e6ac1b5037a825b | /consensus_input.py | 7a87960f5e5376eb7e393902b408bacae94b3b12 | [] | no_license | Dirivian/Current-Work | adbf86f1390ca9833d6cb0c156edac1aa2d2d7fc | 14f61f6fe9572ccaad4ea000cb49490a75794b15 | refs/heads/master | 2021-01-21T06:14:00.601567 | 2018-07-05T06:51:34 | 2018-07-05T06:51:34 | 82,861,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 7 16:07:26 2017
@author: user
"""
import numpy as np
from scipy import integrate
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
N =3
def Laplacian(X, t):
    """Right-hand side of the 3-node consensus ODE: dX/dt = L1 @ X + b.

    Args:
        X: state vector of length 3.
        t: time; unused, but required by scipy.integrate.odeint's signature.

    Returns:
        numpy array of length 3.
    """
    # Removed the original's dead code: it built (and discarded) an unused
    # 5x5 matrix on every call.
    N = 3  # fixed network size, as in the original (shadows the module N=3)
    # Laplacian-like matrix: all ones off-diagonal, -(N-1) on the diagonal.
    L1 = np.ones((N, N))
    np.fill_diagonal(L1, -(N - 1))
    # Constant input vector b = [3, 0, -3].
    return L1.dot(X) + [3, 0, -3]
# Simulate and plot the consensus dynamics.
fig = plt.figure()
#ax = fig.gca(projection='3d')
#circa =np.linspace(0,2*np.pi,100)
# Rebuild the same matrix used inside Laplacian() (kept for reference).
L1 = np.ones((N,N))
for i in range(N):
    L1[i,i]=-(N-1)
# Random integer initial condition in [0, 10).
a= np.random.randint(10,size=N)
#b = np.linalg.solve(L1,-a)
dt =0.01
x=a
alpha = 0.6
# Forward-Euler-style iteration; xvec is collected but never plotted.
xvec = [a]
for i in range(200):
    x = x+alpha*Laplacian(x,20)
    xvec= xvec+[x]
tspace = np.linspace(0,13,int(13/dt))
# Reference solution via scipy's odeint.
asol = integrate.odeint(Laplacian,a , tspace)
plt.plot(tspace,asol)
plt.ylabel('States')
# NOTE(review): the file ends with a bare "plt.show" (no parentheses), which
# is a no-op; it should be "plt.show()" for the window to appear.
plt.xlabel('Times')
plt.show | [
"jithindgeorge93@gmail.com"
] | jithindgeorge93@gmail.com |
fee51756e1d2f35a94346391ab6947669f32f3e5 | e09ca015952d06ad35342660f42a53edbb19fa2b | /urls.py | 81f56fdec39758cb7db23674d613f88880be9edf | [] | no_license | bartdob/weatherApi | 9b313147202a48c62ca85f595ba3652f2d27db22 | a545adf7409fdb6ebc9caa4c492a9c7773aab012 | refs/heads/master | 2022-12-16T21:00:38.713628 | 2020-09-08T17:58:47 | 2020-09-08T17:58:47 | 298,189,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from django.urls import path
from . import views
urlpatterns = [
path('', views.index, name='home'),
path('delete/<cityName>/', views.deleteCity, name='deleteCity'),
] | [
"dobry1@pm.me"
] | dobry1@pm.me |
4b7c937f22f3014ec84bad9e620ce8522f0d431f | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /bin/jupyter-qtconsole | 488a1d74540d18578cde9d0aa14b719fbdb5f409 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 101 | #!/usr/bin/env python
from qtconsole.qtconsoleapp import main
if __name__ == '__main__':
main()
| [
"wgyumg@mgail.com"
] | wgyumg@mgail.com | |
3043f1af617ce163e4a21e756bc266e9fb7c522f | 647ad66aa2371cfa506db0b0779c2b98a98ed293 | /KingdeeDataExport/qt_test/Ui_main_form.py | d6cfe6250cc7288b3bff978c87a12900befc5a38 | [
"Apache-2.0"
] | permissive | Gatorix/accounting_tools | e5780cfc5c80e7ab6463f8dd226cd4b8700d98f8 | 926ee446048c435f648c2461631a4d663f74828f | refs/heads/master | 2021-08-09T12:17:01.816599 | 2020-12-10T10:04:21 | 2020-12-10T10:04:21 | 230,381,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,317 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'c:\Users\ZCY-CW\Documents\GitHub\KindeeDataProject\ui\main_form.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName("Dialog")
Dialog.resize(294, 167)
self.pushButton = QtWidgets.QPushButton(Dialog)
self.pushButton.setGeometry(QtCore.QRect(510, 60, 75, 23))
self.pushButton.setObjectName("pushButton")
self.pushButton_2 = QtWidgets.QPushButton(Dialog)
self.pushButton_2.setGeometry(QtCore.QRect(520, 430, 75, 23))
self.pushButton_2.setObjectName("pushButton_2")
self.progressBar = QtWidgets.QProgressBar(Dialog)
self.progressBar.setGeometry(QtCore.QRect(20, 430, 471, 23))
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName("progressBar")
self.widget = QtWidgets.QWidget(Dialog)
self.widget.setGeometry(QtCore.QRect(0, 0, 294, 130))
self.widget.setObjectName("widget")
self.gridLayout = QtWidgets.QGridLayout(self.widget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.formLayout = QtWidgets.QFormLayout()
self.formLayout.setObjectName("formLayout")
self.label = QtWidgets.QLabel(self.widget)
self.label.setObjectName("label")
self.formLayout.setWidget(
0, QtWidgets.QFormLayout.LabelRole, self.label)
self.dateEdit = QtWidgets.QDateEdit(self.widget)
self.dateEdit.setObjectName("dateEdit")
self.formLayout.setWidget(
0, QtWidgets.QFormLayout.FieldRole, self.dateEdit)
self.label_2 = QtWidgets.QLabel(self.widget)
self.label_2.setObjectName("label_2")
self.formLayout.setWidget(
1, QtWidgets.QFormLayout.LabelRole, self.label_2)
self.comboBox = QtWidgets.QComboBox(self.widget)
self.comboBox.setModelColumn(0)
self.comboBox.setObjectName("comboBox")
self.formLayout.setWidget(
1, QtWidgets.QFormLayout.FieldRole, self.comboBox)
self.label_3 = QtWidgets.QLabel(self.widget)
self.label_3.setObjectName("label_3")
self.formLayout.setWidget(
2, QtWidgets.QFormLayout.LabelRole, self.label_3)
self.lineEdit = QtWidgets.QLineEdit(self.widget)
self.lineEdit.setObjectName("lineEdit")
self.formLayout.setWidget(
2, QtWidgets.QFormLayout.FieldRole, self.lineEdit)
self.label_4 = QtWidgets.QLabel(self.widget)
self.label_4.setObjectName("label_4")
self.formLayout.setWidget(
3, QtWidgets.QFormLayout.LabelRole, self.label_4)
self.lineEdit_2 = QtWidgets.QLineEdit(self.widget)
self.lineEdit_2.setObjectName("lineEdit_2")
self.formLayout.setWidget(
3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2)
self.gridLayout.addLayout(self.formLayout, 0, 0, 1, 1)
self.verticalLayout = QtWidgets.QVBoxLayout()
self.verticalLayout.setObjectName("verticalLayout")
self.checkBox = QtWidgets.QCheckBox(self.widget)
self.checkBox.setObjectName("checkBox")
self.verticalLayout.addWidget(self.checkBox)
self.checkBox_2 = QtWidgets.QCheckBox(self.widget)
self.checkBox_2.setObjectName("checkBox_2")
self.verticalLayout.addWidget(self.checkBox_2)
self.checkBox_3 = QtWidgets.QCheckBox(self.widget)
self.checkBox_3.setObjectName("checkBox_3")
self.verticalLayout.addWidget(self.checkBox_3)
self.gridLayout.addLayout(self.verticalLayout, 0, 1, 1, 1)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.label_6 = QtWidgets.QLabel(self.widget)
self.label_6.setObjectName("label_6")
self.horizontalLayout.addWidget(self.label_6)
self.lineEdit_3 = QtWidgets.QLineEdit(self.widget)
self.lineEdit_3.setObjectName("lineEdit_3")
self.horizontalLayout.addWidget(self.lineEdit_3)
self.toolButton = QtWidgets.QToolButton(self.widget)
self.toolButton.setObjectName("toolButton")
self.horizontalLayout.addWidget(self.toolButton)
self.gridLayout.addLayout(self.horizontalLayout, 1, 0, 1, 2)
self.widget1 = QtWidgets.QWidget(Dialog)
self.widget1.setGeometry(QtCore.QRect(90, 140, 201, 25))
self.widget1.setObjectName("widget1")
self.gridLayout_2 = QtWidgets.QGridLayout(self.widget1)
self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.pushButton_5 = QtWidgets.QPushButton(self.widget1)
self.pushButton_5.setObjectName("pushButton_5")
self.gridLayout_2.addWidget(self.pushButton_5, 0, 0, 1, 1)
self.pushButton_3 = QtWidgets.QPushButton(self.widget1)
self.pushButton_3.setObjectName("pushButton_3")
self.gridLayout_2.addWidget(self.pushButton_3, 0, 1, 1, 1)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
_translate = QtCore.QCoreApplication.translate
Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
self.pushButton.setText(_translate("Dialog", "PushButton"))
self.pushButton_2.setText(_translate("Dialog", "PushButton"))
self.label.setText(_translate("Dialog", "日期:"))
self.label_2.setText(_translate("Dialog", "科目级次:"))
self.label_3.setText(_translate("Dialog", "科目代码:"))
self.label_4.setText(_translate("Dialog", "至:"))
self.checkBox.setText(_translate("Dialog", "科目余额表"))
self.checkBox_2.setText(_translate("Dialog", "财务报表"))
self.checkBox_3.setText(_translate("Dialog", "凭证"))
self.label_6.setText(_translate("Dialog", "文件保存路径:"))
self.toolButton.setText(_translate("Dialog", "..."))
self.pushButton_5.setText(_translate("Dialog", "导出"))
self.pushButton_3.setText(_translate("Dialog", "退出"))
| [
"caosheng0000@outlook.com"
] | caosheng0000@outlook.com |
103274d4dd7e04e72642cbed173b1a399eb4a13c | 6d311428fde9389f552a46784b26576f1ff52092 | /thirtyseventh.py | e0644ab8454acd08dcaa82b4c063f7108ec2075d | [] | no_license | hemanrnjn/CoriolisAssignment | c0e9c096e6a59c30dbcac5d76e70424c905fcd93 | ed5fb3af60a66db6e83fbf064022c701b007511d | refs/heads/master | 2020-03-28T06:11:23.988699 | 2018-09-10T09:57:09 | 2018-09-10T09:57:09 | 147,819,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | def enumerate_file(file_name):
file = open(file_name, 'r')
new_file = open('enumerated-' + file_name, 'w')
for i, line in enumerate(file):
new_file.write('{}. {}\n'.format(i+1, line.strip()))
file.close()
new_file.close()
file_name = input('Enter the file name\n')
enumerate_file(file_name)
| [
"himanshurnjn04@gmail.com"
] | himanshurnjn04@gmail.com |
67b408b0da1d63f7d5a38668a182b9bfb8e691d0 | f90e26811ec80a0ad6c82261e237d81067d344a5 | /PyExp/folderTool.py | f55e6dc0d639d0c0b223dca98f28097a9d910d75 | [] | no_license | JeffHabe/PythonWorkspace | 0cccd83ab0bff6bd8e59e27ef0cb04f0bc457b6f | 4bd0f46aa77e1882ba8a913ea9c3acf7e4b7ec2d | refs/heads/master | 2021-06-02T15:09:11.294191 | 2019-11-22T07:37:32 | 2019-11-22T07:37:45 | 132,264,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 13:52:32 2018
@author: Jeff PC
"""
from os import walk,makedirs
import datetime,time
import os.path as pth
#mypath ='excelFolder/'
import csv
def getFileName(mypath=''):
for (dirpath, dirnames, filenames) in walk(mypath):
f=list(filenames[i][:-4] for i in range(len(filenames)))
break
return f
def mkfolder(directory):
if not pth.exists(directory):
makedirs(directory)
def readCSV(mypath,fileName):
data=[]
times=[]
f = open(mypath+fileName+'.csv', 'r')
for row in csv.DictReader(f):
ms=float(row['timestamp'])
#print(ms)
date=datetime.datetime.utcfromtimestamp(ms)
#print(time.mktime(date.timetuple()))
#date1=datetime.datetime.utcfromtimestamp(ms).strftime('%Y-%m-%d %H:%M:%S.%f')
#print(date)
times.append(date)
data.append(float(row['value']))
f.close()
return (data,times)
#print(data)
| [
"sadhabe118@gmail.com"
] | sadhabe118@gmail.com |
352121d56b8a5bb9fa3eec78314000a59d9186b6 | b50508302647ad849029210bff200930b1902987 | /apps/articles/migrations/0001_initial.py | dcee0bc7423816df2b8733388e92bfed9f9a7652 | [] | no_license | tianjiajun123/myBlog | a46718ed3fde114bfa282428d0c8b7f36b5adce9 | 2cd67bc0e85974cda477c366db9f7051b8b11132 | refs/heads/master | 2023-02-15T11:12:37.266980 | 2021-01-06T10:58:50 | 2021-01-06T10:58:50 | 326,363,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,498 | py | # Generated by Django 3.1.4 on 2021-01-03 20:04
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Articles',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=128, verbose_name='文章标题')),
('img', models.ImageField(upload_to='', verbose_name='文章配图')),
('abstract', models.TextField(verbose_name='文章摘要')),
('content', models.TextField(verbose_name='文章内容')),
('visited', models.IntegerField(verbose_name='文章访问量')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('modified_at', models.DateTimeField(auto_now=True, verbose_name='修改时间')),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='文章作者')),
],
options={
'verbose_name': '文章',
'verbose_name_plural': '文章',
'ordering': ('-created_at',),
},
),
]
| [
"you@example.com"
] | you@example.com |
d0999586ccbd5cec385e34f8a7edbf19decb2542 | 4443d08048f9980045e5f0541c69db0d756391d1 | /partner_ngos/programs_management/doctype/project_indicator/test_project_indicator.py | 886c2b9e33e38f60e194f3c716e3dc39fa36f037 | [
"MIT"
] | permissive | mohsinalimat/partner_ngos | dea0db6e0f9718e7ffc69f7171bdb1603a055d72 | 4a345fb6989ff5a21db7fca07aa4e5174dca8f59 | refs/heads/master | 2023-03-15T13:15:40.571368 | 2020-07-09T07:22:59 | 2020-07-09T07:22:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Akram Mutaher and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestProjectIndicator(unittest.TestCase):
pass
| [
"frappe@ubuntu.vm"
] | frappe@ubuntu.vm |
5f65a914e6bae62808bebcdf2aac4be3d08d6c66 | f5b0db4a88e5d0e0aa4d43a7b6df0cccd227887e | /genetic/helpers/__init__.py | 735e7e82c7643c41570a290036fe6fe701ef8a73 | [] | no_license | balbok0/nn-arch-opt | 1bd70ac9f1c2c561c94f89dd839b62a90972de04 | c329ee96ff56d83fc41dbdeb368ccd93b5462552 | refs/heads/master | 2020-03-29T04:45:09.487622 | 2018-11-06T00:31:16 | 2018-11-06T00:31:16 | 149,546,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | import sys, os
sys.path.append(os.path.realpath(__file__)[:-11])
import helpers_other
import helpers_data
import helpers_mutate | [
"jakubflpk@gmail.com"
] | jakubflpk@gmail.com |
1846244645603c3069beec9522f089c045a558a3 | a86a1ccce08d1321f50fff61dc3b7533ce8910a2 | /core/management/commands/rename.py | e7633d7e297c267b3e3be96918420178934013f9 | [] | no_license | Allabergen/django-boilerplate | 19be29a6728b4dad3c35745340afaceb2beb20a5 | 906460dff74f49ad4882d1cc0e1264a0deedfae1 | refs/heads/master | 2021-06-17T18:37:32.877399 | 2019-07-25T16:23:04 | 2019-07-25T16:23:04 | 198,864,531 | 0 | 0 | null | 2021-04-16T20:43:59 | 2019-07-25T16:16:28 | Python | UTF-8 | Python | false | false | 950 | py | import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
help = 'Renames a Django project'
def add_arguments(self, parser):
parser.add_argument('new_project_name', type=str,
help='The New Django Project Name')
def handle(self, *args, **kwargs):
new_project_name = kwargs['new_project_name']
files_to_rename = ['demo/settings/base.py',
'demo/wsgi.py', 'manage.py']
folder_to_rename = 'demo'
for f in files_to_rename:
with open(f, 'r') as file:
filedata = file.read()
filedata = filedata.replace('demo', new_project_name)
with open(f, 'w') as file:
file.write(filedata)
os.rename(folder_to_rename, new_project_name)
self.stdout.write(self.style.SUCCESS(
f'Project has been renamed to {new_project_name}'))
| [
"allromis@gmail.com"
] | allromis@gmail.com |
7759138da9134b5790a90c7571b79f439e325980 | 773b96a8e4c956269aaa36e97d55b3dbf3723ce4 | /test_GDA.py | 38d5c2e8b3955fe1fc1ccb9a22ad43eb67ae10c8 | [] | no_license | stojiljkovicbre/cat2dog | e5e911bcd569c1059b7289f1f41a9d86c91ca713 | 1bf168eeef0291925bc3a15e5fe678ed78548029 | refs/heads/master | 2022-07-16T13:37:44.774194 | 2020-05-19T20:04:31 | 2020-05-19T20:04:31 | 265,298,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | from src.GDA import test_LDA
def main():
test_LDA(['cat2dog/testA', 'cat2dog/testB'])
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | stojiljkovicbre.noreply@github.com |
48ab043b60392099b39e2970a820ebb238c4e530 | 5eb2440d889040253381f53fbd6c02fc40459af9 | /main.py | be89e6a83514ee627f3c73928fbf2c4d251d8e09 | [] | no_license | wmm1002/CanAI-Name2Vec | 6dc0174e31b549509fc5bcde09768825500827b1 | 439ea63135fe15e9f7396469c080c8f1e3f9f159 | refs/heads/master | 2020-08-11T00:16:46.717913 | 2019-04-20T21:29:49 | 2019-04-20T21:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,679 | py | from create_model import train_model
from gensim.models.doc2vec import Doc2Vec
from os import makedirs, path
from results import save_results
from sys import argv
#ensure output directories exist
makedirs('models', exist_ok=True)
makedirs('histograms', exist_ok=True)
makedirs('matching_name_distance', exist_ok=True)
makedirs('random_name_distance', exist_ok=True)
#verify and load command line parameters
if len(argv) < 4:
print('Syntax: python main.py [epochs] [vector_size] [window]')
exit(1)
try:
parameters = tuple(int(x) for x in argv[1:])
except:
print('Error: Expected all parameters to be integers. Exiting.')
exit(1)
#create model if it doesn't already exist
model_path = 'models/epochs_%d_vectorSize_%d_window_%d.model' % parameters
histogram_path = 'histograms/epochs_%d_vectorSize_%d_window_%d.png' % parameters
matching_name_path = 'matching_name_distance/epochs_%d_vectorSize_%d_window_%d.csv' % parameters
random_name_path = 'random_name_distance/epochs_%d_vectorSize_%d_window_%d.csv' % parameters
if path.exists(model_path):
print(f"'{model_path}' already exits. Using existing model to re-generate results.")
model = Doc2Vec.load(model_path)
else:
print('Generating model with epochs=%d vector_size=%d window=%d' % parameters)
model = train_model(*parameters)
model.save(model_path)
print(f'Saved model to {model_path}')
save_results(model, histogram_path, matching_name_path, random_name_path)
print(f'Saved histogram to {histogram_path}')
print(f'Saved histogram to {histogram_path}')
print(f'Saved matching name distances to {matching_name_path}')
print(f'Saved random name distances to {random_name_path}')
| [
"foxcroftjn@gmail.com"
] | foxcroftjn@gmail.com |
6d43981f925598e96edfe736f9319522da0ac931 | 897d4ddb90c22cbe2ac5d823f53970dc585ee2d6 | /Prime.py | 0960c8d23ea3311b1010c5959993a0ecf7d939ac | [] | no_license | mfsyed/Prime | efcaf01a0facc614531807bf567fb8c5c9829711 | d5fe2ceb968834ea8563a3e08d27a04dd98b7fba | refs/heads/master | 2020-03-27T18:15:52.660809 | 2018-08-31T15:26:27 | 2018-08-31T15:26:27 | 146,909,715 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py |
number = int(input("Pick a number between 1 and 100 and we'll let you know if it's prime."))
count = 1
divide = 1
for i in range(1,number):
if number%i == 0:
count = count + 1
factor = (number/i)
print(str(factor) + " x " + str(i) + " is " + str(number))
if count == 2:
print("Your number is prime.")
if count != 2:
print("Not Prime.")
| [
"noreply@github.com"
] | mfsyed.noreply@github.com |
7edccf28dd33aa5d5e68e9748141bf717e1182c5 | 8ebd1f4496d2bd1fae3e4e958b29cc2065efee5a | /examples/new_theme.py | 26ad89083825f337bf28655bd36ab5b9f71bfa2e | [
"BSD-3-Clause"
] | permissive | Czaki/napari | 015381339d016ece8137c6fc67e076f9f1fdbd41 | d043abc924441a5f842b4dd699d7c522b2e4b2c8 | refs/heads/master | 2023-08-31T09:46:46.589131 | 2023-07-20T20:07:21 | 2023-07-20T20:07:21 | 248,206,469 | 0 | 0 | BSD-3-Clause | 2023-09-13T22:06:09 | 2020-03-18T10:55:43 | Python | UTF-8 | Python | false | false | 924 | py | """
New theme
=========
Displays an image and sets the theme to new custom theme.
.. tags:: experimental
"""
from skimage import data
import napari
from napari.utils.theme import available_themes, get_theme, register_theme
# create the viewer with an image
viewer = napari.view_image(data.astronaut(), rgb=True, name='astronaut')
# List themes
print('Originally themes', available_themes())
blue_theme = get_theme('dark')
blue_theme.id = "blue"
blue_theme.icon = (
'rgb(0, 255, 255)' # you can provide colors as rgb(XXX, YYY, ZZZ)
)
blue_theme.background = 28, 31, 48 # or as tuples
blue_theme.foreground = [45, 52, 71] # or as list
blue_theme.primary = '#50586c' # or as hexes
blue_theme.current = 'orange' # or as color name
register_theme('blue', blue_theme, "custom")
# List themes
print('New themes', available_themes())
# Set theme
viewer.theme = 'blue'
if __name__ == '__main__':
napari.run()
| [
"noreply@github.com"
] | Czaki.noreply@github.com |
65f33c030f4337590dc31247c63430a37eed2e53 | 372767d4b1b759b153632cf3d42a696bd6ee878d | /scripts/HiCtool_TAD_analysis.py | 9bda970e0e94a8d1ce7cbe5ece601a1edeee7e4b | [] | no_license | szymanska/HiCtool | 4f1629e913bc4b85b0d2ec3d28c741c2f799043c | 528e865ce0a3139bc17d07c78ced4e44a7b399d2 | refs/heads/master | 2022-09-06T16:52:11.294186 | 2020-05-31T20:27:20 | 2020-05-31T20:27:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,722 | py | # Program to perform TAD analysis:
# - Calculate the DI, HMM states and topological domains coordinates.
# - Plot the observed DI and true DI (Hidden Markov Model).
# Usage: python2.7 HiCtool_TAD_analysis.py [-h] [options]
# Options:
# -h, --help show this help message and exit
# --action Action to perform: full_tad_analysis, plot_chromosome_DI.
# -i INPUT_FILE Input contact matrix file if action is "full_tad_analysis" or DI values if action is "plot_chromosome_DI".
# -c CHROMSIZES_PATH Path to the folder chromSizes with trailing slash at the end.
# -s SPECIES Species. It has to be one of those present under the chromSizes path.
# --isGlobal Insert 1 if the input matrix is a global matrix, 0 otherwise.
# --tab_sep Insert 1 if the input matrix is in a tab separated format, 0 if it is in compressed format.
# --chr If action is "full_tad_analysis": chromosome or list of chromosomes between square brackets to select specific maps for the analysis. If action is "plot_chromosome_DI" insert a single chromosome to plot the DI values.
# --data_type Data type to label your data, example: observed, normalized, etc.
# --full_chromosome Insert 1 to plot DI and HMM states for the entire chromosome, 0 otherwise.
# --coord List of two integers with start and end coordinates to plot the DI values and HMM values.
# --input_file_hmm Input HMM states file if action is "plot_chromosome_DI" to plot also the HMM states.
# --plot_legend If action is "plot_chromosome_DI", insert 1 to plot the legend, 0 otherwise.
# --plot_grid If action is "plot_chromosome_DI", insert 1 to plot the grid, 0 otherwise.
from optparse import OptionParser
import numpy as np
import os
import os.path
from os import path
# Global container for the command-line options. Presumably populated by the
# OptionParser in the script's entry point (not visible in this chunk -- TODO
# confirm). Keys mirror the options documented in the header comment above;
# they stay None until parsing fills them in, and are read by the functions
# below (e.g. extract_single_map uses 'chromSizes_path' and 'species').
parameters = {'action': None,
              'input_file': None,
              'chromSizes_path': None,
              'isGlobal': None,
              'tab_sep': None,
              'chr': None,
              'species': None,
              'data_type': None,
              'full_chromosome': None,
              'coord': None,
              'input_file_hmm': None,
              'plot_legend': None,
              'plot_grid': None
              }
def save_list(a_list, output_file):
    """
    Save a list to a txt file, one element per line.

    Arguments:
        a_list (list): the list to save (elements are written with their
        default string representation).
        output_file (str): output file name in txt format.
    Output:
        txt file containing the saved list, one element per line.
    """
    with open(output_file, 'w') as fout:
        # Iterate elements directly instead of indexing with range(len(...)).
        for item in a_list:
            fout.write('%s\n' % item)
def save_matrix_rectangular(a_matrix, output_file):
    """
    Save an inter-chromosomal contact matrix in the HiCtool compressed format to txt file.

    The matrix is flattened row-major into a vector; each non-zero value is
    written on its own line, while every maximal run of consecutive zeros is
    collapsed to a single line made of "0" followed by the run length
    (e.g. "07" encodes seven consecutive zeros).

    Arguments:
        a_matrix (numpy matrix): input contact matrix to be saved
        output_file (str): output file name in txt format
    Output:
        txt file containing the formatted data
    """
    import numpy as np
    flat = np.asarray(a_matrix).ravel().tolist()
    total = len(flat)
    with open(output_file, 'w') as fout:
        pos = 0
        while pos < total:
            if flat[pos] == 0:
                # Measure the zero run, then emit it as a single "0<count>" line.
                run = 1
                while pos + run < total and flat[pos + run] == 0:
                    run += 1
                fout.write('0%d\n' % run)
                pos += run
            else:
                fout.write('%s\n' % flat[pos])
                pos += 1
def save_matrix(a_matrix, output_file):
    """
    Save an intra-chromosomal contact matrix in the HiCtool compressed format to txt file.

    Only the upper-triangular part of the (symmetric) matrix is stored,
    diagonal included. The selected values are written one per line; every
    maximal run of consecutive zeros is collapsed to a single line made of
    "0" followed by the run length (e.g. "07" encodes seven zeros).

    Arguments:
        a_matrix (numpy matrix): input contact matrix to be saved
        output_file (str): output file name in txt format
    Output:
        txt file containing the formatted data
    """
    import numpy as np
    n = len(a_matrix)
    upper = a_matrix[np.triu_indices(n)].tolist()
    total = len(upper)
    with open(output_file, 'w') as fout:
        pos = 0
        while pos < total:
            if upper[pos] == 0:
                # Measure the zero run, then emit it as a single "0<count>" line.
                run = 1
                while pos + run < total and upper[pos + run] == 0:
                    run += 1
                fout.write('0%d\n' % run)
                pos += run
            else:
                fout.write('%s\n' % upper[pos])
                pos += 1
def load_matrix(input_file):
    """
    Load an HiCtool compressed square (and symmetric) contact matrix from a txt file and parse it.

    Lines starting with "0" that are not a decimal number (second character is
    not ".") encode a run of zeros ("07" = seven zeros); any other line holds a
    single float. The parsed vector is the upper-triangular part (diagonal
    included), which is mirrored to rebuild the full symmetric matrix.

    Arguments:
        input_file (str): input file name in txt format (generated by the function
        "save_matrix").
    Return: numpy array containing the parsed values stored in the input txt file to build a contact matrix.
    """
    import numpy as np
    print("Loading " + input_file + "...")
    matrix_vect = []
    with open(input_file, 'r') as infile:
        for line in infile:
            if line[0] == "0" and line[1] != ".":
                # Zero-run line "0<count>\n": expand to <count> zeros at once.
                matrix_vect.extend([0] * int(line[1:-1]))
            else:
                matrix_vect.append(float(line[:-1]))
    k = len(matrix_vect)
    # Invert n*(n+1)/2 = k to recover the matrix dimension n.
    matrix_size = int((-1 + np.sqrt(1 + 8 * k)) / 2)
    iu = np.triu_indices(matrix_size)
    upper = np.zeros((matrix_size, matrix_size))  # upper triangle plus diagonal
    upper[iu] = matrix_vect
    diag_matrix = np.diag(np.diag(upper))
    # Mirror the upper triangle; subtract the diagonal once so it is not doubled.
    output_matrix = upper + np.transpose(upper) - diag_matrix
    print("Done!")
    return output_matrix
def save_matrix_tab(input_matrix, output_filename):
    """
    Save a contact matrix in a txt file in a tab separated format. Columns are
    separated by tabs, rows are in different lines.

    Arguments:
        input_matrix (numpy matrix): input contact matrix to be saved
        output_filename (str): output file name in txt format
    Output:
        txt file containing the tab separated data
    """
    with open(output_filename, 'w') as f:
        # Iterate rows directly instead of indexing with xrange(len(...)),
        # which also keeps the function Python 3 compatible.
        for row in input_matrix:
            f.write('\t'.join(str(value) for value in row) + '\n')
def load_matrix_tab(input_file):
    """
    Load a contact matrix saved in a tab separated format using the function
    "save_matrix_tab".

    Arguments:
        input_file (str): input contact matrix to be loaded.
    Return: numpy array containing the parsed values stored in the input tab separated txt file to build a contact matrix.
    """
    import numpy as np
    print("Loading " + input_file + "...")
    rows = []
    with open(input_file, 'r') as infile:
        # Stream the file line by line instead of materializing readlines().
        for line in infile:
            rows.append([float(field) for field in line.strip().split('\t')])
    output_matrix = np.array(rows)
    print("Done!")
    return output_matrix
def load_DI_values(input_file):
    """
    Load a DI txt file generated with "calculate_chromosome_DI".

    NaN entries in the file are converted to 0.

    Arguments:
        input_file (str): input file name in txt format.
    Return:
        List of the DI values (floats).
    """
    import numpy as np
    # "with" guarantees the file handle is closed (the original leaked it).
    with open(input_file, 'r') as fp:
        lines = fp.read().split('\n')[:-1]  # drop the empty piece after the trailing newline
    # A list comprehension (instead of map) keeps this correct on Python 3,
    # where np.array(map(...)) would build a 0-d object array.
    return np.nan_to_num(np.array([float(x) for x in lines])).tolist()
def extract_single_map(input_global_matrix,
                       tab_sep,
                       chr_row,
                       chr_col,
                       species='hg38',
                       bin_size=1000000,
                       data_type='observed',
                       save_output=True,
                       save_tab=False):
    """
    Extract a single contact matrix for a pair of chromosomes from the global matrix (all-by-all chromosomes).

    Arguments:
        input_global_matrix (object | str): global contact matrix, either as an
        in-memory object or as the filename of a saved matrix.
        tab_sep (bool): if "input_global_matrix" is a filename, True means the
        file is tab separated, False means HiCtool compressed format.
        chr_row (str): chromosome in the rows of the output contact matrix.
        chr_col (str): chromosome in the columns of the output contact matrix.
        If equal to chr_row, the intra-chromosomal map is extracted.
        species (str): species label in string format.
        bin_size (int): bin size in bp of the contact matrix.
        data_type (str): which kind of data type you are extracting: "observed" or "normalized".
        save_output (bool): if True, save the contact matrix in HiCtool compressed txt file.
        save_tab (bool): if True, save the contact matrix in tab separated format.
    Return:
        Contact matrix in numpy array format.
    Outputs:
        Txt file with the contact matrix in HiCtool compressed format if "save_output=True".
        Txt file with the contact matrix in tab separated format if "save_tab=True".
    """
    # NOTE(review): the chromSizes path and species are read from the
    # module-level "parameters" dict, not from the "species" argument --
    # confirm whether the argument should be honored instead.
    chromosomes_list = []
    chr_dim = []
    d_chr_dim = {}
    with open(parameters['chromSizes_path'] + parameters['species'] + '.chrom.sizes', 'r') as chromosomes:
        for line in chromosomes:
            fields = line.split('\n')[0].split('\t')
            if len(fields) < 2:
                continue  # tolerate a blank trailing line in the sizes file
            chromosomes_list.append(fields[0])
            # Floor division keeps bin counts integer on Python 3 as well.
            chr_dim.append(int(fields[1]) // bin_size)
            d_chr_dim[fields[0]] = int(fields[1]) // bin_size
    # Cumulative bin counts: offset of the end of each chromosome inside the
    # global matrix (the original code built this twice; once is enough).
    d_chr_dim_inc = {}
    k = 1
    for c in chromosomes_list:
        d_chr_dim_inc[c] = sum(chr_dim[:k])
        k += 1
    # Load the global matrix once (the original duplicated this block and
    # therefore parsed the file twice when a filename was passed).
    if isinstance(input_global_matrix, str):
        if tab_sep:
            full_matrix = load_matrix_tab(input_global_matrix)
        else:
            full_matrix = load_matrix(input_global_matrix)
    else:
        full_matrix = input_global_matrix
    # Row/column boundaries of the requested chromosome pair.
    if chr_row == '1':
        row_start = 0
    else:
        row_start = d_chr_dim_inc[chromosomes_list[chromosomes_list.index(chr_row) - 1]]
    row_end = row_start + d_chr_dim[chr_row]
    if chr_col == '1':
        col_start = 0
    else:
        col_start = d_chr_dim_inc[chromosomes_list[chromosomes_list.index(chr_col) - 1]]
    col_end = col_start + d_chr_dim[chr_col]
    output_matrix = full_matrix[row_start:row_end, col_start:col_end]
    # Build the output file name: "mb" label for >= 1 Mb resolution, "kb" below.
    if bin_size >= 1000000:
        bin_size_label = str(bin_size // 1000000) + 'mb'
    else:
        bin_size_label = str(bin_size // 1000) + 'kb'
    if chr_row == chr_col:
        my_filename = 'HiCtool_chr' + chr_row + '_' + bin_size_label + '_' + data_type + '.txt'
        if save_output == True:
            save_matrix(output_matrix, my_filename)
    else:
        my_filename = 'HiCtool_chr' + chr_row + '_chr' + chr_col + '_' + bin_size_label + '_' + data_type + '.txt'
        if save_output == True:
            save_matrix_rectangular(output_matrix, my_filename)
    if save_tab == True:
        save_matrix_tab(output_matrix, my_filename.split('.')[0] + '_tab.txt')
    return output_matrix
def calculate_chromosome_DI(input_contact_matrix,
                            a_chr,
                            isGlobal,
                            tab_sep=False,
                            data_type='normalized',
                            species='hg38',
                            save_file=True):
    """
    Calculate the Directionality Index (DI) values for a chromosome and
    optionally save them in a txt file.

    Arguments:
        input_contact_matrix (str | obj): normalized intra-chromosomal contact matrix at a bin size of 40kb passed as a filename (str)
        or an object. Either a single contact matrix or a global contact matrix can be passed (see following arguments).
        a_chr (str): chromosome number (example for chromosome 1: '1').
        isGlobal (bool): set True if your input matrix is a global matrix (all-by-all chromosomes).
        tab_sep (bool): set True if your input matrix is in a tab separated format. If the matrix is passed as an
        object, this parameter is not taken into consideration.
        data_type (str): data type label of the matrix ("observed", "normalized", ...).
        species (str): species label in string format.
        save_file (bool): if True, saves the DI values to txt file under "tad_analysis/".
    Returns: List with the DI values.
    Output: Txt file with the DI values if "save_file=True".
    """
    import copy
    # Resolve the input into a single intra-chromosomal contact matrix.
    if isGlobal:
        contact_matrix = extract_single_map(input_global_matrix=input_contact_matrix,
                                            tab_sep=tab_sep,
                                            chr_row=a_chr,
                                            chr_col=a_chr,
                                            species=species,
                                            bin_size=40000,
                                            data_type=data_type,
                                            save_output=False,
                                            save_tab=False)
    else:
        if isinstance(input_contact_matrix, str):
            if tab_sep:
                contact_matrix = load_matrix_tab(input_contact_matrix)
            else:
                contact_matrix = load_matrix(input_contact_matrix)
        else:
            # Deep copy so the caller's matrix is never mutated.
            contact_matrix = copy.deepcopy(input_contact_matrix)
    print("Calculating DI values...")
    n = contact_matrix.shape[0]
    DI = []  # one DI value per 40 kb bin
    # Number of bins in the 2 Mb window upstream/downstream of each bin.
    # Floor division keeps this an int on Python 3 (a float here would make
    # the slice indices below raise TypeError).
    len_var = 2000000 // 40000
    for locus in range(n):  # 'locus' refers to a bin
        # A = coverage in the upstream window, B = downstream window, both
        # clipped at the chromosome boundaries.
        if locus < len_var:
            A = sum(contact_matrix[locus][:locus])
            B = sum(contact_matrix[locus][locus+1:locus+len_var+1])
        elif locus >= n-len_var:
            A = sum(contact_matrix[locus][locus-len_var:locus])
            B = sum(contact_matrix[locus][locus+1:])
        else:
            A = sum(contact_matrix[locus][locus-len_var:locus])
            B = sum(contact_matrix[locus][locus+1:locus+len_var+1])
        E = (A+B)/2  # expected number of reads
        if A == B:
            # No directional bias; this also covers A == B == 0. The original
            # code yielded NaN for A == B != 0 because (B-A)/|B-A| is 0/0 with
            # numpy floats (the ZeroDivisionError guard never fired).
            DI.append(0)
        else:
            # When A != B and counts are non-negative, E > 0 is guaranteed,
            # so no division-by-zero guard is needed here.
            di = ((B-A)/(abs(B-A)))*((((A-E)**2)/E)+(((B-E)**2)/E))
            DI.append(di)
    if save_file:
        save_list(DI, 'tad_analysis/HiCtool_chr' + a_chr + '_DI.txt')
    print("Done!")
    return DI
def calculate_chromosome_hmm_states(input_file_DI,
                                    a_chr,
                                    save_file=True):
    """
    Calculate the HMM biased states ("true DI" values) for a chromosome and
    optionally save them in a txt file. It takes DI values as input.
    Arguments:
        input_file_DI (str | obj): txt file of the DI values generated with the function "calculate_chromosome_DI" or
        object with the DI values returned by "calculate_chromosome_DI".
        a_chr (str): chromosome number (example for chromosome 1: '1').
        save_file (bool): if True, saves the hmm states to txt file.
    Returns: List with the HMM states (integers in {0, 1, 2}).
    Output: Txt file with the HMM states if "save_file=True".
    """
    import numpy as np
    import hmmlearn.hmm as hmm
    
    print "Calculating true DI values..."
    if isinstance(input_file_DI,str):
        A = load_DI_values(input_file_DI)
    else:
        A = input_file_DI
    
    # Guessed Transition Matrix (mild preference for staying in the same state)
    TRANS_GUESS = np.array([[0.4, 0.3, 0.3],
                            [0.3, 0.4, 0.3],
                            [0.3, 0.3, 0.4]])
    
    # Guessed Emission Matrix (near-uniform starting point; refined by fit below)
    EMISS_GUESS = np.array([[0.4, 0.3, 0.3],
                            [0.3, 0.4, 0.3],
                            [0.3, 0.3, 0.4]])
    
    # Discretize the DI values into three observed emission symbols:
    # 1 for DI >= 0.4, 2 for DI <= -0.4, 0 otherwise (weak/no bias).
    # NOTE(review): the 0.4 threshold appears to be an empirical choice -- confirm.
    emissions = []
    zero_threshold = 0.4;
    for i in range(0,len(A)):
        if A[i] >= zero_threshold:
            emissions.append(1)
        elif A[i] <= -zero_threshold:
            emissions.append(2)
        else:
            emissions.append(0)
    
    # Hidden Markov Model with discrete emissions; init_params="" keeps the
    # guessed matrices above as the starting point instead of random init.
    # NOTE(review): this relies on the historical hmmlearn MultinomialHMM
    # behavior (one categorical symbol per row); newer hmmlearn versions
    # renamed that model to CategoricalHMM -- confirm the installed version.
    model = hmm.MultinomialHMM(n_components=3, init_params="")
    model.transmat_ = TRANS_GUESS
    model.emissionprob_ = EMISS_GUESS
    
    # hmmlearn expects a 2D column vector of observations.
    input_observations = np.array([emissions]).T
    model.fit(input_observations) # estimate model parameters
    
    # Find most likely state sequence corresponding to input_observations using the Viterbi Algorithm
    logprob, likelystates_array = model.decode(input_observations, algorithm="viterbi")
    
    likelystates = likelystates_array.tolist()
    
    if save_file == True:
        save_list(likelystates, "tad_analysis/HiCtool_chr" + a_chr + "_hmm_states.txt")
    print "Done!"
    return likelystates
def load_hmm_states(input_file):
    """
    Load an HMM txt file generated with "calculate_chromosome_hmm_states".
    Arguments:
        input_file (str): input file name in txt format (one integer state per line,
            with a trailing newline at the end of the file).
    Returns:
        List of the HMM states as integers.
    """
    # Use a context manager so the handle is always closed (the original
    # opened the file with 'r+' and never closed it); read-only is enough.
    with open(input_file, 'r') as fp:
        lines = fp.read().split('\n')
    # Drop the trailing empty entry produced by the final newline.
    lines = lines[:-1]
    # A list comprehension keeps the return type a list on both Python 2
    # and Python 3 (where map() returns an iterator).
    return [int(x) for x in lines]
def plot_chromosome_DI(input_file_DI,
a_chr,
full_chromosome,
start_pos=0,
end_pos=0,
input_file_hmm=None,
species='hg38',
plot_legend=True,
plot_grid=True):
"""
Function to plot the DI and true DI values for a chromosome.
Arguments:
input_file_DI (str | obj): txt file of the DI values generated with the function "calculate_chromosome_DI" or
object with the DI values returned by "calculate_chromosome_DI".
a_chr (str): chromosome number (example for chromosome 1: '1').
full_chromosome (bool): if True, plot the full chromosome "a_chr". In this case "start_pos" and "end_pos" parameters are not considered.
start_pos (int): start coordinate for the plot in bp.
end_pos (int): end coordinate for the plot in bp.
input_file_hmm (str | obj): txt file of the true DI values generated with the function "calculate_chromosome_hmm_states" or
object with the true DI values returned by "calculate_chromosome_hmm_states".
species (str): species name (hg38, mm10, etc.).
plot_legend (bool): if True, plot the legend.
plot_grid (bool): if True, plot the grid.
Output:
Plot saved to pdf file.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
bin_size = 40000
chromosomes = open(parameters['chromSizes_path'] + parameters['species'] + '.chrom.sizes', 'r')
chromosomes_list = []
chr_dim = []
d_chr_length = {}
d_chr_dim = {}
while True:
try:
line2list = next(chromosomes).split('\n')[0].split('\t')
chromosomes_list.append(line2list[0])
chr_dim.append(int(line2list[1])/bin_size)
d_chr_length[line2list[0]] = int(line2list[1])
d_chr_dim[line2list[0]] = int(line2list[1])/bin_size
except StopIteration:
break
if full_chromosome == True:
start_pos = 0
end_pos = int(round(d_chr_dim[a_chr]))*bin_size
start_index = 0
end_index = int(round(d_chr_dim[a_chr])) + 1
else:
if end_pos == 0:
print "ERROR: insert start and end coordinates"
return
start_index = int(round(start_pos/bin_size))
end_index = int(round((end_pos)/bin_size))
if end_pos > int(round(d_chr_dim[a_chr]))*bin_size and end_pos <= d_chr_length[a_chr]:
end_pos = int(round(d_chr_dim[a_chr]))*bin_size
elif end_pos > d_chr_length[a_chr]:
print("ERROR: end coordinate exceeds chromosome dimension")
return
if isinstance(input_file_DI,str):
DI = load_DI_values(input_file_DI)
else:
DI = input_file_DI
DI_part = DI[start_index:end_index]
x = np.arange(start_pos,end_pos,bin_size)
width = bin_size/1.5
pos_DI = np.array(DI_part)
neg_DI = np.array(DI_part)
pos_DI[pos_DI <= 0] = np.nan
neg_DI[neg_DI > 0] = np.nan
if input_file_hmm == None:
print "Plotting DI values..."
plt.close("all")
plt.bar(x, pos_DI, width, color="r", label="Positive DI")
plt.bar(x, neg_DI, width, color="g", label="Negative DI")
plt.xlim([x[0]-bin_size*8,x[-1]+bin_size*8])
plt.ylim([min(DI_part)-25,max(DI_part)+25])
plt.title("Directionality Index " + species + " [Chr " + a_chr +": " + str(start_pos) + "-" + str(end_pos) + "]")
plt.xlabel("Base coordinates")
plt.ylabel("Directionality Index (DI) values")
plt.grid(plot_grid)
if plot_legend == True:
plt.legend(prop={'size': 8})
plt.savefig("tad_analysis/HiCtool_chr" + a_chr + "_DI.pdf", format = 'pdf')
print "Done!"
else:
print "Plotting DI and true DI values..."
if isinstance(input_file_hmm,str):
likelystates = load_hmm_states(input_file_hmm)
else:
likelystates = input_file_hmm
DI_true = []
for i in range(0,len(likelystates)):
if likelystates[i] == 1:
DI_true.append(min(DI_part)-12)
elif likelystates[i] == 2:
DI_true.append(min(DI_part)-15)
else:
DI_true.append(0)
DI_true_part = DI_true[start_index:end_index]
# Plot
pos_DI_true = np.array(DI_true_part)
neg_DI_true = np.array(DI_true_part)
pos_DI_true[pos_DI_true != min(DI_part)-12] = np.nan
neg_DI_true[neg_DI_true != min(DI_part)-15] = np.nan
plt.close("all")
plt.bar(x, pos_DI, width, color="r", label="Positive DI", linewidth = 0.1)
plt.bar(x, neg_DI, width, color="g", label="Negative DI", linewidth = 0.1)
plt.plot(x, pos_DI_true, marker=">", color="r", label="Positive true DI")
plt.plot(x, neg_DI_true, marker="<", color="g", label="Negative true DI")
plt.xlim([x[0]-bin_size*8,x[-1]+bin_size*8])
plt.ylim([min(DI_part)-25,max(DI_part)+25])
plt.title("Directionality Index " + species + " [Chr " + a_chr +": " + str(start_pos) + "-" + str(end_pos) + "]")
plt.xlabel("Base coordinates")
plt.ylabel("Directionality Index (DI) values")
plt.grid(plot_grid)
if plot_legend == True:
plt.legend(prop={'size': 8})
plt.savefig("tad_analysis/HiCtool_chr" + a_chr + "_DI_HMM.pdf", format = 'pdf')
print "Done!"
def save_topological_domains(a_matrix, output_file):
    """
    Write topological domain coordinates to a text file.
    Each row of "a_matrix" becomes one line with its start and end
    coordinates tab separated.
    Arguments:
        a_matrix (numpy matrix): matrix whose rows are [start, end] pairs.
        output_file (str): output file name in txt format.
    Output:
        Tab separated txt file with topological domain start and end coordinates.
    """
    def _row_as_text(single_row):
        # Render the row, trim the surrounding brackets, then turn the
        # remaining spaces into tab separators.
        rendered = str(single_row).strip(']').strip('[').lstrip()
        return rendered.replace(' ', '\t')
    with open(output_file, 'w') as out_handle:
        out_handle.writelines(_row_as_text(entry) + '\n' for entry in a_matrix)
def load_topological_domains(input_file):
    """
    Function to load the topological domains coordinates from txt file.
    Arguments:
        input_file (str): input file name generated with "calculate_topological_domains" in txt format.
    Return:
        List of lists with topological domain coordinates.
    """
    import csv
    print "Loading topological domain coordinates..."
    # Each line holds one domain as tab-separated start/end coordinates.
    with open(input_file, 'r') as f:
        reader = csv.reader(f, dialect='excel', delimiter='\t')
        topological_domains = []
        for row in reader:
            # Convert both coordinates from text to int.
            row_int = [int(x) for x in row]
            topological_domains.append(row_int)
    print "Done!"
    return topological_domains
def calculate_chromosome_topological_domains(input_file_hmm,
                                             a_chr):
    """
    Function to calculate the topological domains coordinates of a chromosome. It takes the
    HMM states as input. Topological domains are stored in each line with tab separated start and end coordinates.
    Arguments:
        input_file_hmm (str | obj): txt file of the HMM states generated with the function
            "calculate_chromosome_hmm_states" or object with the true DI values returned by "calculate_chromosome_hmm_states".
        a_chr (str): chromosome number (example for chromosome 1: '1').
    Returns: List of lists with topological domain coordinates.
    Output: Tab separated txt file with the topological domain coordinates.
    """
    import numpy as np
    bin_size = 40000
    print "Calculating topological domain coordinates..."
    # Accept either a filename or an in-memory list of states.
    if isinstance(input_file_hmm,str):
        likelystates = load_hmm_states(input_file_hmm)
    else:
        likelystates = input_file_hmm
    # Start coordinates of the domains
    # A domain opens where the state sequence enters state 1 from state 2 or 0.
    p = []
    for i in range(1,len(likelystates)):
        if (likelystates[i] == 1 and likelystates[i-1] == 2) or (likelystates[i] == 1 and likelystates[i-1] == 0):
            p.append(i * bin_size)
    # End coordinates of the domains
    # A domain closes where the state sequence leaves state 2 into state 1 or 0.
    n = []
    for i in range(1,len(likelystates)-1):
        if (likelystates[i] == 2 and likelystates[i+1] == 1) or (likelystates[i] == 2 and likelystates[i+1] == 0):
            n.append(i * bin_size)
    if len(p) == 0 or len(n) == 0:
        print "WARNING! No topological domains can be detected in chromosome " + a_chr
        return
    # Two index pairs walk the start (p) and end (n) coordinate lists in
    # parallel: (p1, n1) is the current candidate domain, (p2, n2) look ahead.
    p1 = 0
    n1 = 0
    p2 = 1
    n2 = 1
    # Step 1: checking if the first negative values are greater than the first positive value.
    while n[n1] < p[p1]:
        n1 = n1 + 1
        n2 = n2 + 1
    # Now we have removed all the first negative values before the first positive one.
    topological_domains = []
    while p1 < len(p)-1 and n1 < len(n)-1:
        # Step 2: checking if there are two consecutive positive values.
        while n[n1] > p[p2] and p2 < len(p)-1:
            p2 = p2 + 1
        # Now we have removed the possible gaps between consecutive positive states.
        # Step 3: checking if there are two consecutive negative values.
        while n[n2] < p[p2] and n2 < len(n)-1:
            n1 = n1 + 1
            n2 = n2 + 1
        # Now we have removed the possible gaps between consecutive negative states.
        # Step 4: identification of the Topological Domain.
        topological_domains.append([p[p1],n[n1]])
        p1 = p2
        n1 = n2
        p2 = p1 + 1
        n2 = n1 + 1
    save_topological_domains(np.matrix(topological_domains), "tad_analysis/HiCtool_chr" + a_chr + "_topological_domains.txt")
    print "Done!"
    return topological_domains
def full_tad_analysis(input_contact_matrix,
                      a_chr,
                      isGlobal,
                      tab_sep,
                      species='hg38',
                      data_type='normalized',
                      save_di=True,
                      save_hmm=True):
    """
    Run the whole TAD pipeline for one chromosome: DI values, HMM states and
    topological domain coordinates.
    Arguments:
        input_contact_matrix (str | obj): normalized intra-chromosomal contact matrix at a bin size of 40kb,
            passed either as a filename or as an object; a single matrix or a global
            (all-by-all chromosomes) matrix can be given.
        a_chr: chromosome number (example for chromosome 1: '1').
        isGlobal (bool): set True if your input matrix is a global matrix (all-by-all chromosomes).
        tab_sep (bool): set True if your input matrix is in a tab separated format; ignored
            when the matrix is passed as an object.
        species (str): 'hg38' or 'mm10' or any other species label in string format.
        data_type (str): data type, "observed" or "normalized".
        save_di (bool): if True, save the DI values to txt file.
        save_hmm (bool): if True, save the HMM states to txt file.
    Returns: List of lists with topological domain coordinates.
    Output:
        Txt file containing topological domain coordinates, plus optional DI/HMM
        txt files; a single-chromosome matrix if the input matrix was global.
    """
    import copy
    bin_size = 40000
    # Resolve the input into one intra-chromosomal contact matrix.
    if isGlobal == False:
        if not isinstance(input_contact_matrix, str):
            # Copy so the caller's object is never mutated downstream.
            contact_matrix = copy.deepcopy(input_contact_matrix)
        elif tab_sep == False:
            contact_matrix = load_matrix(input_contact_matrix)
        else:
            contact_matrix = load_matrix_tab(input_contact_matrix)
    else:
        contact_matrix = extract_single_map(input_global_matrix=input_contact_matrix,
                                            tab_sep=tab_sep,
                                            chr_row=a_chr,
                                            chr_col=a_chr,
                                            species=species,
                                            bin_size=bin_size,
                                            data_type=data_type,
                                            save_output=False,
                                            save_tab=False)
    # Step 1: directionality index track.
    di_track = calculate_chromosome_DI(input_contact_matrix=contact_matrix,
                                       a_chr=a_chr,
                                       isGlobal=False,
                                       tab_sep=False,
                                       data_type=data_type,
                                       species=species,
                                       save_file=save_di)
    # Step 2: most likely HMM state sequence for the DI track.
    state_track = calculate_chromosome_hmm_states(input_file_DI=di_track,
                                                  a_chr=a_chr,
                                                  save_file=save_hmm)
    # Step 3: domain coordinates from the state sequence.
    return calculate_chromosome_topological_domains(input_file_hmm=state_track,
                                                    a_chr=a_chr)
if __name__ == '__main__':
    # Command-line front end: parse and validate options, copy them into the
    # module-level "parameters" dict, then dispatch to the requested action.
    usage = 'Usage: python2.7 HiCtool_TAD_analysis.py [-h] [options]'
    parser = OptionParser(usage = 'python2.7 %prog --action action -i input_file [options]')
    parser.add_option('--action', dest='action', type='string', help='Action to perform: full_tad_analysis or plot_chromosome_DI.')
    parser.add_option('-i', dest='input_file', type='string', help='Input contact matrix file if action is "full_tad_analysis" or DI values if action is "plot_chromosome_DI".')
    parser.add_option('-c', dest='chromSizes_path', type='string', help='Path to the folder chromSizes with trailing slash at the end.')
    parser.add_option('-s', dest='species', type='string', help='Species. It has to be one of those present under the chromSizes path.')
    parser.add_option('--isGlobal', dest='isGlobal', type='int', help='Insert 1 if the input matrix is a global matrix, 0 otherwise.')
    parser.add_option('--tab_sep', dest='tab_sep', type='int', help='Insert 1 if the input matrix is in a tab separated format, 0 if it is in compressed format.')
    parser.add_option('--chr', dest='chr', type='str', help='If action is "full_tad_analysis": chromosome or list of chromosomes between square brackets to select specific maps for the analysis. If action is "plot_chromosome_DI" insert a single chromosome to plot the DI values.')
    parser.add_option('--data_type', dest='data_type', type='str', default='normalized', help='Data type to label your data, example: observed, normalized, etc.')
    parser.add_option('--full_chromosome', dest='full_chromosome', type='int', help='Insert 1 to plot DI and HMM states for the entire chromosome, 0 otherwise.')
    parser.add_option('--coord', dest='coord', type='str', help='List of two integers with start and end coordinates to plot the DI values and HMM values.')
    parser.add_option('--input_file_hmm', dest='input_file_hmm', type='string', help='Input HMM states file if action is "plot_chromosome_DI" to plot also the HMM states.')
    parser.add_option('--plot_legend', dest='plot_legend', type='int', default=1, help='If action is "plot_chromosome_DI", insert 1 to plot the legend, 0 otherwise.')
    parser.add_option('--plot_grid', dest='plot_grid', type='int', default=1, help='If action is "plot_chromosome_DI", insert 1 to plot the grid, 0 otherwise.')
    (options, args) = parser.parse_args( )
    # Options that are mandatory for every action.
    if options.action == None:
        parser.error('-h for help or provide the action command (full_tad_analysis or plot_chromosome_di)!')
    else:
        pass
    if options.input_file == None:
        parser.error('-h for help or provide the input contact matrix or the DI values file!')
    else:
        pass
    if options.chromSizes_path == None:
        parser.error('-h for help or provide the chromSizes path!')
    else:
        pass
    if options.species == None:
        parser.error('-h for help or provide the species!')
    else:
        pass
    if options.chr == None:
        parser.error('-h for help or provide the input chromosomes (list of chromosomes accepted if action is "full_tad_analysis", single chromosome only for "plot_chromosome_DI")!')
    else:
        pass
    # Copy the parsed values into the module-level dict consumed by the
    # worker functions above.
    parameters['action'] = options.action
    parameters['input_file'] = options.input_file
    parameters['chromSizes_path'] = options.chromSizes_path
    parameters['isGlobal'] = options.isGlobal
    parameters['tab_sep'] = options.tab_sep
    parameters['chr'] = options.chr
    parameters['species'] = options.species
    parameters['data_type'] = options.data_type
    parameters['full_chromosome'] = options.full_chromosome
    parameters['coord'] = options.coord
    parameters['input_file_hmm'] = options.input_file_hmm
    parameters['plot_legend'] = options.plot_legend
    parameters['plot_grid'] = options.plot_grid
    # The species is valid only if a matching chromSizes file exists.
    if parameters['species'] + ".chrom.sizes" not in os.listdir(parameters['chromSizes_path']):
        available_species = ', '.join([x.split('.')[0] for x in os.listdir(parameters['chromSizes_path'])])
        parser.error('Wrong species inserted! Check the species spelling or insert an available species: ' + available_species + '. If your species is not listed, please contact Riccardo Calandrelli at <rcalandrelli@eng.ucsd.edu>.')
    # All outputs go under ./tad_analysis.
    output_path = "tad_analysis"
    if not path.exists(output_path):
        os.mkdir(output_path)
    if parameters['action'] == 'full_tad_analysis':
        # Extra options required by the full pipeline.
        if options.isGlobal == None:
            parser.error('-h for help or insert 1 if the contact matrix is global (all-by-all chromosomes), 0 otherwise!')
        else:
            pass
        if options.tab_sep == None:
            parser.error('-h for help or insert 1 if the contact matrix is in tab separated format, 0 otherwise!')
        else:
            pass
        if options.data_type == None:
            parser.error('-h for help or insert a custom label for the data type (observed, normalized, etc.)!')
        else:
            pass
        # "--chr" may be a single chromosome or a bracketed list, e.g. [1,2,X].
        chr_list = map(str, parameters['chr'].strip('[]').split(','))
        if bool(parameters['isGlobal']) == False:
            if len(chr_list) > 1:
                parser.error('To perform the analysis on multiple chromosomes you must insert a global all-by-all chromosomes matrix.')
            else:
                pass
        # Load the matrix once and reuse it for every requested chromosome.
        if bool(parameters['tab_sep']) == False:
            contact_matrix = load_matrix(parameters['input_file'])
        else:
            contact_matrix = load_matrix_tab(parameters['input_file'])
        for c in chr_list:
            print "Performing TAD analysis on chr" + c + " ..."
            full_tad_analysis(contact_matrix,
                              c,
                              parameters['isGlobal'],
                              parameters['tab_sep'],
                              parameters['species'],
                              parameters['data_type'],
                              True,
                              True)
            print "Done!"
    elif parameters['action'] == 'plot_chromosome_DI':
        if options.full_chromosome == None:
            parser.error('-h for help or insert 1 if you wish to plot the DI for the entire chromosome, 0 otherwise!')
        else:
            pass
        chr_list = map(str, parameters['chr'].strip('[]').split(','))
        if len(chr_list) > 1:
            parser.error("Only a single chromosome is accepted if action is plot_chromosome_DI!")
        # When plotting a sub-region, "--coord" supplies [start,end] in bp.
        if bool(parameters["full_chromosome"]) == False:
            coord = map(int, parameters['coord'].strip('[]').split(','))
            start_pos = coord[0]
            end_pos = coord[1]
        else:
            start_pos = 0
            end_pos = 0
        plot_chromosome_DI(parameters["input_file"],
                           parameters["chr"],
                           bool(parameters["full_chromosome"]),
                           start_pos,
                           end_pos,
                           parameters["input_file_hmm"],
                           parameters["species"],
                           bool(parameters["plot_legend"]),
                           bool(parameters["plot_grid"]))
| [
"rcalandrelli@eng.ucsd.edu"
] | rcalandrelli@eng.ucsd.edu |
c5ef9f3c896720bfe3cbcd8bf8087394c0635cc3 | 343bdaddfc66c6316e2cee490e9cedf150e3a5b7 | /0101_0200/0140/0140.py | fcfbf5535dac6588d0fb41901a5501b3284bd7d6 | [] | no_license | dm-alexi/acmp | af7f6b4484b78f5922f3b464406a0ba5dea0d738 | 3fa0016d132adfeab7937b3e8c9687a34642c93a | refs/heads/master | 2021-07-09T15:14:25.857086 | 2020-10-20T19:08:54 | 2020-10-20T19:08:54 | 201,908,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | from math import inf
# Detect a negative cycle with Floyd-Warshall: relax every pair of vertices
# through each intermediate vertex, then check the diagonal for a negative
# self-distance. The sentinel weight "100000" in the input means "no edge".
with open("input.txt", "r") as src, open("output.txt", "w") as dst:
    size = int(src.readline())
    INF = float('inf')
    graph = []
    for _ in range(size):
        row = [INF if token == "100000" else int(token) for token in src.readline().split()]
        graph.append(row)
    for mid in range(size):
        for a in range(size):
            for b in range(size):
                if graph[a][mid] < INF and graph[mid][b] < INF and graph[a][mid] + graph[mid][b] < graph[a][b]:
                    graph[a][b] = graph[a][mid] + graph[mid][b]
    has_negative_cycle = any(graph[v][v] < 0 for v in range(size))
    dst.write("YES" if has_negative_cycle else "NO")
| [
"dm2.alexi@gmail.com"
] | dm2.alexi@gmail.com |
e3777872b94428267992a01b44c30ba2643b99bc | c91b68be796a9835c528856b6f5fa7b56d2af451 | /examples/mnist_convnet.py | d9e994d350811b397b81ced710890fceedbf32db | [
"Apache-2.0"
] | permissive | syzh1991/tensorpack | fe61cb46fd40aa0cb9f8a0a3ea4ea3bb833cb3c5 | 174c3fc9d60b0cbeccac2ae3e73e73d6e788dbe0 | refs/heads/master | 2021-01-17T00:24:08.366350 | 2016-04-19T06:25:57 | 2016-04-19T06:25:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,520 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: mnist_convnet.py
# Author: Yuxin Wu <ppwwyyxx@gmail.com>
import tensorflow as tf
import numpy as np
import os, sys
import argparse
from tensorpack.train import *
from tensorpack.models import *
from tensorpack.utils import *
from tensorpack.tfutils.symbolic_functions import *
from tensorpack.tfutils.summary import *
from tensorpack.tfutils import *
from tensorpack.callbacks import *
from tensorpack.dataflow import *
"""
MNIST ConvNet example.
about 0.6% validation error after 30 epochs.
"""
BATCH_SIZE = 128
IMAGE_SIZE = 28
class Model(ModelDesc):
    """ConvNet model description for MNIST.

    Written against the TF 0.x / early-tensorpack API (InputVar,
    tf.scalar_summary-era collections); layer functions such as Conv2D,
    MaxPooling, FullyConnected and PReLU come from the tensorpack star
    imports at the top of the file.
    """
    def _get_input_vars(self):
        # Two graph inputs: a batch of 28x28 grayscale images and int labels.
        return [InputVar(tf.float32, (None, IMAGE_SIZE, IMAGE_SIZE), 'input'),
                InputVar(tf.int32, (None,), 'label')
               ]
    def _get_cost(self, input_vars, is_training):
        """Build the forward graph and return the total cost
        (cross-entropy plus L2 weight decay on the fully-connected weights)."""
        is_training = bool(is_training)
        # Dropout is active only during training; keep_prob 1.0 disables it.
        keep_prob = tf.constant(0.5 if is_training else 1.0)
        image, label = input_vars
        image = tf.expand_dims(image, 3) # add a single channel
        nl = PReLU.f
        # Rescale pixel values from [0, 1] to [-1, 1].
        image = image * 2 - 1
        l = Conv2D('conv0', image, out_channel=32, kernel_shape=3, nl=nl,
                   padding='VALID')
        l = MaxPooling('pool0', l, 2)
        l = Conv2D('conv1', l, out_channel=32, kernel_shape=3, nl=nl, padding='SAME')
        l = Conv2D('conv2', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
        l = MaxPooling('pool1', l, 2)
        l = Conv2D('conv3', l, out_channel=32, kernel_shape=3, nl=nl, padding='VALID')
        l = FullyConnected('fc0', l, 512)
        l = tf.nn.dropout(l, keep_prob)
        # fc will have activation summary by default. disable this for the output layer
        logits = FullyConnected('fc1', l, out_dim=10, nl=tf.identity)
        prob = tf.nn.softmax(logits, name='prob')
        cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, label)
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, cost)
        # compute the number of failed samples, for ClassificationError to use at test time
        wrong = prediction_incorrect(logits, label)
        nr_wrong = tf.reduce_sum(wrong, name='wrong')
        # monitor training error
        tf.add_to_collection(
            MOVING_SUMMARY_VARS_KEY, tf.reduce_mean(wrong, name='train_error'))
        # weight decay on all W of fc layers
        wd_cost = tf.mul(1e-5,
                         regularize_cost('fc.*/W', tf.nn.l2_loss),
                         name='regularize_loss')
        tf.add_to_collection(MOVING_SUMMARY_VARS_KEY, wd_cost)
        add_param_summary([('.*/W', ['histogram'])]) # monitor histogram of all W
        return tf.add_n([wd_cost, cost], name='cost')
def get_config():
    """Assemble the TrainConfig: log directory, data pipeline, session
    options, learning-rate schedule, optimizer and monitoring callbacks."""
    # Log under train_log/<script name without extension>.
    script_name = os.path.basename(__file__)
    logger.set_logger_dir(
        os.path.join('train_log', script_name[:script_name.rfind('.')]))
    # Datasets: training batches of 128, test batches of 256 (keep remainder).
    train_data = BatchData(dataset.Mnist('train'), 128)
    test_data = BatchData(dataset.Mnist('test'), 256, remainder=True)
    steps = train_data.size()
    # Session: cap per-process GPU memory usage at 50%.
    session_conf = get_default_sess_config()
    session_conf.gpu_options.per_process_gpu_memory_fraction = 0.5
    # Learning rate: exponential decay by 0.3x every 10 epochs.
    lr = tf.train.exponential_decay(
        learning_rate=1e-3,
        global_step=get_global_step_var(),
        decay_steps=train_data.size() * 10,
        decay_rate=0.3, staircase=True, name='learning_rate')
    tf.scalar_summary('learning_rate', lr)
    monitor_callbacks = Callbacks([
        StatPrinter(),
        ModelSaver(),
        InferenceRunner(test_data,
                        [ScalarStats('cost'), ClassificationError()]),
    ])
    return TrainConfig(
        dataset=train_data,
        optimizer=tf.train.AdamOptimizer(lr),
        callbacks=monitor_callbacks,
        session_config=session_conf,
        model=Model(),
        step_per_epoch=steps,
        max_epoch=100,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') # nargs='*' in multi mode
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()
    # Restrict TensorFlow to the requested GPU(s); default to device 0.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.gpu else '0'
    with tf.Graph().as_default():
        config = get_config()
        if args.load:
            # Resume from a saved checkpoint.
            config.session_init = SaverRestore(args.load)
        SimpleTrainer(config).train()
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
d06d7c4a50a9d2ed62e1339c2c422ef078e2e611 | 7410903c6cd5ef35c592af00c934fb21c369cbf2 | /00_Code/01_LeetCode/69_Sqrt.py | 4f2aa947d9e808ddbc9837a59a51ea6e638dbf3b | [
"MIT"
] | permissive | KartikKannapur/Algorithms | f4e4726170599db0622d18e8c06a382e9bce9e77 | 66e3c8112826aeffb78bd74d02be1a8d1e478de8 | refs/heads/master | 2020-12-25T18:32:41.086518 | 2020-10-19T02:59:47 | 2020-10-19T02:59:47 | 93,961,043 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | # #Implement int sqrt(int x).
# #Compute and return the square root of x.
# #x is guaranteed to be a non-negative integer.
# #Your runtime beats 81.07 % of python submissions.
class Solution(object):
    def mySqrt(self, x):
        """
        Return the integer square root of x: the largest integer r with
        r * r <= x.

        Removes the dead commented-out math.sqrt variant and replaces the
        original search (which set `high = mid` without decrementing and
        relied on a subtle invariant to terminate) with the standard
        bounded binary search.

        :type x: int (guaranteed non-negative)
        :rtype: int
        """
        low, high = 0, x
        # Invariant: the answer always lies in [low, high].
        while low <= high:
            mid = (low + high) // 2
            if mid * mid <= x:
                if (mid + 1) * (mid + 1) > x:
                    return mid
                low = mid + 1
            else:
                high = mid - 1
        return 0  # unreachable for valid (non-negative) input; defensive
"kartikkannapur@gmail.com"
] | kartikkannapur@gmail.com |
5486ec3f9035cc8d5ab182d372bd09effbdc81d9 | 60afeba07c0c8e86f53a057b2358e8448f1ad97c | /python/TestFor25.py | 277ecabca5dc33827e15edd9fcfcf7fe38e14725 | [] | no_license | umn2o2co2/DivisibilityTestPrograms | 202c07176f67534553e12c1f0ccfd9b78a1b59ec | 67f734efd86051b8c954de6ce01047d2a92973a7 | refs/heads/master | 2020-03-31T04:58:48.072800 | 2018-10-07T10:07:03 | 2018-10-07T10:07:03 | 151,912,701 | 0 | 0 | null | 2018-10-07T06:30:30 | 2018-10-07T06:30:30 | null | UTF-8 | Python | false | false | 177 | py | endin = ['0','25', '50', '75', '00']
# A decimal integer is divisible by 25 exactly when it ends in 00, 25, 50
# or 75 (or is the single digit 0), so only the last two characters matter.
n = input('Input to test for divisibility by 25: ')
if n[-2:] in ('0', '25', '50', '75', '00'):
    print('Divisible by 25')
else:
    print('Not Divisible by 25')
"primeoptimus98@gmail.com"
] | primeoptimus98@gmail.com |
73d070902483280a7fc45ae6ffdbc811594dd9e2 | 02ae54de0a3e508bf7b94548916b7ec61e077d57 | /lib/LogicOperators.py | 3eb1b1300877a45bb848234a368153b7fdd111e5 | [] | no_license | Livruen/-home-livruen-Neural-Network-Dokumentation-Backpropagation-Code-NeuralNetwork_Backpropagation_Facer | b1449f864525805f55e2601a01cd27128096b363 | 9c29e10c5b2af90e137f9062b4b9405338db14db | refs/heads/master | 2020-06-19T23:36:42.603167 | 2016-11-26T13:42:46 | 2016-11-26T13:42:46 | 74,827,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 429 | py | __author__ = "Natasza Szczypien"
class LogicOperators(object):
    """
    Fixed network topology for learning a binary logic operator.

    BUG FIX: the original defined accessor methods with the same names as
    the attributes they were meant to return (e.g. ``def target(self):
    return self.target``). The method definitions shadowed the class-level
    defaults, so every "getter" returned a bound method object (or crashed
    when the instance attribute shadowed the method) and never worked.
    Plain attribute access replaces the broken accessors.
    """

    # Network dimensions: two boolean inputs, one hidden layer, one output.
    inputNodes = 2
    hiddenNodes = 10
    outputNodes = 1

    def __init__(self, target):
        # Expected output(s) of the logic operator being learned.
        self.target = target
| [
"kiya.natasza@gmail.com"
] | kiya.natasza@gmail.com |
ebfe12ca6885e70c9bc5fc09d36ec026e5bad8d6 | 248e0e0a28dd3640331c4ffc581781cb8edad42d | /py-etl/main.py | 246e76e482ed6bbc57fa90e8b65dbdbfab0d45a5 | [] | no_license | danmcquillan/cosm | 70d3ea91f43e72b035af659059554af36707beed | ceec0db731f479084075626ed8b2508da65c3816 | refs/heads/master | 2021-01-24T23:34:18.014070 | 2012-09-19T14:51:02 | 2012-09-19T14:51:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | from sqlalchemy import *
# from sqlalchemy.orm import *
# from sqlalchemy.schema import *
from app import *
# ========
# = Main =
# ========
if __name__ == "__main__":
    # Turn on SQL statement echoing, then build the schema.
    # NOTE(review): getDb() and initDb() come from the star import of "app"
    # above; getDb() presumably returns a SQLAlchemy Engine whose `echo`
    # flag controls statement logging — confirm in the app module.
    getDb().echo = True
    initDb()
| [
"martin@dekstop.de"
] | martin@dekstop.de |
a3ab3c0e572b348740cdbbea4546af673b20815e | f01834a702d1cc6524cacaa7fabc8be251ecf35a | /fibonaci series using recursion.py | 5b60a510fc97a55cc20c1bdc077e791f6dad53ad | [] | no_license | shiwanisingh9818/Python | 00f75565cbb22ca2a8d99622e07d0b8b89eb44b6 | e522a4f6f65d4b00e4ddecdcbe11209d5a91b71f | refs/heads/master | 2022-11-15T19:55:32.742862 | 2020-07-10T17:19:24 | 2020-07-10T17:19:24 | 272,676,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 16 19:56:53 2020
@author: shiwa
"""
def recur_fibo(n, _cache=None):
    """
    Return the n-th Fibonacci number (0-indexed) using recursion.

    The original two-branch recursion was exponential in n; the optional
    memoization dict shared across the recursive calls makes it linear
    while keeping the recursive definition and remaining backward
    compatible (callers still invoke recur_fibo(n)).

    :param n: index into the sequence (fib(0) == 0, fib(1) == 1).
    :param _cache: internal memo dict; callers should omit it.
    :return: the n-th Fibonacci number; values n <= 1 are returned as-is,
        matching the original behavior.
    """
    if n <= 1:
        return n
    if _cache is None:
        _cache = {}
    if n not in _cache:
        _cache[n] = recur_fibo(n - 1, _cache) + recur_fibo(n - 2, _cache)
    return _cache[n]
# Read how many terms to print, validate the count, then print the
# Fibonacci sequence one term per line.
nterms = int(input("How many terms? "))
if nterms <= 0:
    # BUG FIX: corrected the "Plese" typo in the user-facing message.
    print("Please enter a positive integer")
else:
    print("Fibonacci sequence:")
    for i in range(nterms):
        print(recur_fibo(i))
"noreply@github.com"
] | shiwanisingh9818.noreply@github.com |
873f832b9b4a502cdab6b718ab5f202b53555a0a | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib64/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/datetime/rsntpprovtontpauthkey.py | a7e4bb2cc3d0c51d834f75f92edff686fd33660f | [] | no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 8,541 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class RsNtpProvToNtpAuthKey(Mo):
    """
    The authentication key to apply to a specific provider. Keys can be shared with different providers.
    """

    # Auto-generated Cisco ACI (cobra) model class -- see the file header
    # ("written by zen warriors, do not modify!").  Relation metadata:
    # a named ONE_TO_M relation from datetime.NtpProv to
    # datetime.NtpAuthKey, keyed by the target key id.
    meta = NamedSourceRelationMeta("cobra.model.datetime.RsNtpProvToNtpAuthKey", "cobra.model.datetime.NtpAuthKey")

    meta.targetNameProps["id"] = "tnDatetimeNtpAuthKeyId"
    meta.cardinality = SourceRelationMeta.ONE_TO_M

    meta.moClassName = "datetimeRsNtpProvToNtpAuthKey"
    meta.rnFormat = "rsntpProvToNtpAuthKey-%(tnDatetimeNtpAuthKeyId)s"
    meta.category = MoCategory.RELATIONSHIP_TO_LOCAL
    meta.label = "Relation to Datetime Authentication Key"
    meta.writeAccessMask = 0x10000000001
    meta.readAccessMask = 0x10000000001
    meta.isDomainable = False
    meta.isReadOnly = False
    meta.isConfigurable = True
    meta.isDeletable = True
    meta.isContextRoot = False

    # Allowed child MO classes and their RN prefixes.
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))

    meta.parentClasses.add("cobra.model.datetime.NtpProv")

    meta.superClasses.add("cobra.model.reln.Inst")
    meta.superClasses.add("cobra.model.reln.To")
    meta.superClasses.add("cobra.model.pol.NToRef")

    meta.rnPrefixes = [
        ('rsntpProvToNtpAuthKey-', True),
    ]

    # --- Managed-property metadata: one PropMeta per property ---
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    prop = PropMeta("str", "forceResolve", "forceResolve", 107, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = True
    prop.defaultValueStr = "yes"
    prop._addConstant("no", None, False)
    prop._addConstant("yes", None, True)
    meta.props.add("forceResolve", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "monPolDn", "monPolDn", 14775, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)

    prop = PropMeta("str", "rType", "rType", 106, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 1
    prop.defaultValueStr = "mo"
    prop._addConstant("local", "local", 3)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("service", "service", 2)
    meta.props.add("rType", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    # Resolution state of the relation (formed / missing target / etc.).
    prop = PropMeta("str", "state", "state", 103, PropCategory.REGULAR)
    prop.label = "State"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unformed"
    prop._addConstant("cardinality-violation", "cardinality-violation", 5)
    prop._addConstant("formed", "formed", 1)
    prop._addConstant("invalid-target", "invalid-target", 4)
    prop._addConstant("missing-target", "missing-target", 2)
    prop._addConstant("unformed", "unformed", 0)
    meta.props.add("state", prop)

    prop = PropMeta("str", "stateQual", "stateQual", 104, PropCategory.REGULAR)
    prop.label = "State Qualifier"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "none"
    prop._addConstant("default-target", "default-target", 2)
    prop._addConstant("mismatch-target", "mismatch-target", 1)
    prop._addConstant("none", "none", 0)
    meta.props.add("stateQual", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    # Target class / DN / RN bookkeeping for the relation.
    prop = PropMeta("str", "tCl", "tCl", 13318, PropCategory.REGULAR)
    prop.label = "Target-class"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 4527
    prop.defaultValueStr = "datetimeNtpAuthKey"
    prop._addConstant("datetimeNtpAuthKey", None, 4527)
    prop._addConstant("unspecified", "unspecified", 0)
    meta.props.add("tCl", prop)

    prop = PropMeta("str", "tContextDn", "tContextDn", 4990, PropCategory.REGULAR)
    prop.label = "Target-context"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tContextDn", prop)

    prop = PropMeta("str", "tDn", "tDn", 100, PropCategory.REGULAR)
    prop.label = "Target-dn"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("tDn", prop)

    prop = PropMeta("str", "tRn", "tRn", 4989, PropCategory.REGULAR)
    prop.label = "Target-rn"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.range = [(0, 512)]
    meta.props.add("tRn", prop)

    prop = PropMeta("str", "tType", "tType", 4988, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "name"
    prop._addConstant("all", "all", 2)
    prop._addConstant("mo", "mo", 1)
    prop._addConstant("name", "name", 0)
    meta.props.add("tType", prop)

    # Naming property: the id of the referenced NTP authentication key.
    prop = PropMeta("str", "tnDatetimeNtpAuthKeyId", "tnDatetimeNtpAuthKeyId", 16589, PropCategory.REGULAR)
    prop.label = "Auth Key Id"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 65535)]
    prop.defaultValue = 1
    prop.defaultValueStr = "1"
    meta.props.add("tnDatetimeNtpAuthKeyId", prop)

    prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("uid", prop)

    meta.namingProps.append(getattr(meta.props, "tnDatetimeNtpAuthKeyId"))

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"

    def __init__(self, parentMoOrDn, tnDatetimeNtpAuthKeyId, markDirty=True, **creationProps):
        # The key id is the naming value embedded in this MO's RN.
        namingVals = [tnDatetimeNtpAuthKeyId]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"collinsctk@qytang.com"
] | collinsctk@qytang.com |
440ab0d752b375b01d698c86a67743b7d9488307 | 8c6fa70bae915c70268c1180281b2b6d78399ce4 | /venv/Scripts/easy_install-script.py | 1ee2e312ba7ec426dc962ae681c830661f851f1f | [] | no_license | cha-n/PG | 6aad26fe32521e4713c0b0828b1365da7dcd1613 | 681051fff24f37302c2bba2f4614dc07386b3f89 | refs/heads/master | 2022-11-18T14:34:58.439384 | 2020-07-03T07:30:41 | 2020-07-03T07:30:41 | 276,623,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py | #!C:\Users\JCY\PycharmProjects\PG\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# NOTE: auto-generated setuptools console-script wrapper; not meant to be
# edited by hand.
__requires__ = 'setuptools==39.1.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip a trailing "-script.py(w)" / ".exe" so argv[0] matches the
    # installed command name before dispatching to the entry point.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"chan01115@naver.com"
] | chan01115@naver.com |
828a231c05e4668229f8e782b16eaab5f32a9e37 | 963f1008aa3c770d8d15e794109912f44b1e3fd7 | /Advice/apps.py | aedd80c33b03b43341abee5a342fadc904353599 | [] | no_license | 12mohaned/callForHelp | e4dea90f6c38d8fcae7d8f8eebc18fb62260c6b3 | 170531f2cc8788e52309d0f14622c6474379399a | refs/heads/master | 2022-12-31T02:12:01.624542 | 2020-10-14T16:56:06 | 2020-10-14T16:56:06 | 299,133,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class AdviceConfig(AppConfig):
    """Django application configuration for the ``Advice`` app."""

    # App label / import path as referenced from INSTALLED_APPS.
    name = 'Advice'
| [
"mohaned_boss@outlook.com"
] | mohaned_boss@outlook.com |
7524a59a073e25d6a4d924978ce923d29697289e | 631bda6c763448162fd3f6663d201e97cbfda36a | /static/ml_evening/homeworks/homework3.py | 12e69ad0f5e6b0b0716e3fcfcbdeba0d71d08b63 | [] | no_license | mamikonyana/mamikonyana.github.io | 47451086d87743817bd097a484f5280a199a81b9 | 7f9d5f2507f59b8e655a3de544e7dfb57cbd7784 | refs/heads/master | 2021-01-19T02:35:51.607792 | 2018-01-16T22:32:04 | 2018-01-16T22:32:04 | 12,716,852 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,247 | py | #!/usr/bin/env python3
"""
Homework3, template code
If you have any questions ask in #homeworks channel on slack.
"""
from __future__ import print_function
import numpy as np
import sys
def fit_ridge_regression(X, Y, l):
    """
    Calculates and returns analytic solution for ridge regression.

    Minimizes ``||Y - X beta||^2 + l * ||beta||^2``, whose closed-form
    solution is ``beta = (X^T X + l * I)^{-1} X^T Y``.

    :param X: data matrix (2 dimensional np.array)
    :param Y: response variables (1 dimensional np.array)
    :param l: regularization parameter lambda
    :return: value of beta (1 dimensional np.array)
    """
    d = X.shape[1]
    # Solve the regularized normal equations (X^T X + l I) beta = X^T Y
    # directly -- more stable and cheaper than forming an explicit inverse.
    beta = np.linalg.solve(X.T.dot(X) + l * np.eye(d), X.T.dot(Y))
    return beta
def gradient_descent(X, Y, l, epsilon, step_size, max_steps):
    """
    Implement gradient descent using the value of the gradient
    divided by number of samples.

    Minimizes ``(||Y - X beta||^2 + l * ||beta||^2) / n`` with a fixed
    step size, stopping early once the gradient norm falls below
    ``epsilon``.

    :param X: data matrix (2 dimensional np.array)
    :param Y: response variables (1 dimensional np.array)
    :param l: regularization parameter lambda
    :param epsilon: approximation strength
    :param max_steps: maximum number of iterations before algorithm will
        terminate.
    :return: value of beta (1 dimensional np.array)
    """
    beta = np.zeros(X.shape[1])
    n = X.shape[0]
    for s in range(max_steps):
        # Ridge-loss gradient divided by the number of samples (kept
        # inline so this function is self-contained).
        grad = (2.0 * X.T.dot(X.dot(beta) - Y) + 2.0 * l * beta) / n
        # Converged: gradient is effectively zero.
        if np.linalg.norm(grad) < epsilon:
            break
        beta = beta - step_size * grad
    return beta
def ridge_loss_gradient(X, Y, beta, l):
    """
    This function calculates the gradient for ridge regression for
    parameter values beta.

    Gradient of ``||Y - X beta||^2 + l * ||beta||^2`` divided by the
    number of samples.

    :param X: data matrix (2 dimensional np.array)
    :param Y: response variables (1 dimensional np.array)
    :param beta: value of beta (1 dimensional np.array)
    :param l: regularization parameter lambda
    :return: normalized gradient, i.e. gradient normalized according to data
    """
    n = X.shape[0]
    # d/dbeta ||Y - X beta||^2 = 2 X^T (X beta - Y); ridge term adds 2 l beta.
    return (2.0 * X.T.dot(X.dot(beta) - Y) + 2.0 * l * beta) / n
def loss(X, Y, beta):
    """
    Calculate sum of error squares divided by number of points.

    :param X: data matrix (2 dimensional np.array)
    :param Y: response variables (1 dimensional np.array)
    :param beta: value of beta (1 dimensional np.array)
    :return: 1/N * SUM (y - x beta)^2
    """
    # The original body was a bare `return` (always None); implement the
    # documented mean-squared-error.
    residual = Y - X.dot(beta)
    return residual.dot(residual) / X.shape[0]
def d_dimensional_comparison(d, beta_star, num_points, sigma, l=1):
    """Compare the analytic and gradient-descent ridge solutions.

    Generates ``num_points`` random d-dimensional samples with Gaussian
    noise ``sigma`` around ``X . beta_star``, fits both estimators, and
    exits the process with status 1 (after printing a diagnostic) if the
    estimates disagree; prints ``Passed`` otherwise.
    """
    # Generate data, no need to touch this code.
    beta_star = np.array(beta_star)
    X_list = [np.random.uniform(-1, 1, num_points) for _ in range(d)]
    X = np.column_stack(X_list)
    # Prepend a column of ones for the intercept term.
    X = np.column_stack((np.ones(num_points), X))
    Y = np.random.normal(X.dot(beta_star), sigma)
    # Calculate analytic and gradient descent beta hats.
    beta_hat_analytic = fit_ridge_regression(X, Y, l=l)
    beta_hat_grad = gradient_descent(X, Y, l=l, epsilon=1e-8, step_size=1e-2,
                                     max_steps=10000)
    # Testing code for your estimates.
    if np.linalg.norm(beta_star - beta_hat_analytic) > 1.:
        print('Your analytical betas is too far apart from beta star')
        print('Analytical: ', beta_hat_analytic)
        print('Beta star: ', beta_star)
        sys.exit(1)
    if np.linalg.norm(beta_hat_grad - beta_hat_analytic) > 1e-4:
        print('Your gradient descent beta is too far apart from analytical '
              'solution')
        print('Beta gradient: ', beta_hat_grad)
        print('Analytical: ', beta_hat_analytic)
        sys.exit(1)
    l_a = loss(X, Y, beta_hat_analytic)
    l_gd = loss(X, Y, beta_hat_grad)
    # Relative loss gap must be tiny for the two fits to count as equal.
    if abs((l_a - l_gd) / l_a) > 1e-8:
        print('Your gradient and analytical losses are too far apart')
        print('analytical loss:', l_a)
        print('gradient loss:', l_gd)
        sys.exit(1)
    print('Passed')
if __name__ == '__main__':
    # First test the signature of your gradient descent function.
    beta_est = gradient_descent(np.array([[1, 2], [1, 3], [1, 4], [1, 5]]),
                                np.array([2, 3, 4, 5.01]),
                                l=0,
                                epsilon=1e-4,
                                step_size=1e-3,
                                max_steps=2)
    assert beta_est.shape == (2,)
    # Call comparison function with the given 5-dimensional beta (b0, ..., b5)
    beta5d = [1.5, 2.2, 3.5, 4.4, 1.1, 3.9]
    d_dimensional_comparison(5, beta5d, 200, 2, l=0.)
| [
"arsen@mamikonyan.am"
] | arsen@mamikonyan.am |
def arithmetic_arranger(problems, solutions=False):
    """Arrange a list of arithmetic problems vertically.

    :param problems: list of strings such as ``"32 + 698"`` -- two
        operands and a ``+``/``-`` operator separated by single spaces
    :param solutions: when True, append a fourth line with the results
    :return: the arranged problems as one string, or an error-message
        string when the input is invalid
    """
    # Validate up front, before any formatting work (the original only
    # detected operator/length errors after computing results).
    if len(problems) > 5:
        return "Error: Too many problems."
    l1 = ""
    l2 = ""
    l3 = ""
    l4 = ""
    for pair, case in enumerate(problems):
        numb1, symbol, numb2 = case.split()
        if not numb1.isdigit() or not numb2.isdigit():
            return "Error: Numbers must only contain digits."
        if symbol not in ("+", "-"):
            return "Error: Operator must be '+' or '-'."
        if len(numb1) > 4 or len(numb2) > 4:
            return "Error: Numbers cannot be more than four digits."
        a = int(numb1)
        b = int(numb2)
        result = a - b if symbol == "-" else a + b
        # Column width: widest operand plus two (operator and a space).
        numb_len = len(max([numb1, numb2], key=len))
        l1 += numb1.rjust(numb_len + 2)
        l2 += symbol + numb2.rjust(numb_len + 1)
        l3 += "-" * (numb_len + 2)
        l4 += str(result).rjust(numb_len + 2)
        if pair < len(problems) - 1:
            # Separator kept as in the original: len(problems)-1 spaces.
            # NOTE(review): the freeCodeCamp spec asks for exactly four
            # spaces between problems -- TODO confirm the intended width.
            sep = " " * (len(problems) - 1)
            l1 += sep
            l2 += sep
            l3 += sep
            l4 += sep
    arranged_problems = l1 + "\n" + l2 + "\n" + l3
    if solutions:
        arranged_problems += "\n" + l4
    return arranged_problems
print(arithmetic_arranger(["32 + 698", "3801 - 2", "45 + 43", "123 + 49"],True)) | [
"noreply@github.com"
] | Radoslav681.noreply@github.com |
74108a22b91ad3b4c6b46bc638f052f5195fb339 | e030b26ea0f45eda5a25bf18051e9231e604fdd5 | /doc/source/sphinxext/numpy_ext/docscrape_sphinx.py | bcf7e70731cc798b73e4f22a48c25d361f65c6d1 | [
"CECILL-B",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | neurospin/piws | d246dc1925c563964309e53f36fc44e48f929cf7 | 4ec6f60c6343623a82761c90c74642b4b372ffd1 | refs/heads/master | 2021-01-17T03:49:35.787846 | 2018-10-15T09:44:39 | 2018-10-15T09:44:39 | 32,163,903 | 0 | 17 | NOASSERTION | 2020-10-14T12:56:38 | 2015-03-13T15:29:57 | HTML | UTF-8 | Python | false | false | 8,004 | py | import re
import inspect
import textwrap
import pydoc
import sphinx
from docscrape import NumpyDocString
from docscrape import FunctionDoc
from docscrape import ClassDoc
class SphinxDocString(NumpyDocString):
    """NumpyDocString subclass that renders sections as Sphinx/reST markup.

    Each ``_str_*`` helper returns a list of output lines; ``__str__``
    concatenates them into the final reST document.  NOTE: this module is
    Python 2 code (it uses ``dict.iteritems``).
    """

    def __init__(self, docstring, config=None):
        config = {} if config is None else config
        self.use_plots = config.get('use_plots', False)
        NumpyDocString.__init__(self, docstring, config=config)

    # string conversion routines
    def _str_header(self, name, symbol='`'):
        # `symbol` is accepted for API compatibility but unused here.
        return ['.. rubric:: ' + name, '']

    def _str_field_list(self, name):
        return [':' + name + ':']

    def _str_indent(self, doc, indent=4):
        # Prefix every line of `doc` with `indent` spaces.
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        # Signature rendering is deliberately disabled; everything after
        # the first return is intentionally unreachable.
        return ['']
        if self['Signature']:
            return ['``%s``' % self['Signature']] + ['']
        else:
            return ['']

    def _str_summary(self):
        return self['Summary'] + ['']

    def _str_extended_summary(self):
        return self['Extended Summary'] + ['']

    def _str_param_list(self, name):
        # Render Parameters/Returns/Raises entries as a reST field list.
        out = []
        if self[name]:
            out += self._str_field_list(name)
            out += ['']
            for param, param_type, desc in self[name]:
                out += self._str_indent(['**%s** : %s' % (param.strip(),
                                                          param_type)])
                out += ['']
                out += self._str_indent(desc, 8)
                out += ['']
        return out

    @property
    def _obj(self):
        # The documented object, whichever attribute the subclass set.
        if hasattr(self, '_cls'):
            return self._cls
        elif hasattr(self, '_f'):
            return self._f
        return None

    def _str_member_list(self, name):
        """
        Generate a member listing, autosummary:: table where possible,
        and a table where not.
        """
        out = []
        if self[name]:
            out += ['.. rubric:: %s' % name, '']
            prefix = getattr(self, '_name', '')

            if prefix:
                prefix = '~%s.' % prefix

            autosum = []
            others = []
            for param, param_type, desc in self[name]:
                param = param.strip()
                # Members that exist on the object go into an autosummary
                # table; anything else falls back to a plain reST table.
                if not self._obj or hasattr(self._obj, param):
                    autosum += ["   %s%s" % (prefix, param)]
                else:
                    others.append((param, param_type, desc))

            if autosum:
                # GAEL: Toctree commented out below because it creates
                # hundreds of sphinx warnings
                # out += ['.. autosummary::', '   :toctree:', '']
                out += ['.. autosummary::', '']
                out += autosum

            if others:
                maxlen_0 = max([len(x[0]) for x in others])
                maxlen_1 = max([len(x[1]) for x in others])
                hdr = "=" * maxlen_0 + "  " + "=" * maxlen_1 + "  " + "=" * 10
                fmt = '%%%ds  %%%ds  ' % (maxlen_0, maxlen_1)
                n_indent = maxlen_0 + maxlen_1 + 4
                out += [hdr]
                for param, param_type, desc in others:
                    out += [fmt % (param.strip(), param_type)]
                    out += self._str_indent(desc, n_indent)
                out += [hdr]
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += ['']
            content = textwrap.dedent("\n".join(self[name])).split("\n")
            out += content
            out += ['']
        return out

    def _str_see_also(self, func_role):
        out = []
        if self['See Also']:
            see_also = super(SphinxDocString, self)._str_see_also(func_role)
            out = ['.. seealso::', '']
            # Skip the base implementation's own header (first two lines).
            out += self._str_indent(see_also[2:])
        return out

    def _str_warnings(self):
        out = []
        if self['Warnings']:
            out = ['.. warning::', '']
            out += self._str_indent(self['Warnings'])
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        if len(idx) == 0:
            return out

        out += ['.. index:: %s' % idx.get('default', '')]
        for section, references in idx.iteritems():
            if section == 'default':
                continue
            elif section == 'refguide':
                out += ['   single: %s' % (', '.join(references))]
            else:
                out += ['   %s: %s' % (section, ','.join(references))]
        return out

    def _str_references(self):
        out = []
        if self['References']:
            out += self._str_header('References')
            if isinstance(self['References'], str):
                self['References'] = [self['References']]
            out.extend(self['References'])
            out += ['']
            # Latex collects all references to a separate bibliography,
            # so we need to insert links to it
            if sphinx.__version__ >= "0.6":
                out += ['.. only:: latex', '']
            else:
                out += ['.. latexonly::', '']
            items = []
            for line in self['References']:
                m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
                if m:
                    items.append(m.group(1))
            out += ['   ' + ", ".join(["[%s]_" % item for item in items]), '']
        return out

    def _str_examples(self):
        examples_str = "\n".join(self['Examples'])

        # Promote matplotlib-based examples to a `.. plot::` directive when
        # plotting is enabled and the example is not already a plot block.
        if (self.use_plots and 'import matplotlib' in examples_str
                and 'plot::' not in examples_str):
            out = []
            out += self._str_header('Examples')
            out += ['.. plot::', '']
            out += self._str_indent(self['Examples'])
            out += ['']
            return out
        else:
            return self._str_section('Examples')

    def __str__(self, indent=0, func_role="obj"):
        # Assemble the full reST document in a fixed section order.
        out = []
        out += self._str_signature()
        out += self._str_index() + ['']
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_warnings()
        out += self._str_see_also(func_role)
        out += self._str_section('Notes')
        out += self._str_references()
        out += self._str_examples()
        for param_list in ('Attributes', 'Methods'):
            out += self._str_member_list(param_list)
        out = self._str_indent(out, indent)
        return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """Sphinx-flavoured documentation wrapper for plain functions."""

    def __init__(self, obj, doc=None, config={}):
        # NOTE(review): mutable default `config={}` is only ever read here,
        # never mutated, so the shared-default pitfall is benign.
        self.use_plots = config.get('use_plots', False)
        FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """Sphinx-flavoured documentation wrapper for classes."""

    def __init__(self, obj, doc=None, func_doc=None, config={}):
        self.use_plots = config.get('use_plots', False)
        # NOTE(review): the `func_doc` argument is accepted but not
        # forwarded (None is passed instead) -- upstream numpydoc does the
        # same; confirm before changing.
        ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
    """Wrapper for an arbitrary object whose docstring text is supplied."""

    def __init__(self, obj, doc=None, config=None):
        # Keep a reference to the wrapped object (read via the `_obj`
        # property); parsing is done on the provided docstring text.
        self._f = obj
        SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
    """Return the appropriate Sphinx docstring wrapper for *obj*.

    ``what`` selects the wrapper kind ('class', 'function'/'method', or
    anything else for a generic object); when None it is inferred from
    *obj* itself.
    """
    if what is None:
        # Infer the object's category when the caller did not specify one.
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'
    if what == 'class':
        return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
                              config=config)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, doc=doc, config=config)
    # Generic fallback: fetch the docstring if one was not supplied.
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxObjDoc(obj, doc, config=config)
| [
"antoine.grigis@cea.fr"
] | antoine.grigis@cea.fr |
51439ebd8e1773b758429994ad7c5900941e9235 | 99ae8cc30885cb5345ee896792418f4793b5a0b4 | /result_analysis/views/subject.py | 342baf001aa379b8460dc29abdcb9b1736d66ee1 | [] | no_license | hishamalip/asd_lab | 9c4b50cd6fc6d9d65c093aa9875df08f92a0e81d | 86682c2f94deb738dd129f1ea4c6f0365a9207a3 | refs/heads/master | 2020-07-10T02:57:13.339222 | 2019-12-16T04:28:33 | 2019-12-16T04:28:33 | 204,149,205 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 36,222 | py | # importing tabula python library for extracting data from PDF
import tabula
# storing data to 'df' variable
# NOTE(review): assumes a tabula-py version where read_pdf returns a single
# DataFrame; newer versions return a list of DataFrames -- confirm.
df = tabula.read_pdf("s4.pdf", pages='all')
# converting input pdf to csv format
#tabula.convert_into("s4.pdf", "subject.csv", output_format="csv", pages='all')
# storing data to x in array format
x = df.to_numpy()
# count_dept() : A function for returing the number of students appeared for exam
# start_index : Starting index of each department
def display(pass_count, fail_count, percentage):
    """Print a short summary of one department's exam results.

    :param pass_count: number of students who passed
    :param fail_count: number of students who failed
    :param percentage: pass percentage (printed rounded to two decimals)
    """
    appeared = pass_count + fail_count
    rounded_pct = round(float(percentage), 2)
    print("STUDENT APPEARED FOR EXAM =" + str(appeared))
    print("NUMBER OF STUDENTS PASSED =" + str(pass_count))
    print("NUMBER OF STUDENTS FAILED =" + str(fail_count))
    print("PASS PERCENTAGE =" + str(rounded_pct))
def count_dept(start_index):
    """Return the number of consecutive student rows from ``start_index``.

    Walks the extracted table ``x`` (module-level array built from the
    PDF) and counts rows until a row whose first two cells hold a float
    is reached -- presumably NaN padding from the PDF extraction marking
    the end of a department's section (TODO confirm against the data).
    """
    count = 0
    flag2 = 0  # set when a float cell is seen -> end of this section
    for i in range(start_index, len(x)):
        flag1 = 0  # set when the row's first two cells are non-float data
        for j in range(0, 2):
            # NaN/empty cells come through as Python floats; real cells
            # (register numbers, result text) are other types.
            if type(x[i][j]) == float:
                flag2 = 1
                break
            else:
                flag1 = 1
        if flag2 == 1:
            break
        if flag1 == 1:
            count = count + 1
    return count
# percentage() : A function for returning total number of students passed,failed and the pass percentage
# start_index : Starting index of each department
# count : Total count of students appeared for exam in each deaprtment
def percentage(start_index,count):
# civil engineering
global fhs210, phs210, dhs210, fma202, pma202, dma202, fce202, pce202, dce202, fce204, pce204, dce204, fce206, pce206, dce206
global fce208, pce208, dce208, fce232, pce232, dce232, fce234, pce234, dce234
fhs210= phs210= dhs210= fma202= pma202= dma202= fce202= pce202= dce202= fce204= pce204= dce204= fce206= pce206= dce206= 0
fce208= pce208= dce208= fce232= pce232= dce232= fce234= pce234= dce234= 0
# electrical and electronics
global fhs200, phs200, dhs200, fee202, pee202, dee202, fee204, pee204, dee204, fee206, pee206, dee206, fee208, pee208, dee208, fee232, pee232, dee232, fee234, pee234, dee234
fee202= pee202= dee202= fee204= pee204= dee204= fee206= pee206= dee206= fee208= pee208= dee208= fee232= pee232= dee232= fee234= pee234= dee234= fhs200= phs200= dhs2000 = 0
# mechanical
global fme202, pme202, dme202, fme204, pme204, dme204, fme206, pme206, dme206, fme220, pme220, dme220, fme232, pme232, dme232
fhs210= phs210= dhs210= fme202= pme202= dme202= fme204= pme204= dme204= fme206= pme206= dme206= fme220= pme220= dme220= fme232= pme232= dme232 = 0
# computer science
global fcs202, pcs202, dcs202, fcs204, pcs204, dcs204, fcs206, pcs206, dcs206, fcs208, pcs208, dcs208, fcs232, pcs232, dcs232, fcs234, pcs234, dcs234
fcs202= pcs202= dcs202= fcs204= pcs204= dcs204= fcs206= pcs206= dcs206= fcs208= pcs208= dcs208= fcs232= pcs232= dcs232= fcs234= pcs234= dcs234= 0
# industrial
global fme218, pme218, dme218, fie202, pie202, die202, fme222, pme222, dme222, fma208, pma208, dma208, fie232, pie232, die232
fme218= pme218= dme218= fie202= pie202= die202= fme222= pme222= dme222= fma208= pma208= dma208= fie232= pie232= die232 = 0
# electronics and communication
global fec202, pec202, dec202, fec206, pec206, dec206, fec208, pec208, dec208, fec230, pec230, dec230
fec202= pec202= dec202= fec206= pec206= dec206= fec208= pec208= dec208= fec230= pec230= dec230 = 0
# applied electronics
global fma204, pma204, dma204, fae202, pae202, dae202, fec204, pec204, dec204, fae204, pae204, dae204, fee216, pee216, dee216, fec232, pec232, dec232, fae232, pae232 ,dae232
fma204= pma204= dma204= fae202= pae202= dae202= fec204= pec204= dec204= fae204= pae204= dae204= fee216= pee216= dee216= fec232= pec232= dec232= fae232= pae232= dae232 = 0
end_index= start_index + count
for i in range(start_index, end_index):
flaghs210= flagma202= flagce202= flagce204= flagce206= flagce208= flagce232= flagce234 = 0
# electrical flags
flaghs200 = flagee202 = flagee204 = flagee206 = flagee208 = flagee232 = flagee234 = 0
# mechanical flags
flagme202 = flagme204 = flagme206 = flagme232 = flagme220 = 0
#computer flags
flagcs202 = flagcs204 = flagcs206 = flagcs208 = flagcs232 = flagcs234 = 0
# industrial flags
flagme218 = flagie202 = flagme222 = flagma208 = flagie232 = 0
# electronics and communication flags
flagec202 = flagec206 = flagec208 = flagec230 = 0
# applied electronics flags
flagma204= flagae202= flagec204= flagae204= flagee216= flagec232= flagae232= 0
for j in range(1, 2):
t = x[i][j]
########################## CIVIL ENGINEERING ####################################
if 'HS210' in t:
hs210=t.index('HS210')
hs210=hs210+5
if t[hs210] == '(':
if t[hs210+1] == 'F' or (t[hs210+1] == 'A' and t[hs210+2] == 'b') or (t[hs210+1] == 'D' and t[hs210+1] == 'e') or t[hs210+1] == 'T':
# print(x[i][0])
fhs210=fhs210+1
flaghs210=1
if 'MA202' in t:
ma202=t.index('MA202')
ma202=ma202 + 5
if t[ma202] == '(':
if t[ma202+1] == 'F' or (t[ma202+ 1] == 'A' and t[ma202+ 2] == 'b') or (t[ma202+ 1] == 'D' and t[ma202+1] == 'e') or t[ma202+1] == 'T':
# print(x[i][0])
fma202=fma202+ 1
flagma202=1
if 'CE202' in t:
ce202=t.index('CE202')
ce202=ce202 + 5
if t[ce202] == '(':
if t[ce202+1] == 'F' or (t[ce202+1] == 'A' and t[ce202+2] == 'b') or (t[ce202+1] == 'D' and t[ce202+1] == 'e') or t[ce202+1] == 'T':
fce202=fce202+1
flagce202=1
if 'CE204' in t:
ce204=t.index('CE204')
ce204=ce204 + 5
if t[ce204] == '(':
if t[ce204+1] == 'F' or (t[ce204+1] == 'A' and t[ce204+2] == 'b') or (t[ce204+1] == 'D' and t[ce204+1] == 'e') or t[ce204+1] == 'T':
fce204=fce204+ 1
flagce204= 1
if 'CE206' in t:
ce206=t.index('CE206')
ce206=ce206 + 5
if t[ce206] == '(':
if t[ce206+1] == 'F' or (t[ce206+1] == 'A' and t[ce206+2] == 'b') or (t[ce206+1] == 'D' and t[ce206+1] == 'e') or t[ce206+1] == 'T':
fce206=fce206+ 1
flagce206= 1
if 'CE208' in t:
ce208=t.index('CE208')
ce208=ce208 + 5
if t[ce208] == '(':
if t[ce208+1] == 'F' or (t[ce208+1] == 'A' and t[ce208+2] == 'b') or (t[ce208+1] == 'D' and t[ce208+1] == 'e') or t[ce208+1] == 'T':
fce208=fce208+ 1
flagce208= 1
if 'CE232' in t:
ce232=t.index('CE232')
ce232=ce232 + 5
if t[ce232] == '(':
if t[ce232+1] == 'F' or (t[ce232+1] == 'A' and t[ce232+2] == 'b') or (t[ce232+1] == 'D' and t[ce232+1] == 'e') or t[ce232+1] == 'T':
fce232=fce232+ 1
flagce232= 1
if 'CE234' in t:
ce234=t.index('CE234')
ce234=ce234 + 5
if t[ce234] == '(':
if t[ce234+1] == 'F' or (t[ce234+1] == 'A' and t[ce234+2] == 'b') or (t[ce234+1] == 'D' and t[ce234+1] == 'e') or t[ce234+1] == 'T':
fce234=fce234+ 1
flagce234 = 1
############################## ELECTRICAL AND ELECTRONICS ENGINEERING[Full Time] #######################################
if 'HS200' in t:
hs200=t.index('HS200')
hs200=hs200 + 5
if t[hs200] == '(':
if t[hs200+1] == 'F' or (t[hs200+1] == 'A' and t[hs200+2] == 'b') or (t[hs200+1] == 'D' and t[hs200+1] == 'e') or t[hs200+1] == 'T':
fhs200=fhs200+1
flaghs200=1
if 'EE202' in t:
ee202=t.index('EE202')
ee202=ee202 + 5
if t[ee202] == '(':
if t[ee202+1] == 'F' or (t[ee202+1] == 'A' and t[ee202+2] == 'b') or (t[ee202+1] == 'D' and t[ee202+1] == 'e') or t[ee202+1] == 'T':
fee202=fee202+1
flagee202=1
if 'EE204' in t:
ee204=t.index('EE204')
ee204=ee204 + 5
if t[ee204] == '(':
if t[ee204+1] == 'F' or (t[ee204+1] == 'A' and t[ee204+2] == 'b') or (t[ee204+1] == 'D' and t[ee204+1] == 'e') or t[ee204+1] == 'T':
fee204=fee204+1
flagee204=1
if 'EE206' in t:
ee206=t.index('EE206')
ee206=ee206 + 5
if t[ee206] == '(':
if t[ee206+1] == 'F' or (t[ee206+1] == 'A' and t[ee206+2] == 'b') or (t[ee206+1] == 'D' and t[ee206+1] == 'e') or t[ee206+1] == 'T':
fee206=fee206+1
flagee206=1
if 'EE208' in t:
ee208=t.index('EE208')
ee208=ee208 + 5
if t[ee208] == '(':
if t[ee208+1] == 'F' or (t[ee208+1] == 'A' and t[ee208+2] == 'b') or (t[ee208+1] == 'D' and t[ee208+1] == 'e') or t[ee208+1] == 'T':
fee208=fee208+1
flagee208=1
if 'EE232' in t:
ee232=t.index('EE232')
ee232=ee232 + 5
if t[ee232] == '(':
if t[ee232+1] == 'F' or (t[ee232+1] == 'A' and t[ee232+2] == 'b') or (t[ee232+1] == 'D' and t[ee232+1] == 'e') or t[ee232+1] == 'T':
fee232=fee232+1
flagee232=1
if 'EE234' in t:
ee234=t.index('EE234')
ee234=ee234 + 5
if t[ee234] == '(':
if t[ee234+1] == 'F' or (t[ee234+1] == 'A' and t[ee234+2] == 'b') or (t[ee234+1] == 'D' and t[ee234+1] == 'e') or t[ee234+1] == 'T':
fee234=fee234+1
flagee234=1
############################## MECHANICAL ENGINEERING[Full Time] #######################################
if 'ME202' in t:
me202=t.index('ME202')
me202=me202 + 5
if t[me202] == '(':
if t[me202+1] == 'F' or (t[me202+1] == 'A' and t[me202+2] == 'b') or (t[me202+1] == 'D' and t[me202+1] == 'e') or t[me202+1] == 'T':
fme202=fme202+1
flagme202=1
if 'ME204' in t:
me204=t.index('ME204')
me204=me204 + 5
if t[me204] == '(':
if t[me204+1] == 'F' or (t[me204+1] == 'A' and t[me204+2] == 'b') or (t[me204+1] == 'D' and t[me204+1] == 'e') or t[me204+1] == 'T':
fme204=fme204+1
flagme204=1
if 'ME206' in t:
me206=t.index('ME206')
me206=me206 + 5
if t[me206] == '(':
if t[me206+1] == 'F' or (t[me206+1] == 'A' and t[me206+2] == 'b') or (t[me206+1] == 'D' and t[me206+1] == 'e') or t[me206+1] == 'T':
fme206=fme206+1
flagme206=1
if 'ME220' in t:
me220=t.index('ME220')
me220=me220 + 5
if t[me220] == '(':
if t[me220+1] == 'F' or (t[me220+1] == 'A' and t[me220+2] == 'b') or (t[me220+1] == 'D' and t[me220+1] == 'e') or t[me220+1] == 'T':
fme220=fme220+1
flagme220=1
if 'ME232' in t:
me232=t.index('ME232')
me232=me232 + 5
if t[me232] == '(':
if t[me232+1] == 'F' or (t[me232+1] == 'A' and t[me232+2] == 'b') or (t[me232+1] == 'D' and t[me232+1] == 'e') or t[me232+1] == 'T':
fme232=fme232+1
flagme232=1
################### COMPUTER SCIENCE & ENGINEERING ####################################
if 'CS202' in t:
cs202=t.index('CS202')
cs202=cs202 + 5
if t[cs202] == '(':
if t[cs202+1] == 'F' or (t[cs202+1] == 'A' and t[cs202+2] == 'b') or (t[cs202+1] == 'D' and t[cs202+1] == 'e') or t[cs202+1] == 'T':
fcs202=fcs202+ 1
flagcs202= 1
if 'CS204' in t:
cs204=t.index('CS204')
cs204=cs204 + 5
if t[cs204] == '(':
if t[cs204+1] == 'F' or (t[cs204+1] == 'A' and t[cs204+2] == 'b') or (t[cs204+1] == 'D' and t[cs204+1] == 'e') or t[cs204+1] == 'T':
fcs204=fcs204+1
flagcs204=1
if 'CS206' in t:
cs206=t.index('CS206')
cs206=cs206 + 5
if t[cs206] == '(':
if t[cs206+1] == 'F' or (t[cs206+1] == 'A' and t[cs206+2] == 'b') or (t[cs206+1] == 'D' and t[cs206+1] == 'e') or t[cs206+1] == 'T':
fcs206=fcs206+1
flagcs206=1
if 'CS208' in t:
cs208=t.index('CS208')
cs208=cs208 + 5
if t[cs208] == '(':
if t[cs208+1] == 'F' or (t[cs208+1] == 'A' and t[cs208+2] == 'b') or (t[cs208+1] == 'D' and t[cs208+1] == 'e') or t[cs208+1] == 'T':
fcs208=fcs208+1
flagcs208=1
if 'CS232' in t:
cs232=t.index('CS232')
cs232=cs232 + 5
if t[cs232] == '(':
if t[cs232+1] == 'F' or (t[cs232+1] == 'A' and t[cs232+2] == 'b') or (t[cs232+1] == 'D' and t[cs232+1] == 'e') or t[cs232+1] == 'T':
fcs232=fcs232+1
flagcs232=1
if 'CS234' in t:
cs234=t.index('CS234')
cs234=cs234 + 5
if t[cs234] == '(':
if t[cs234+1] == 'F' or (t[cs234+1] == 'A' and t[cs234+2] == 'b') or (t[cs234+1] == 'D' and t[cs234+1] == 'e') or t[cs234+1] == 'T':
fcs234=fcs234+1
flagcs234=1
############################## INDUSTRIAL ENGINEERING[Full Time] #######################################
if 'ME218' in t:
me218=t.index('ME218')
me218=me218 + 5
if t[me218] == '(':
if t[me218+1] == 'F' or (t[me218+1] == 'A' and t[me218+2] == 'b') or (t[me218+1] == 'D' and t[me218+1] == 'e') or t[me218+1] == 'T':
fme218=fme218+1
flagme218=1
if 'IE202' in t:
ie202=t.index('IE202')
ie202=ie202 + 5
if t[ie202] == '(':
if t[ie202+1] == 'F' or (t[ie202+1] == 'A' and t[ie202+2] == 'b') or (t[ie202+1] == 'D' and t[ie202+1] == 'e') or t[ie202+1] == 'T':
fie202=fie202+1
flagie202=1
if 'ME222' in t:
me222=t.index('ME222')
me222=me222 + 5
if t[me222] == '(':
if t[me222+1] == 'F' or (t[me222+1] == 'A' and t[me222+2] == 'b') or (t[me222+1] == 'D' and t[me222+1] == 'e') or t[me222+1] == 'T':
fme222=fme222+1
flagme222=1
if 'MA208' in t:
ma208=t.index('MA208')
ma208=ma208 + 5
if t[ma208] == '(':
if t[ma208+1] == 'F' or (t[ma208+1] == 'A' and t[ma208+2] == 'b') or (t[ma208+1] == 'D' and t[ma208+1] == 'e') or t[ma208+1] == 'T':
fma208=fma208+1
flagma208=1
if 'IE232' in t:
ie232=t.index('IE232')
ie232=ie232 + 5
if t[ie232] == '(':
if t[ie232+1] == 'F' or (t[ie232+1] == 'A' and t[ie232+2] == 'b') or (t[ie232+1] == 'D' and t[ie232+1] == 'e') or t[ie232+1] == 'T':
fie232=fie232+1
flagie232=1
############################## ELECTRONICS & COMMUNICATION ENGG[Full Time] #######################################
if 'EC202' in t:
ec202=t.index('EC202')
ec202=ec202 + 5
if t[ec202] == '(':
if t[ec202+1] == 'F' or (t[ec202+1] == 'A' and t[ec202+2] == 'b') or (t[ec202+1] == 'D' and t[ec202+1] == 'e') or t[ec202+1] == 'T':
fec202=fec202+1
flagec202=1
if 'EC206' in t:
ec206=t.index('EC206')
ec206=ec206 + 5
if t[ec206] == '(':
if t[ec206+1] == 'F' or (t[ec206+1] == 'A' and t[ec206+2] == 'b') or (t[ec206+1] == 'D' and t[ec206+1] == 'e') or t[ec206+1] == 'T':
fec206=fec206+1
flagec206=1
if 'EC208' in t:
ec208=t.index('EC208')
ec208=ec208 + 5
if t[ec208] == '(':
if t[ec208+1] == 'F' or (t[ec208+1] == 'A' and t[ec208+2] == 'b') or (t[ec208+1] == 'D' and t[ec208+1] == 'e') or t[ec208+1] == 'T':
fec208=fec208+1
flagec208=1
if 'EC230' in t:
ec230=t.index('EC230')
ec230=ec230 + 5
if t[ec230] == '(':
if t[ec230+1] == 'F' or (t[ec230+1] == 'A' and t[ec230+2] == 'b') or (t[ec230+1] == 'D' and t[ec230+1] == 'e') or t[ec230+1] == 'T':
fec230=fec230+1
flagec230=1
######################### APPLIED ELECTRONICS & INSTRUMENTATION ENGINEERING[Full Time] #########################################
if 'MA204' in t:
ma204=t.index('MA204')
ma204=ma204 + 5
if t[ma204] == '(':
if t[ma204+1] == 'F' or (t[ma204+1] == 'A' and t[ma204+2] == 'b') or (t[ma204+1] == 'D' and t[ma204+1] == 'e') or t[ma204+1] == 'T' or t[ma204+1] == 'i':
fma204=fma204+1
flagma204=1
if 'AE202' in t:
ae202=t.index('AE202')
ae202=ae202 + 5
if t[ae202] == '(':
if t[ae202+1] == 'F' or (t[ae202+1] == 'A' and t[ae202+2] == 'b') or (t[ae202+1] == 'D' and t[ae202+1] == 'e') or t[ae202+1] == 'T':
fae202=fae202+1
flagae202=1
if 'EC204' in t:
ec204=t.index('EC204')
ec204=ec204 + 5
if t[ec204] == '(':
if t[ec204+1] == 'F' or (t[ec204+1] == 'A' and t[ec204+2] == 'b') or (t[ec204+1] == 'D' and t[ec204+1] == 'e') or t[ec204+1] == 'T':
fec204=fec204+1
flagec204=1
if 'AE204' in t:
ae204=t.index('AE204')
ae204=ae204 + 5
if t[ae204] == '(':
if t[ae204+1] == 'F' or (t[ae204+1] == 'A' and t[ae204+2] == 'b') or (t[ae204+1] == 'D' and t[ae204+1] == 'e') or t[ae204+1] == 'T':
fae204=fae204+1
flagae204=1
if 'EE216' in t:
ee216=t.index('EE216')
ee216=ee216 + 5
if t[ee216] == '(':
if t[ee216+1] == 'F' or (t[ee216+1] == 'A' and t[ee216+2] == 'b') or (t[ee216+1] == 'D' and t[ee216+1] == 'e') or t[ee216+1] == 'T':
fee216=fee216+1
flagee216=1
if 'EC232' in t:
ec232=t.index('EC232')
ec232=ec232 + 5
if t[ec232] == '(':
if t[ec232+1] == 'F' or (t[ec232+1] == 'A' and t[ec232+2] == 'b') or (t[ec232+1] == 'D' and t[ec232+1] == 'e') or t[ec232+1] == 'T':
fec232=fec232+1
flagec232=1
if 'AE232' in t:
ae232=t.index('AE232')
ae232=ae232 + 5
if t[ae232] == '(':
if t[ae232+1] == 'F' or (t[ae232+1] == 'A' and t[ae232+2] == 'b') or (t[ae232+1] == 'D' and t[ae232+1] == 'e') or t[ae232+1] == 'T':
fae232=fae232+1
flagae232=1
# Civil engineering
if flaghs210 == 0:
phs210=phs210+1
if flagma202 == 0:
pma202=pma202+1
if flagce202 == 0:
pce202=pce202+1
if flagce204 == 0:
pce204=pce204+1
if flagce206 == 0:
pce206= pce206+1
if flagce208 == 0:
pce208= pce208+1
if flagce232 == 0:
pce232= pce232+1
if flagce234 == 0:
pce234= pce234+1
# electrical
if flaghs200 == 0:
phs200=phs200+1
if flagee202 == 0:
pee202=pee202+1
if flagee204 == 0:
pee204=pee204+1
if flagee206 == 0:
pee206=pee206+1
if flagee208 == 0:
pee208=pee208+1
if flagee232 == 0:
pee232=pee232+1
if flagee234 == 0:
pee234=pee234+1
# Mechanical
if flagme202 == 0:
pme202=pme202+1
if flagme204 == 0:
pme204=pme204+1
if flagme206 == 0:
pme206=pme206+1
if flagme220 == 0:
pme220=pme220+1
if flagme232 == 0:
pme232=pme232+1
#computer science
if flagcs202 == 0:
pcs202=pcs202+1
if flagcs204 == 0:
pcs204=pcs204+1
if flagcs206 == 0:
pcs206=pcs206+1
if flagcs208 == 0:
pcs208=pcs208+1
if flagcs232 == 0:
pcs232=pcs232+1
if flagcs234 == 0:
pcs234=pcs234+1
# industrial
if flagme218 == 0:
pme218 = pme218 + 1
if flagie202 == 0:
pie202 = pie202 + 1
if flagme222 == 0:
pme222 = pme222 + 1
if flagma208 == 0:
pma208 = pma208 + 1
if flagie232 == 0:
pie232 = pie232 + 1
# electronics and communication
if flagec202 == 0:
pec202 = pec202 + 1
if flagec206 == 0:
pec206 = pec206 + 1
if flagec208 == 0:
pec208 = pec208 + 1
if flagec230 == 0:
pec230 = pec230 + 1
#Applied Electronics
if flagma204 == 0:
pma204=pma204+1
if flagae202 == 0:
pae202=pae202+1
if flagec204 == 0:
pec204=pec204+1
if flagae204 == 0:
pae204=pae204+1
if flagee216 == 0:
pee216=pee216+1
if flagec232 == 0:
pec232=pec232+1
if flagae232 == 0:
pae232=pae232+1
#civil engineering
dhs210= (phs210*100)/count
dma202= (pma202*100)/count
dce202= (pce202*100)/count
dce204= (pce204*100)/count
dce206= (pce206*100)/count
dce208= (pce208*100)/count
dce232= (pce232*100)/count
dce234= (pce234*100)/count
# electrical and elctronics pass percentage
dhs200= (phs200*100)/count
dee202= (pee202*100)/count
dee204= (pee204*100)/count
dee206= (pee206*100)/count
dee208= (pee208*100)/count
dee232= (pee232*100)/count
dee234= (pee234*100)/count
# mechanical pass percentage
dme202= (pme202*100)/count
dme204= (pme204*100)/count
dme206= (pme206*100)/count
dme220= (pme220*100)/count
dme232= (pme232*100)/count
#computer science
dcs202= (pcs202*100)/count
dcs204= (pcs204*100)/count
dcs206= (pcs206*100)/count
dcs208= (pcs208*100)/count
dcs232= (pcs232*100)/count
dcs234= (pcs234*100)/count
# industrial pass percentage
dme218= (pme218*100)/count
die202= (pie202*100)/count
dme222= (pme222*100)/count
dma208= (pma208*100)/count
die232= (pie232*100)/count
# electronics and communication pass percentage
dec202 = (pec202*100)/count
dec206 = (pec206*100)/count
dec208 = (pec208*100)/count
dec230 = (pec230*100)/count
# applied electronics pass percentage
dma204= (pma204*100)/count
dae202= (pae202*100)/count
dec204= (pec204*100)/count
dae204= (pae204*100)/count
dee216= (pee216*100)/count
dec232= (pec232*100)/count
dae232= (pae232*100)/count
# Variables
percenatge_ce=0
percenatge_cs=0
percenatge_ec=0
percenatge_ee=0
percenatge_ae=0
percenatge_ie=0
percenatge_me=0
start_ce=0
start_ee=0
start_me=0
start_ie=0
start_ae=0
start_cs=0
start_ec=0
count_ce=0
count_ee=0
count_ec=0
count_ie=0
count_ae=0
count_cs=0
count_me=0
ce=0
ec=0
ee=0
me=0
ie=0
ec=0
cs=0
ae=0
# Loop which gives starting index of each department
for i in range(0,len(x)):
q=x[i][0]
if type(q) != float :
for j in range(0,len(q)):
if len(q) == 11 or len(q) ==10 :
# Finding starting index of civil engineering
if q[j] == 'C' and q[j+1 ] == 'E' and ce == 0:
#print("Civil Engineering")
dept='Civil Engineering'
ce=1
start_ce=i
break
# Finding starting index of electrical engineering
elif q[j] == 'E' and q[j+1] == 'E' and ee == 0:
#print("ELECTRICAL Engineering")
dept='Elctrical Engineering'
ee=1
start_ee=i
break
# Finding starting index of computer engineering
elif q[j] == 'C' and q[j+1] == 'S' and cs == 0:
#print("COMPUTER Engineering")
dept='Computer Engineering'
cs=1
start_cs=i
break
# Finding starting index of mechanical engineering
elif q[j] == 'M' and q[j+1] == 'E' and me == 0:
#print("MECHANICAL Engineering")
dept='mechanical Engineering'
me=1
start_me=i
break
# Finding starting index of industrial engineering
elif q[j] == 'I' and q[j+1] == 'E' and ie == 0:
#print("INDUSTRAIL Engineering")
dept='industrial Engineering'
ie=1
start_ie=i
break
# Finding starting index of applied engineering
elif q[j] == 'A' and q[j+1] == 'E' and ae==0:
#print("APPLIED Engineering")
dept='Applied Engineering'
ae=1
start_ae=i
break
# Finding starting index of electronics engineering
elif q[j] == 'E' and q[j+1] == 'C'and ec == 0:
#print("ELECTRONICS Engineering")
start_ec=i
dept='Electronics Engineering'
ec=1
break
count_ce=count_dept(start_ce)
count_me=count_dept(start_me)
count_cs=count_dept(start_cs)
count_ec=count_dept(start_ec)
count_ie=count_dept(start_ie)
count_ae=count_dept(start_ae)
count_ee=count_dept(start_ee)
print("CIVIL ENGINEERING ")
print(" ")
percentage(start_ce,count_ce)
print("PROBABILITY DISTRIBUTIONS, TRANSFORMS AND NUMERICAL METHODS ")
display(pma202,fma202,dma202)
print(" ")
print("STRUCTURAL ANALYSIS I")
display(pce202,fce202,dce202)
print(" ")
print("CONSTRUCTION TECHNOLOGY")
display(pce204,fce204,dce204)
print(" ")
print("FLUID MECHANICS II")
display(pce206,fce206,dce206)
print(" ")
print("GEOTECHNICAL ENGINEERING I")
display(pce208,fce208,dce208)
print(" ")
print("MATERIALS TESTING LAB I")
display(pce232,fce232,dce232)
print(" ")
print("FLUID MECHANICS LAB")
display(pce234,fce234,dce234)
print(" ")
print("LIFE SKILLS")
display(phs210,fhs210,dhs210)
print("-----------------------")
print(" ")
print("MECHANICAL ENGINEERING ")
print(" ")
percentage(start_me,count_me)
print("PROBABILITY DISTRIBUTIONS, TRANSFORMS AND NUMERICAL METHODS ")
display(pma202,fma202,dma202)
print(" ")
print("ADVANCED MECHANICS OF SOLIDS")
display(pme202,fme202,dme202)
print(" ")
print("THERMAL ENGINEERING")
display(pme204,fme204,dme204)
print(" ")
print("FLUID MACHINERY")
display(pme206,fme206,dme206)
print(" ")
print("MANUFACTURING TECHNOLOGY")
display(pme220,fme220,dme220)
print(" ")
print("THERMAL ENGINEERING LAB")
display(pme232,fme232,dme232)
print(" ")
print("LIFE SKILLS")
display(phs210,fhs210,dhs210)
print("-----------------------")
print(" ")
print("ELECTRICAL ENGINEERING ")
print(" ")
percentage(start_ee,count_ee)
print("PROBABILITY DISTRIBUTIONS, TRANSFORMS AND NUMERICAL METHODS ")
display(pma202,fma202,dma202)
print(" ")
print("SYNCHRONOUS AND INDUCTION MACHINES")
display(pee202,fee202,dee202)
print(" ")
print("DIGITAL ELECTRONICS AND LOGIC DESIGN")
display(pee204,fee204,dee204)
print(" ")
print("MATERIAL SCIENCE")
display(pee206,fee206,dee206)
print(" ")
print("MEASUREMENTS AND INSTRUMENTATION")
display(pee208,fee208,dee208)
print(" ")
print("ELECTRICAL MACHINES LAB I")
display(pee232,fee232,dee232)
print(" ")
print("CIRCUITS AND MEASUREMENTS LAB")
display(pee234,fee234,dee234)
print(" ")
print("BUSINESS ECONOMICS")
display(phs200,fhs200,dhs200)
print(" ")
print("INDUSTRIAL ENGINEERING ")
print(" ")
percentage(start_ie,count_ie)
print("INTRODUCTION TO STOCHASTIC MODELS")
display(pma208,fma208,dma208)
print(" ")
print("ELEMENTS OF MACHINE DESIGN")
display(pme218,fme218,dme218)
print(" ")
print("OBJECT ORIENTED PROGRAMMING & NUMERICAL METHODS THERMAL ENGINEERING II")
display(pie202,fie202,die202)
print(" ")
print("THERMAL ENGINEERING II")
display(pme222,fme222,dme222)
print(" ")
print("MANUFACTURING TECHNOLOGY")
display(pme220,fme220,dme220)
print(" ")
print("THERMAL ENGINEERING LAB")
display(pme232,fme232,dme232)
print(" ")
print("OBJECT ORIENTED PROGRAMMING LAB")
display(pie232,fie232,die232)
print(" ")
print("LIFE SKILLS")
display(phs210,fhs210,dhs210)
print(" ")
print("ELECTRONICS ENGINEERING ")
print(" ")
percentage(start_ec,count_ec)
print("PROBABILITY, RANDOM PROCESSES AND NUMERICAL METHODS")
display(pma204,fma204,dma204)
print(" ")
print("SIGNALS & SYSTEMS")
display(pec202,fec202,dec202)
print(" ")
print("ANALOG INTEGRATED CIRCUITS")
display(pee204,fee204,dee204)
print(" ")
print("COMPUTER ORGANIZATION")
display(pec206,fec206,dec206)
print(" ")
print("ANALOG COMMUNICATION ENGINEERING")
display(pee208,fee208,dee208)
print(" ")
print("ANALOG INTEGRATED CIRCUITS LAB")
display(pec232,fec232,dec232)
print(" ")
print("LOGIC CIRCUIT DESIGN LAB")
display(pec230,fec230,dec230)
print(" ")
print("BUSINESS ECONOMICS")
display(phs200,fhs200,dhs200)
print(" ")
print("COMPUTER SCIENCE AND ENGINEERING ")
print(" ")
percentage(start_cs,count_cs)
print("PROBABILITY DISTRIBUTIONS, TRANSFORMS AND NUMERICAL METHODS ")
display(pma202,fma202,dma202)
print(" ")
print("COMPUTER ORGANIZATION AND ARCHITECTURE")
display(pcs202,fcs202,dcs202)
print(" ")
print("OPERATING SYSTEMS")
display(pcs204,fcs204,dcs204)
print(" ")
print("OBJECT ORIENTED DESIGN AND PROGRAMMING")
display(pcs206,fcs206,dcs206)
print(" ")
print("PRINCIPLES OF DATABASE DESIGN")
display(pcs208,fcs208,dcs208)
print(" ")
print("FREE AND OPEN SOURCE SOFTWARE LAB")
display(pcs232,fcs232,dcs232)
print(" ")
print("DIGITAL SYSTEMS LAB")
display(pcs234,fcs234,dcs234)
print(" ")
print("BUSINESS ECONOMICS")
display(phs200,fhs200,dhs200)
print(" ")
print("APPLIED ELECTRONICS AND ENGINEERING ")
print(" ")
percentage(start_ae,count_ae)
print("PROBABILITY, RANDOM PROCESSES AND NUMERICAL METHODS ")
display(pma204,fma204,dma204)
print(" ")
print("COMPUTER PROGRAMMING")
display(pae202,fae202,dae202)
print(" ")
print("ANALOG INTEGRATED CIRCUITS")
display(pec204,fec204,dec204)
print(" ")
print("SENSORS AND TRANSDUCERS")
display(pae204,fae204,dae204)
print(" ")
print("ELECTRICAL ENGINEERING")
display(pee216,fee216,dee216)
print(" ")
print("BUSINESS ECONOMICS")
display(phs200,fhs200,dhs200)
print(" ")
print("ANALOG INTEGRATED CIRCUITS LAB")
display(pec232,fec232,dec232)
print(" ")
print("TRANSDUCERS AND INSTRUMENTATION LAB")
display(pae232,fae232,dae232)
print(" ")
| [
"hishamalip@gmail.com"
] | hishamalip@gmail.com |
d03994c74bdd9312e76cb0d873668757922371ac | a2bb841d56d652bfd55a1a4f153031b8c033d65d | /MNSIT.PY | 91682c0904ac7707485a44c77ad0091f45c5259e | [] | no_license | zhangyihao91/Neural-Network | ad91377e78ef30e86ee50b6fd6fa44de9418dbbd | 613939ee0e8c9dc895f6acbaa84126211f19c713 | refs/heads/master | 2023-01-05T08:55:13.531901 | 2020-11-04T04:33:18 | 2020-11-04T04:33:18 | 263,895,313 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,325 | py | import torch
import torchvision
import numpy as np
import torchvision.transforms as transforms
# Let cuDNN benchmark and pick the fastest conv algorithms; safe here because
# every batch has the same fixed 28x28 input size.
torch.backends.cudnn.benchmark = True
# Convert images to tensors and normalize the single grey channel.
# NOTE(review): (0.5) is just the scalar 0.5, not a 1-tuple (0.5,) — torchvision
# accepts both for single-channel data, but the tuple form is clearer.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5),(0.5))])
# MNIST train/test splits, downloaded to a hard-coded local path.
trainset = torchvision.datasets.MNIST(root='/home/zhang/Model',
                                      train=True,
                                      download=True,
                                      transform=transform)
trainloader = torch.utils.data.DataLoader(trainset,
                                          batch_size=100,
                                          shuffle=True,
                                          num_workers=2)
testset = torchvision.datasets.MNIST(root='/home/zhang/Model',
                                     train=False,
                                     download=True,
                                     transform=transform)
# No shuffling for evaluation.
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)
# Class labels: digits 0..9.
classes = tuple(np.linspace(0, 9, 10, dtype=np.uint8))
# NOTE(review): `device` is computed and printed, but the rest of the script
# calls .cuda() directly — it assumes a GPU is present.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
import torch.nn as nn
import torch.nn.functional as F
class mnist(nn.Module):
    """Small CNN classifier for 28x28 MNIST digits.

    Architecture: conv(1->32, 3x3) -> conv(32->64, 3x3) -> 2x2 max-pool ->
    channel dropout -> fc(9216->128) -> dropout -> fc(128->10) logits.
    """

    def __init__(self):
        super(mnist, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3)   # 28x28x1  -> 26x26x32
        self.conv2 = nn.Conv2d(32, 64, 3)  # 26x26x32 -> 24x24x64
        self.pool = nn.MaxPool2d(2, 2)     # 24x24x64 -> 12x12x64
        # Channel-wise dropout on the 4-D conv feature map.
        self.dropout1 = nn.Dropout2d()
        self.fc1 = nn.Linear(12 * 12 * 64, 128)
        # Fix: the original used nn.Dropout2d here, but at this point the
        # activations are a flat (batch, features) tensor; Dropout2d is meant
        # for (N, C, H, W) channel maps and warns/misbehaves on 2-D input.
        # Plain element-wise nn.Dropout is the correct layer (same p=0.5).
        self.dropout2 = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a (batch, 1, 28, 28) image batch to (batch, 10) class logits."""
        x = F.relu(self.conv1(x))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.dropout1(x)
        x = x.view(-1, 12 * 12 * 64)
        x = F.relu(self.fc1(x))
        x = self.dropout2(x)
        x = self.fc2(x)
        return x
import torch.optim as optim

# Model, loss and optimizer live on the GPU (script assumes CUDA is available).
model = mnist().cuda()
criterion = nn.CrossEntropyLoss().cuda()
# SGD with heavy Nesterov momentum and a small learning rate.
optimizer = optim.SGD(model.parameters(),
                      lr=0.0005, momentum=0.99, nesterov=True)
# Train for 20 epochs over the full training set.
for epoch in range(20):
    running_loss = 0.0
    for i, (inputs, labels) in enumerate(trainloader, 0):
        inputs = inputs.cuda()
        labels = labels.cuda()
        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # print statistics: report the mean loss every 100 mini-batches
        running_loss += loss.item()
        if i % 100 == 99:
            print('[{:d}, {:5d}] loss: {:.3f}'
                  .format(epoch + 1, i + 1, running_loss / 100))
            running_loss = 0.0
print('Finished Training')
# Evaluate classification accuracy on the held-out test set.
correct = 0
total = 0
with torch.no_grad():  # inference only — no gradient bookkeeping needed
    for (images, labels) in testloader:
        images = images.cuda()
        labels = labels.cuda()
        outputs = model(images)
        # Predicted class = index of the largest logit per row.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
# Fix: str.format() does not use %-escaping, so the original '%%' printed a
# literal double percent sign ("98.50 %%"); a single '%' is intended.
# (The float() wrapper was redundant: / is already true division.)
print('Accuracy: {:.2f} %'.format(100 * correct / total))
| [
"noreply@github.com"
] | zhangyihao91.noreply@github.com |
9030f24508ffc10dd013107931f14a0db38a1947 | b7a1a01b667c0b27b5f70f8c99c74a331de48817 | /learning_log/urls.py | 6069a82d176cc8015097199fef904c001e06c019 | [] | no_license | baha312/django_learning_log | 051f534d374f5e1a74f82245faf312bfbdf30012 | aee0a11c2c5d624e79755c1704021c5c31befd0f | refs/heads/master | 2023-08-15T15:00:30.582100 | 2020-04-19T19:39:54 | 2020-04-19T19:39:54 | 256,122,280 | 0 | 0 | null | 2021-09-22T18:53:26 | 2020-04-16T05:53:50 | Python | UTF-8 | Python | false | false | 898 | py | """learning_log URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Root URL configuration: the admin site, the user-auth app under /users/,
# and the main learning_logs app mounted at the site root.
# Order matters: Django resolves against the first matching pattern.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('users/', include('users.urls', namespace='users')),
    path('', include('learning_logs.urls', namespace='learning_logs')),
]
| [
"bahtiyarbolotbekov@gmail.com"
] | bahtiyarbolotbekov@gmail.com |
4de9520429448320a3720b569ff6ae90c4bae5ea | be33b68808ba8f4744c4ba970357e34362747ae1 | /videos_search/urls.py | c6ea7ad7e71991ca95a0e02bc17a66c9b2bc9604 | [] | no_license | apoorvkhare07/Youtube-search-apis | a37c2f065367c1ec79c72c17a6d409040a2b90cf | 4b023fbb80bf8a5ba6a9b1bcb162099055be0958 | refs/heads/master | 2020-05-03T14:52:47.142360 | 2019-04-16T15:02:17 | 2019-04-16T15:02:17 | 178,690,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from django.urls import path
import videos_search.views as views
# URL routes for the videos_search app: an HTML list view at the app root
# and a JSON endpoint under api/.
urlpatterns = [
    path('', views.VideoListView.as_view(), name='video-list-view'),
    path('api/', views.ApiListView, name='video-list-api')
]
"apoorvkhare007@gmail.com"
] | apoorvkhare007@gmail.com |
8db98c466976482d52ff8c44c908d36a78a4830c | 66ba8fe37044b65313164a98614b28ef4a220873 | /core/tests/msfrpcdev.py | b1a1c10ac8251eab000225117bc3180c17a48ad9 | [] | no_license | eroa/reconator | 022d64b59882427064738bbf242b82a56c8de760 | 33c767155508ec49a5536977f8e16a06823c3853 | refs/heads/master | 2021-10-27T04:15:20.383146 | 2019-04-15T22:20:48 | 2019-04-15T22:20:48 | 30,401,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,780 | py | import nmap
import msfrpc
# Connect and authenticate to the local Metasploit RPC daemon.
# NOTE(review): the RPC credentials are hard-coded; consider reading them
# from the environment or a config file instead.
client =msfrpc.Msfrpc({})
client.login('msf','caillou')
# TODO msfrpc support
# def sshenum(ho, po):
# try:
# res = client.call('console.create')
# console_id = res['id']
# except:
# print "Console create failed\r\n"
# sys.exit()
#
# cmd = """use auxiliary/scanner/snmp/snmp_loginset RHOSTS %srun """ % host_listclient.call('console.write',[console_id, cmd])
def do_scan(ipad):
    """Run an nmap service/version scan of `ipad` and dispatch per-service
    enumeration for every open port found.

    NOTE(review): the enumeration helpers (multProc, sshenum, snmpenum,
    ftpenum, smbenum, torenum, mssqlenum) are not defined in this file's
    visible code (a draft lives commented out above) — confirm they are
    provided elsewhere, otherwise every match raises NameError.
    """
    nmt = nmap.PortScanner()
    # -sV service/version detection, -sS SYN scan; requires root (sudo=True).
    nmt.scan(hosts=ipad, arguments='-sV -sS -T4 -nvvv -/tmp/msgrpcnamp',sudo=True)
    for host in nmt.all_hosts():
        for proto in nmt[host].all_protocols():
            print('Protocol : {0}'.format(proto))
            lport = list(nmt[host][proto].keys())
            lport.sort()
            for port in lport:
                state = nmt[host][proto][port]
                print('TCP port : {0}\tstate : {1}'.format(port, nmt[host][proto][port]))
                if "ssh" in str(state):
                    # NOTE(review): the message says "http" but the condition
                    # tests "ssh" — one of the two is probably a typo.
                    print "TCP PORT:" + str(port) + " gotcha (http via dict)!!!"
                    # formata = str(host)+":"+str(port)
                    multProc(sshenum, str(host), str(port))
                    #multProc(callscript, str(host), str(port))
                    # (------------------------------------')
                elif "ssh" in str(state):
                    # NOTE(review): dead branch — identical condition to the
                    # `if "ssh"` above, so this elif can never fire.
                    multProc(sshenum, str(host), str(port))
                elif "snmp" in str(state):
                    multProc(snmpenum, str(host), str(port))
                elif "ftp" in str(state):
                    multProc(ftpenum, str(host), str(port))
                elif "smb" in str(state):
                    multProc(smbenum, str(host), str(port))
                elif "tor" in str(state):
                    multProc(torenum, str(host), str(port))
                elif "ms-sql" in str(state):
                    multProc(mssqlenum, str(host), str(port))
    # for host in nm.allhosts():
    # ...
    # e if nm[host].has_tcp(9050):
    # ...
    # print "zob"
    # TODO utiliser resultats nmu
    # NOTE(review): `targetformat` is never defined anywhere in this file, so
    # this line raises NameError when reached.
    print('####################### nmt host: {0} '.format(targetformat))
if __name__ == "__main__":
    # print(" RECONATOR : usage " + %s + "ip_list.txt" % sys.argv[0])**
    # Ensure the (world-writable) output directories for nmap results exist.
    # NOTE(review): `os`, `sys` and `multiprocessing` are used below but are
    # not imported in this file's visible import block — confirm they are
    # imported elsewhere or add the imports.
    if os.path.isdir("/tmp/msfrpc") == True:
        print "/tmp/msfrpc exists"
    else:
        os.mkdir("/tmp/msfrpc", 0777)
    if os.path.isdir("/tmp/msfrpc/nmap") == True:
        print "/tmp/msfrpc/nmap exists"
    else:
        os.mkdir("/tmp/msfrpc/nmap", 0777)
    # Fork one scan process per line of the target-list file given as argv[1].
    # NOTE(review): each `ip` keeps its trailing newline from the file
    # iterator — presumably nmap tolerates it; verify.
    f = open(sys.argv[1], 'r')
    for ip in f:
        report = multiprocessing.Process(target=do_scan, args=(ip,))
        report.start()
    f.close()
| [
"toxic@murene"
] | toxic@murene |
e4c2ae41b7aec6371b17182c26cbfda22f852b60 | b466a62a6b8151937212688c09b3a5704eaa7466 | /Python OOP - Exam Preparation - 2 April 2020/tests/test_battlefield.py | 86b729b594d2a13d2cc6756a5da43117a61aedc9 | [
"MIT"
] | permissive | DiyanKalaydzhiev23/OOP---Python | 89efa1a08056375496278dac3af97e10876f7728 | 7ac424d5fb08a6bd28dc36593e45d949b3ac0cd0 | refs/heads/main | 2023-07-08T08:23:05.148293 | 2021-08-13T12:09:12 | 2021-08-13T12:09:12 | 383,723,287 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,693 | py | from unittest import TestCase, main
from project.battle_field import BattleField
from project.controller import Controller
class TestBattleField(TestCase):
    """Unit tests for BattleField and Controller.fight()."""

    def setUp(self):
        """Build a controller with two players and deal their cards."""
        self.controller = Controller()
        self.controller.add_player("Beginner", "pesho")
        self.controller.add_player("Advanced", "ivan")
        self.controller.add_card("Magic", "boom")
        self.controller.add_card("Trap", "oops")
        self.controller.add_player_card("pesho", "boom")
        self.controller.add_player_card("ivan", "oops")
        self.controller.add_player_card("ivan", "boom")
        self.attacker = self.controller.player_repository.find("pesho")
        self.enemy = self.controller.player_repository.find("ivan")
        self.battlefield = BattleField()

    def test_attacker_enemy_dead(self):
        """fight() must refuse to run when both players are at zero health."""
        self.attacker.health = 0
        self.enemy.health = 0
        with self.assertRaises(ValueError) as ctx:
            self.controller.fight("pesho", "ivan")
        self.assertEqual("Player is dead!", str(ctx.exception))

    def test_increase_beginner(self):
        """Beginner bonus raises the attacker's health to 90."""
        self.battlefield.increase_beginner(self.attacker)
        self.assertEqual(90, self.attacker.health)

    def test_getting_bonus_points(self):
        """Bonus points scale with each player's level."""
        for player in (self.attacker, self.enemy):
            self.battlefield.get_bonus_points(player)
        self.assertEqual(130, self.attacker.health)
        self.assertEqual(335, self.enemy.health)

    def test_attacker_is_dead_after_fight(self):
        """Two rounds started by the weaker player leave him dead."""
        for _ in range(2):
            self.controller.fight("pesho", "ivan")
        self.assertTrue(self.attacker.is_dead)

    def test_enemy_is_dead_after_fight(self):
        """Two rounds started by the stronger player also kill pesho.

        NOTE(review): mirrors the original test, which asserts on the
        attacker attribute (pesho) in both directions.
        """
        for _ in range(2):
            self.controller.fight("ivan", "pesho")
        self.assertTrue(self.attacker.is_dead)


if __name__ == '__main__':
    main()
| [
"diankostadenov@gmail.com"
] | diankostadenov@gmail.com |
ed4963184ecbbb28726f2833700b4965372f42e3 | 8109c33444dafc35a9e8399e3b69d1e51278c65f | /second_experiment/rename.py | 834a0635e6f47a0b064aeacce231864cfcf2115d | [
"MIT"
] | permissive | jcchouz/Paste-Video-Classification | 8a320366e2c57614a21869b7c0ab0f77b731ac40 | 58af07a343bcd4f95f26a71d6ae9791552b2fe6e | refs/heads/master | 2023-02-08T23:01:33.378138 | 2020-01-08T10:01:07 | 2020-01-08T10:01:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | # -_- coding: utf-8 -_
import os
# Rename every image under ./images_ash_sand_1_16_gamma_test/<density>/ to
# "<density>_<last three chars of the stem>.jpg".  Densities sweep 200..600
# in steps of 50, then 600..780 in steps of 5.
# (Fixes: dropped the unused counter `i`; stopped shadowing the builtin `file`.)
density = 200
while density <= 780:
    path = './images_ash_sand_1_16_gamma_test/' + str(density)
    for fname in os.listdir(path):
        source_file = os.path.join(path, fname)
        target_file = os.path.join(
            path, '%d_%s.jpg' % (density, os.path.basename(fname)[-7:-4]))
        os.rename(source_file, target_file)
    if density == 780:
        break
    if density < 600:
        density += 50
    else:
        density += 5
"a774845313@163.com"
] | a774845313@163.com |
ac4c91a50fd1f04ce141715e5289aa64f8765f8f | 0bb474290e13814c2498c086780da5096453da05 | /agc034/B/main.py | dcdc2a07ea70836db87eccb7f03314c35c2aad03 | [] | no_license | ddtkra/atcoder | 49b6205bf1bf6a50106b4ae94d2206a324f278e0 | eb57c144b5c2dbdd4abc432ecd8b1b3386244e30 | refs/heads/master | 2022-01-25T15:38:10.415959 | 2020-03-18T09:22:08 | 2020-03-18T09:22:08 | 208,825,724 | 1 | 0 | null | 2022-01-21T20:10:20 | 2019-09-16T14:51:01 | Python | UTF-8 | Python | false | false | 721 | py | #!/usr/bin/env python3
import sys
def solve(s: str):
    """Print the number of 'ABC' -> 'BCA' swaps obtainable from *s*.

    Each 'BC' pair is collapsed to a single token; every such token can hop
    left over the whole run of consecutive 'A's before it, so the answer is
    the sum, over all 'BC' tokens, of the length of the 'A'-run behind them.
    """
    compressed = s.replace('BC', 'X')
    total = 0
    run = 0  # length of the current run of 'A's
    for ch in compressed:
        if ch == 'A':
            run += 1
        elif ch == 'X':
            total += run
        else:
            run = 0  # any other character breaks the run
    print(total)
# Generated by 1.1.4 https://github.com/kyuridenamida/atcoder-tools (tips: You use the default template now. You can remove this line by using your custom template)
def main():
    """Read one whitespace-delimited token from stdin and run solve() on it."""
    def iterate_tokens():
        # Lazily yield whitespace-separated tokens from standard input.
        for line in sys.stdin:
            for word in line.split():
                yield word
    tokens = iterate_tokens()
    s = next(tokens)  # type: str
    solve(s)


if __name__ == '__main__':
    main()
| [
"deritefully@gmail.com"
] | deritefully@gmail.com |
9db0f9f6112cc72d53b679c65473ed67a818e2db | 1b216d412c462d4df8ba0c2c116cac07d73f8949 | /webapps/tragether/models.py | 3091a80fde662c606e2cd801e34adb40264fb612 | [] | no_license | liangxt/webapps | 4c52a03e9c77624d6bde466db6b06e5082b7e30a | 65add918d2281a791d712dc2abe6bbee52648c39 | refs/heads/master | 2021-01-17T15:43:31.841848 | 2017-03-06T19:16:08 | 2017-03-06T19:16:08 | 84,110,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,687 | py | from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator, MaxValueValidator
from django.db.models import Max
from django.utils import timezone
from django.utils.html import escape
from vote.managers import VotableManager
from tragether.choice import *
import sys
import pytz
from django.contrib.staticfiles.templatetags.staticfiles import static
def default_time():
    """Default travel end time: 24 hours from now.

    Passed (as a callable) to DateTimeField(default=...) so it is evaluated
    per row, not once at import.
    """
    # timedelta(+1) meant one day; spell it out explicitly.
    return timezone.now() + timezone.timedelta(days=1)
class Travel(models.Model):
    """A planned group trip that users can create, join, apply to, or be
    invited to."""

    creator = models.ForeignKey(User, null=True)
    destination = models.CharField(max_length=40)
    group_size = models.PositiveIntegerField(validators=[MinValueValidator(1)])
    # Fix: pass the callables themselves, not their results.  The original
    # wrote default=timezone.datetime.today() / default=default_time(), which
    # evaluates once at import time, so every Travel row shared the timestamp
    # of the moment the server process started.
    start_time = models.DateTimeField(default=timezone.datetime.today)
    end_time = models.DateTimeField(default=default_time)
    budget = models.FloatField(validators=[MinValueValidator(0.0),
                                           MaxValueValidator(sys.float_info.max)])
    info = models.CharField(max_length=420)
    status = models.CharField(max_length=1, choices=STATUSCHOICE, default="1")

    def __unicode__(self):
        return self.destination

    def __str__(self):
        return self.destination

    @property
    def get_members(self):
        """Users who joined this travel (via Person.travel_in)."""
        return [member.user for member in self.member.all()]

    @property
    def get_applied_users(self):
        """Senders of unread application messages for this travel."""
        return [msg.sender
                for msg in self.travel_applied_message.filter(applied=True,
                                                              read_status=False)]

    @property
    def get_invited_users(self):
        """Receivers of unread invitation messages for this travel."""
        return [msg.receiver
                for msg in self.travel_applied_message.filter(applied=False,
                                                              read_status=False)]
def upload_to_func(instance, filename):
    """Storage path for a Person's profile picture: photos/<username>."""
    return 'photos/{0}'.format(instance.user.username)
class Person(models.Model):
    """Profile attached one-to-one to a Django auth User."""

    user = models.OneToOneField(User)
    age = models.IntegerField(null=True, blank=True,
                              validators=[MinValueValidator(0), MaxValueValidator(125)])
    bio = models.CharField(max_length=420, default="", blank=True)
    gender = models.CharField(max_length=1, choices=GENDER_CHOICES)
    picture = models.ImageField(upload_to=upload_to_func, blank=True)
    travel_in = models.ManyToManyField(Travel, related_name='member')

    def __unicode__(self):
        return self.user.username

    def __str__(self):
        return self.__unicode__()

    @property
    def get_pic_url(self):
        """URL of the profile picture, or the placeholder image when unset."""
        if self.picture:
            return self.picture.url
        return static('tragether/image/photo_holder.jpg')
class Chatbox_Messages(models.Model):
    """A single chat message posted inside a travel's group chatbox."""

    travel = models.ForeignKey(Travel)
    sender = models.ForeignKey(User)
    content = models.CharField(max_length=420)
    # Stamped automatically on every save.
    datetime = models.DateTimeField(auto_now=True)

    def __unicode__(self):
        return self.content

    def __str__(self):
        return self.content

    @property
    def get_photo_url(self):
        # Avatar URL of the sender's Person profile.
        person = Person.objects.get(user=self.sender)
        return person.get_pic_url

    @property
    def html(self):
        # Server-rendered HTML for one chat bubble.  The user-supplied message
        # text goes through escape(); timestamps are shown in US/Eastern.
        return "<div class='row row-msg'>\
<div class='col-md-2 col-xs-2 msg-text-photo'>\
<img src='%s' alt=' %s' class='img-responsive'>\
</div>\
<div class='col-md-10 col-xs-10 msg-text-photo'>\
<div class='messages'>\
<p class='msg-content'>%s</p>\
<p class='msg-time-user'>%s from %s</p>\
</div></div></div>" % (self.get_photo_url, \
            self.sender, escape(self.content), \
            self.datetime.astimezone(pytz.timezone('US/Eastern')).strftime("%Y-%m-%d %H:%M:%S"), \
            self.sender)
class Itinerary(models.Model):
    # Soft-delete flag: rows are marked instead of removed so that clients
    # polling update_itineraries() can learn about deletions.
    deleted = models.BooleanField(default=False)
    # Bumped automatically on every save; drives the incremental sync below.
    last_changed = models.DateTimeField(auto_now=True)
    travel = models.ForeignKey(Travel)
    place = models.CharField(max_length=420)
    latitude = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
    longitude = models.DecimalField(max_digits=9, decimal_places=6, blank=True, null=True)
    start_time = models.DateTimeField()

    def __unicode__(self):
        return self.place

    def __str__(self):
        return self.__unicode__()

    @property
    def get_start_time(self):
        # Start time formatted for display in US/Eastern.
        return self.start_time.astimezone(pytz.timezone('US/Eastern')).strftime("%Y-%m-%d %H:%M")

    @property
    def html(self):
        # Table-row markup for the itinerary editor; place text is escaped.
        return "<tr id='itinerary_%d'><td>%s</td><td>%s</td><td id='%d'>\
<a class='btn btn-info btn-xs itinerary-edit-delete-icon btn-itinerary-edit'><span class='glyphicon glyphicon-edit'></span></a><a class='btn btn-danger btn-xs itinerary-edit-delete-icon btn-itinerary-delete'><span class='glyphicon glyphicon-remove'></span></a>\
</td></tr>" % (self.id, self.start_time.astimezone(pytz.timezone('US/Eastern')).strftime("%Y-%m-%d %H:%M"), \
            escape(self.place), self.id)

    @staticmethod
    def get_max_time(travel):
        # Latest modification time across the travel's itinerary rows, or the
        # epoch sentinel string when the travel has no rows yet.
        return Itinerary.objects.filter(travel=travel).aggregate(Max('last_changed'))['last_changed__max'] or "1970-01-01T00:00+00:00"

    @staticmethod
    def get_itineraries(travel):
        # All live (non-deleted) itinerary items, in chronological order.
        return Itinerary.objects.filter(travel=travel, deleted=False).distinct().order_by('start_time')

    @staticmethod
    def update_itineraries(travel, time="1970-01-01T00:00+00:00"):
        # Items changed since `time` (soft-deleted ones included), for sync.
        return Itinerary.objects.filter(travel=travel, last_changed__gt=time).distinct().order_by('start_time')
class ApplyInviteMsg(models.Model):
    """An application (user -> travel) or invitation (member -> user) message."""

    travel = models.ForeignKey(Travel, related_name='travel_applied_message')
    sender = models.ForeignKey(User, related_name='sender')
    receiver = models.ForeignKey(User, related_name='receiver')
    # Fix: the original wrote default=timezone.datetime, passing the datetime
    # *class* itself as the default value, which is not a valid field value.
    # The callable timezone.now stamps each message at creation time instead.
    datetime = models.DateTimeField(default=timezone.now)
    subject = models.CharField(max_length=42, blank=True)
    content = models.CharField(max_length=2048)
    read_status = models.BooleanField(default=False)
    accept_status = models.BooleanField(default=False)
    # True = the sender applied to join; False = a member invited the receiver.
    applied = models.BooleanField(default=True)

    def __unicode__(self):
        return self.travel.destination

    def __str__(self):
        return self.travel.destination
class Attraction(models.Model):
    # A votable attraction; `votes` is supplied by the django-vote manager.
    name = models.CharField(max_length=30)
    votes = VotableManager()
    def __unicode__(self):
        return self.name
    def __str__(self):
        return self.name
class Poll(models.Model):
    # One poll per travel, letting participants vote among attractions.
    travel = models.OneToOneField(Travel)
    attraction = models.ManyToManyField(Attraction)
    def __unicode__(self):
        return self.travel.destination
    def __str__(self):
        return self.travel.destination
| [
"liangxt07@gmail.com"
] | liangxt07@gmail.com |
bcfd51a123e43b32031694c438997f975b4979b3 | 859fb05b3e806c338aa0df6d235b351e7f4cfe4e | /src/jobs/migrations/0002_auto_20210117_0256.py | f6ff1c985e46834f5c25c0cd4e5427ee23649609 | [] | no_license | mahmoudshaheen1988/django-project | 3a7c2e365bc9ef8d9ec66dfaaa1f6a3e05f71ee1 | 28d3bde5c7e13d8308d620d344de099b28bdbd5e | refs/heads/main | 2023-02-23T18:14:28.838113 | 2021-01-17T00:14:33 | 2021-01-17T00:14:33 | 330,227,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # Generated by Django 3.1 on 2021-01-16 23:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds two required fields to the Job model.
    # `descripctions` (sic — the typo mirrors the model definition) and
    # `job_type`; both use a one-off '' default for existing rows
    # (preserve_default=False).  Note the choice value 'part Time' is
    # lowercase while its label is 'Part Time'.
    dependencies = [
        ('jobs', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='job',
            name='descripctions',
            field=models.TextField(default='', max_length=1000),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='job',
            name='job_type',
            field=models.CharField(choices=[('Full Time', 'Full Time'), ('part Time', 'Part Time')], default='', max_length=15),
            preserve_default=False,
        ),
    ]
| [
"mahmoudshaheen198811@gmail.com"
] | mahmoudshaheen198811@gmail.com |
18169718282ec7bfbfb2b7d2c8bd1613b7b9aa52 | 9b8e2992a38f591032997b5ced290fe1acc3ad94 | /lcs4t.py | ede392018cce26478bbc4a6e676503d973b8be70 | [] | no_license | girishdhegde/aps-2020 | c694443c10d0d572c8022dad5a6ce735462aaa51 | fb43d8817ba16ff78f93a8257409d77dbc82ced8 | refs/heads/master | 2021-08-08T04:49:18.876187 | 2021-01-02T04:46:20 | 2021-01-02T04:46:20 | 236,218,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | from collections import defaultdict
import math
# Per test case: read a target amount and a list of coin denominations, and
# decide whether some multiples of the non-1 denominations leave a remainder
# (printing "YES" plus one count per denomination) or always divide evenly
# ("NO").  `values` keeps first-seen input order so counts print in order.
t = int(input())
for _case in range(t):
    n, total = map(int, input().split())
    coin = []
    values = defaultdict(list)
    y = list(map(int, input().split()))
    for j in range(n):
        coin.append(y[j])
        values[y[j]].append(0)
    # Denominations other than 1, kept in input order.
    coins = [coin[j] for j in range(n) if coin[j] != 1]
    # NOTE(review): this looks like a leftover debug print; kept verbatim to
    # preserve the program's exact output.
    print("coins:", coins)
    if len(coins) == 1:
        if total % coins[0] == 0:
            print("NO")
        else:
            values[coins[0]][0] = math.ceil(total / coins[0])
            print("YES", end=" ")
            for entry in list(values.values()):
                print(entry[0], end=" ")
    else:
        coins = sorted(coins, reverse=True)
        flag = 0
        for c in coins:
            if total % c == 0:
                d = total / c - 1
                values[c][0] = int(d)
                total -= d * c
            else:
                flag = 1
                d = math.ceil(total / c)
                values[c][0] = int(d)
                break
        if flag == 0:
            print("NO")
        else:
            print("YES", end=" ")
            for entry in list(values.values()):
                print(entry[0], end=" ")
"girsihdhegde12499@gmail.com"
] | girsihdhegde12499@gmail.com |
3a4e8511ec4573a3af2a109c2b6a3a74de8d20f4 | c480cdcab43a81afc06e269d6624b7d1b2700941 | /venv/Scripts/pip3.6-script.py | e8cbc0879a669968ae190b96363ab39703e9d07d | [] | no_license | jiangbiaoah/Stocks | d2f3946ee70a7b7c1ab7a9a58c4621d4c778108f | 0c71a753fd7402ad53db846944abbb4b294424fa | refs/heads/master | 2020-03-24T13:13:20.709291 | 2018-08-10T04:13:30 | 2018-08-10T04:13:30 | 142,739,172 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #!D:\Workspace\Python\Stocks\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.6'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Auto-generated setuptools entry point for pip 9.0.1 in this virtualenv.
    # Strip the "-script.py"/".exe" suffix so pip reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.6')()
    )
| [
"jiangbiaoah@163.com"
] | jiangbiaoah@163.com |
413febb5de0ea3bcfb0e88911e7ad6f7de1162fa | bff3e02509c2a0f2ed1e250ef64c6c82b672f8f6 | /lahma/services/db_connector.py | 1324ee8a5124d4107d4b9bc41ac65c430cc8d83b | [] | no_license | gdamaskos/flask_with_sql_connector | e7f5ca3e8c644c39191325853568bd473e3d5a36 | 2c26b064b8373e0cd5de195cdba4c35f251e3a04 | refs/heads/main | 2023-05-28T13:04:48.624255 | 2021-06-10T16:40:35 | 2021-06-10T16:40:35 | 362,398,603 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,465 | py | #!/usr/bin/python3
import mysql.connector as mariadb
import concurrent.futures
from flask import json
#import json
import os
from lahma import app
class IdNotFoundError(ValueError):
    # Raised when the requested PDB id has neither residues nor warnings.
    pass
class NoProteinError(ValueError):
    # Raised when the entry carries the "No protein detected" warning.
    pass
class CalphaTraceError(ValueError):
    # Raised for entries that only contain a C-alpha trace of the protein.
    pass
def query_database(pdb_id):
    """
    Fetch per-chain annotation data for `pdb_id` from the SQL database and
    return it in the format used by the main page of the LAHMA website.

    Returns (warnings, ncs_data, per_chain_output, num_homologs); when the
    entry only has warnings (no residues) it returns (warnings, []) instead,
    matching the pre-existing contract of the callers.

    Raises IdNotFoundError, NoProteinError or CalphaTraceError for the
    corresponding database states.
    """
    connection = mariadb.connect(user=app.config['DB_USERNAME'],
                                 password=app.config['DB_PASSWORD'],
                                 host=app.config['DB_IP'],
                                 database=app.config['DB_NAME'])
    try:
        cursor = connection.cursor()
        # SECURITY FIX: pdb_id comes from the request, so it is passed as a
        # query parameter instead of being concatenated into the SQL text.
        cursor.execute("SELECT DISTINCT chain FROM Residue WHERE pdbid = %s;",
                       (pdb_id,))
        chain_rows = cursor.fetchall()
        # Entry-specific warnings, if any.
        cursor.execute("SELECT message FROM Warning WHERE pdbid = %s;",
                       (pdb_id,))
        warning_rows = cursor.fetchall()
        # Nothing at all known about this id.
        if not chain_rows and not warning_rows:
            raise IdNotFoundError
        warnings = [w[0] for w in warning_rows] if warning_rows else []
        for w in warnings:
            if w == "No protein detected":
                raise NoProteinError
            elif w == "Only C-alpha-trace of protein present":
                raise CalphaTraceError
        # Warnings but no residues: report the warnings only.
        if not chain_rows:
            return warnings, []
        chains = [row[0] for row in chain_rows]
        # NCS information: map each chain to the chains it is NCS-related to.
        cursor.execute("SELECT chain, ncschain FROM NCSInfo WHERE pdbid = %s;",
                       (pdb_id,))
        ncs_rows = cursor.fetchall()
        ncs_data = []
        ncs_chains_dealt_with = []
        if ncs_rows:
            for row in ncs_rows:
                # If the first chain was identified before, append this NCS
                # copy to its partner string; otherwise start a new entry.
                identified_before = False
                for elem in ncs_data:
                    if elem[0] == row[0]:
                        identified_before = True
                        elem[1] = elem[1] + row[1]
                        ncs_chains_dealt_with.append(row[1])
                if not identified_before:
                    ncs_data.append([row[0], row[1]])
                    ncs_chains_dealt_with.append(row[0])
                    ncs_chains_dealt_with.append(row[1])
        # Chains without NCS partners map to the empty string.
        for chain in chains:
            if chain not in ncs_chains_dealt_with:
                ncs_data.append([chain, ''])
        # Per-chain annotation data.
        output = [run_chain(chain, pdb_id) for chain in chains]
        # Number of homologous chains the annotation is based on, per chain.
        # One GROUP BY query replaces the old dynamically-built column list.
        cursor.execute("SELECT chain, COUNT(*) FROM HomolMap "
                       "WHERE pdbid = %s GROUP BY chain;", (pdb_id,))
        counts = dict(cursor.fetchall())
        # Kept the old mixed typing: a (string-convertible) count when
        # present, int 0 when a chain has no homologs.
        num_homologs = [str(counts[chain]) if chain in counts else 0
                        for chain in chains]
        return warnings, ncs_data, output, num_homologs
    finally:
        # FIX: the connection used to leak whenever an exception was raised.
        connection.close()
def read_json_files(pdb_id, json_dir):
    """
    Load the same data as query_database(), but from pre-generated JSON files
    (used for user-uploaded entries that are not in the database).

    Expects <json_dir>/<pdb_id>_main_page.json, optionally _warnings.json,
    plus _homol_map.json and _ncs_info.json.

    Returns (warnings, ncs_data, main_data, num_homologs).
    """
    base = os.path.join(json_dir, pdb_id)
    with open(base + "_main_page.json", 'r') as f:
        main_data = json.load(f)
    chains = [chain_data['ChainID'] for chain_data in main_data]
    warnings = []
    warning_file = base + "_warnings.json"
    # The warnings file may be absent or empty; both mean "no warnings".
    if os.path.exists(warning_file) and os.path.getsize(warning_file) > 0:
        with open(warning_file, 'r') as f:
            warnings = json.load(f)
    with open(base + "_homol_map.json", 'r') as f:
        homol_data = json.load(f)
    # Count homologs per chain in a single pass instead of one scan per chain.
    counts = {}
    if isinstance(homol_data, list):
        for homol in homol_data:
            counts[homol['chain']] = counts.get(homol['chain'], 0) + 1
    num_homologs = [counts.get(chain, 0) for chain in chains]
    ncs_file = base + "_ncs_info.json"
    ncs_data = readNCSfile(ncs_file, chains)
    return warnings, ncs_data, main_data, num_homologs
def readNCSfile(ncs_file, chains):
    """Parse the NCS-info JSON file and return, for every chain, a pair
    [chain, partners]: `partners` is the concatenation of its NCS-related
    chains, or '' when it has none (or the file is missing/empty/null)."""
    records = ""
    if os.path.exists(ncs_file) and os.path.getsize(ncs_file) > 0:
        with open(ncs_file, 'r') as f:
            records = json.load(f)
    if records is None:
        # File contained JSON `null`: no chain has NCS partners.
        return [[chain, ''] for chain in chains]
    result = []
    handled = []
    for record in records:
        primary = record['chain']
        partner = record['ncschain']
        if primary in handled:
            # Known chain: extend its partner string with the new copy.
            for entry in result:
                if entry[0] == primary:
                    entry[1] = entry[1] + partner
            handled.append(partner)
        else:
            result.append([primary, partner])
            handled.append(primary)
            handled.append(partner)
    # Chains never mentioned in the file have no NCS partners.
    for chain in chains:
        if chain not in handled:
            result.append([chain, ''])
    return result
def run_chain(chain, pdb_id):
    """
    Collect all per-residue annotation of one chain and fold it into a dict
    of per-parameter strings/lists (one character or entry per residue),
    plus the chain id and residue numbers.  Opens its own DB connection so
    chains can be processed independently.
    """
    connection = mariadb.connect(user=app.config['DB_USERNAME'],
                                 password=app.config['DB_PASSWORD'],
                                 host=app.config['DB_IP'],
                                 database=app.config['DB_NAME'])
    try:
        cursor = connection.cursor()
        # Template: one key per tracked parameter.  Two-element lists hold a
        # pair of aligned strings (e.g. cis/trans letters + percentages).
        output_dict = {
            "SEQUENCE" : "",
            "RAMA CLASS" : "",
            "RAMA Z-SCORE" : "",
            "RAMA Z-SCORE RELATIVE" : "",
            "ROTA Z-SCORE" : "",
            "ROTA Z-SCORE RELATIVE" : "",
            "ROTA PCT" : "",
            "ROTA PCT RELATIVE" : "",
            "RSCC Z-SCORE" : "",
            "RSCC Z-SCORE RELATIVE" : "",
            "CIS-TRANS" : ["", ""],
            "POST TRANS MOD" : ["", ""],
            "HSSP SEQ PCT" : "",
            "HSSP ENTROPY" : "",
            "NUM SYM CONTACTS" : "",
            "NUM H-BOND MAIN" : "",
            "NUM H-BOND SIDE" : "",
            "HAS ALTERNATES" : "",
            "PDB SEQ PCT" : "",
            "PDB PCT ORDERED" : "",
            "NUM LIGAND CONTACTS" : [],
            "REL SURFACE ACC" : "",
            "SEC STRUC ELEM" : ["", ""],
            "CA TORS OUTLIER" : "",
            "Residue numbers" : [],
            "ChainID" : chain
        }
        # SECURITY FIX: pdb_id/chain are bound as parameters instead of
        # being concatenated into the SQL text.
        cursor.execute("SELECT r.seqidx, r.resnum, d.paramnum, d.datavalue "
                       "FROM Residue r "
                       "INNER JOIN ResData d ON r.resid = d.resid "
                       "WHERE r.pdbid = %s AND r.chain = %s "
                       "ORDER BY r.seqidx ASC, d.paramnum ASC;",
                       (pdb_id, chain))
        rows = cursor.fetchall()
        # ROBUSTNESS: the old code crashed with IndexError when a chain had
        # no ResData rows; now the empty template is returned instead.
        if rows:
            # Rows are sorted by seqidx; flush one group per residue.
            current_idx = rows[0][0]
            group = []
            for row in rows:
                if row[0] != current_idx:
                    add_residue_to_output_data(group, output_dict)
                    group = []
                    current_idx = row[0]
                group.append(row)
            add_residue_to_output_data(group, output_dict)
            # Data is sorted by seqidx, so the last group holds the highest.
            max_seq_idx = group[0][0]
            add_ligand_binding_info(cursor, int(max_seq_idx), output_dict,
                                    pdb_id, chain)
        return output_dict
    finally:
        # FIX: the connection used to leak whenever an exception was raised.
        connection.close()
# Maps ResData.paramnum values to the keys of the per-chain dict built in
# run_chain().  Paramnums absent here (9, 24-29, 33) are not collected;
# several paramnums share one key because they fill the two halves of a
# paired entry (e.g. 12/23 -> CIS-TRANS, 13/14 -> POST TRANS MOD).
PARAMNUM_TO_PARAMNAME = {
    1 : "SEQUENCE",
    2 : "RAMA CLASS",
    3 : "RAMA Z-SCORE",
    4 : "RAMA Z-SCORE RELATIVE",
    5 : "ROTA Z-SCORE",
    6 : "ROTA Z-SCORE RELATIVE",
    7 : "ROTA PCT",
    8 : "ROTA PCT RELATIVE",
    10 : "RSCC Z-SCORE",
    11 : "RSCC Z-SCORE RELATIVE",
    12 : "CIS-TRANS",
    13 : "POST TRANS MOD",
    14 : "POST TRANS MOD",
    15 : "HSSP SEQ PCT",
    16 : "HSSP ENTROPY",
    17 : "NUM SYM CONTACTS",
    18 : "NUM H-BOND MAIN",
    19 : "NUM H-BOND SIDE",
    20 : "HAS ALTERNATES",
    21 : "PDB SEQ PCT",
    22 : "PDB PCT ORDERED",
    23 : "CIS-TRANS",
    30 : "REL SURFACE ACC",
    31 : "SEC STRUC ELEM",
    32 : "SEC STRUC ELEM",
    34 : "CA TORS OUTLIER"
}
def getNameFromNum(paramnum):
    """Return the display name for a ResData paramnum ('' if untracked)."""
    try:
        return PARAMNUM_TO_PARAMNAME[paramnum]
    except KeyError:
        return ""
def add_residue_to_output_data(resdata, output_dict):
    # Dispatch one residue's (seqidx, resnum, paramnum, datavalue) rows to the
    # per-parameter letter helpers, then pad every parameter that was not
    # reported with '-'.  Rows arrive sorted by paramnum, which guarantees
    # paramnum 7 (ROTA PCT) is seen before paramnum 8 needs `rota_pct`.
    param_nums_present = []
    rota_pct = -1
    addResidueNumber(resdata[0][1], output_dict)
    for datapoint in resdata:
        paramnum = datapoint[2]
        value = datapoint[3]
        param_nums_present.append(paramnum)
        if paramnum in (1, 2, 20, 34):
            # Raw one-letter values (sequence, rama class, alternates, CA torsion).
            add_simple_letter(getNameFromNum(paramnum), value, output_dict)
        elif paramnum in (3, 5):
            add_torsion_zscore_letter(getNameFromNum(paramnum), value, output_dict)
        elif paramnum == 10:
            add_zscore_letter(getNameFromNum(paramnum), value, output_dict)
        elif paramnum in (4, 6, 11):
            add_relative_zscore_letter(getNameFromNum(paramnum), value, output_dict)
        elif paramnum in (7, 15, 21, 22, 30):
            add_percentage_letter(getNameFromNum(paramnum), value, output_dict)
            if paramnum == 7:
                # Remember for paramnum 8 (relative rotamer percentage) below.
                rota_pct = int(value)
        elif paramnum in (17, 18, 19):
            add_number_letter(getNameFromNum(paramnum), value, output_dict)
        elif paramnum == 8:
            add_rota_relative_pct_letter(getNameFromNum(paramnum), int(value), rota_pct, output_dict)
        elif paramnum == 16:
            add_hssp_entropy_letter(getNameFromNum(paramnum), float(value), output_dict)
        elif paramnum in (13, 31):
            # First half of the paired POST TRANS MOD / SEC STRUC ELEM entries.
            add_simple_letter_to_first_field(getNameFromNum(paramnum), value, output_dict)
        elif paramnum == 12:
            add_simple_letter_to_second_field(getNameFromNum(paramnum), value, output_dict)
        elif paramnum in (14, 32):
            add_percentage_letter_to_second_field(getNameFromNum(paramnum), value, output_dict)
        elif paramnum == 23:
            # Omega torsion angle -> cis/trans/distorted letter.
            add_cis_trans_letter(getNameFromNum(paramnum), float(value), output_dict)
    add_empty_data_letters(param_nums_present, output_dict)
def add_torsion_zscore_letter(paramname, value, output_dict):
    """Bucket a torsion-related Z-score into a digit 0-9 (z=0 -> '5') and
    append it to output_dict[paramname]."""
    raw = int((float(value) + 2) * 2 + 1)
    bucket = min(9, max(0, raw))
    output_dict[paramname] += str(bucket)
def add_zscore_letter(paramname, value, output_dict):
    """Bucket a wide-range Z-score (~[-10, 6]) into a digit 0-9 and append
    it to output_dict[paramname]."""
    raw = int((float(value) + 10) / 2 + 1)
    bucket = min(9, max(0, raw))
    output_dict[paramname] += str(bucket)
def add_simple_letter(paramname, value, output_dict):
    """Append the raw one-letter value to the named per-chain string."""
    current = output_dict[paramname]
    output_dict[paramname] = current + value
def add_simple_letter_to_first_field(paramname, value, output_dict):
    """Append the value to element 0 of a two-string entry (paired data)."""
    entry = output_dict[paramname]
    entry[0] = entry[0] + value
def add_simple_letter_to_second_field(paramname, value, output_dict):
    """Append the value to element 1 of a two-string entry (paired data)."""
    entry = output_dict[paramname]
    entry[1] = entry[1] + value
def add_relative_zscore_letter(paramname, value, output_dict):
    """Bucket a relative Z-score (~[-3, 2]) into a digit 0-9 (0 -> '6') and
    append it to output_dict[paramname]."""
    raw = int(float(value) * 2 + 6)
    bucket = min(9, max(0, raw))
    output_dict[paramname] += str(bucket)
def getPctScore(value):
    """Map a percentage (0-100) to one digit 0-9; 100% collapses to 9."""
    return min(int(value) // 10, 9)
def add_percentage_letter(paramname, value, output_dict):
    """Append the 0-9 percentage bucket of `value` to the named entry."""
    output_dict[paramname] = output_dict[paramname] + str(getPctScore(value))
def add_percentage_letter_to_second_field(paramname, value, output_dict):
    """Append the 0-9 percentage bucket to element 1 of a two-string entry."""
    entry = output_dict[paramname]
    entry[1] = entry[1] + str(getPctScore(value))
def add_number_letter(paramname, value, output_dict):
    """Append a count as a single digit, saturating at 9 (no lower clamp)."""
    output_dict[paramname] += str(min(int(value), 9))
def add_rota_relative_pct_letter(paramname, value, rota_pct, output_dict):
    """Append a digit combining the rotamer percentage with the number of
    rotamers; '-' when the percentage is unknown (-1) or only one rotamer
    exists (value == 1)."""
    if rota_pct == -1 or value == 1:
        output_dict[paramname] += '-'
        return
    output_dict[paramname] += str(min(int(rota_pct * value * 0.05), 9))
def add_hssp_entropy_letter(paramname, value, output_dict):
    """Append sequence conservation as a digit: low entropy -> '9',
    entropy >= 2.5 -> '0'."""
    bucket = 9 - int(value * 4)
    output_dict[paramname] += str(max(bucket, 0))
def add_cis_trans_letter(paramname, omega, output_dict):
    """Classify the omega torsion angle and append T (trans, within 30 of
    +/-180), C (cis, deviation > 150) or D (distorted) to element 0 of the
    paired entry."""
    deviation = min(abs(omega - 180), abs(omega + 180))
    if deviation < 30:
        letter = 'T'
    elif deviation > 150:
        letter = 'C'
    else:
        letter = 'D'
    output_dict[paramname][0] += letter
def add_empty_data_letters(param_nums_present, output_dict):
    """Append a '-' placeholder for every tracked parameter (2..34) that was
    not reported for this residue, keeping all per-chain strings aligned."""
    never_collected = (9, 24, 25, 26, 27, 28, 29, 33)
    for num in range(2, 35):
        if num in never_collected or num in param_nums_present:
            continue
        name = getNameFromNum(num)
        if num in (13, 23, 31):
            output_dict[name][0] += '-'
        elif num in (12, 14, 32):
            output_dict[name][1] += '-'
        else:
            output_dict[name] += '-'
def addResidueNumber(resnum, output_dict):
    # Record the author-numbered residue id, parallel to the per-parameter
    # strings built by the add_* helpers.
    output_dict["Residue numbers"].append(resnum)
def add_ligand_binding_info(cursor, max_seq_idx, output_dict, pdbid, chain):
    """
    Fill output_dict["NUM LIGAND CONTACTS"] with one entry per sequence index
    0..max_seq_idx: the list of human-readable ligand descriptions that
    residue contacts (empty list when none).
    """
    # SECURITY FIX: pdbid/chain are bound as parameters instead of being
    # concatenated into the SQL text.
    cursor.execute("SELECT r.seqidx, l.restype, l.chain, l.resnum, l.inscode "
                   "FROM Residue r "
                   "INNER JOIN Contact c ON r.resid = c.resid "
                   "INNER JOIN Ligand l ON l.ligresid = c.ligresid "
                   "WHERE r.pdbid = %s AND r.chain = %s "
                   "ORDER BY r.seqidx ASC;", (pdbid, chain))
    # seqidx -> list of "RESTYPE CHAIN RESNUM[inscode]" strings; replaces the
    # old dual-list walker and its duplicated text-building code.
    contacts = {}
    for row in cursor.fetchall():
        seq_idx = int(row[0])
        lig_text = row[1] + " " + str(row[2]) + " " + str(row[3])
        if row[4] != ".":  # "." means no insertion code
            lig_text += row[4]
        contacts.setdefault(seq_idx, []).append(lig_text)
    for idx in range(max_seq_idx + 1):
        output_dict["NUM LIGAND CONTACTS"].append(contacts.get(idx, []))
if __name__ == "__main__":
warnings, ncs_data, output, num_homologs = query_database('1dio', True)
print(ncs_data)
print(output)
| [
"georgedamaskos@gmail.com"
] | georgedamaskos@gmail.com |
91675a4da299a7d70adac40d236e890674f592c0 | b3f5df499f06fb0bf19fbcc862485b4a298cb185 | /week_9/2020_07_13.py | ffbeabdfcfafab4d1725985511349632f0dc3ca3 | [] | no_license | AlanJYLi/leetcode_practice_python | f9f6c703ca0a7aadbfd795985b722e8e8e5460d1 | 9e7a647de9430672de73223e1c07632879289d1e | refs/heads/master | 2022-12-21T01:01:54.062191 | 2020-09-21T20:43:55 | 2020-09-21T20:43:55 | 268,670,845 | 0 | 0 | null | 2020-07-13T14:12:44 | 2020-06-02T01:21:25 | Python | UTF-8 | Python | false | false | 2,683 | py | # 1119. Remove Vowels from a String
class Solution:
    def removeVowels(self, S: str) -> str:
        """LC 1119: return S with all (lowercase) vowels removed."""
        vowels = {'a', 'e', 'i', 'o', 'u'}
        return ''.join(ch for ch in S if ch not in vowels)
# 1122. Relative Sort Array
class Solution:
    def relativeSortArray(self, arr1: List[int], arr2: List[int]) -> List[int]:
        """Sort arr1 so its elements follow the order given by arr2;
        elements absent from arr2 go last in ascending order.

        Improvement: counts occurrences once instead of building a
        dict-of-lists per value, and `pop` makes the result correct even if
        arr2 accidentally contained duplicates.
        """
        if not arr2:
            return sorted(arr1)
        counts = {}
        for num in arr1:
            counts[num] = counts.get(num, 0) + 1
        result = []
        for num in arr2:
            result.extend([num] * counts.pop(num, 0))
        # Leftovers: values never mentioned in arr2, ascending.
        for num in sorted(counts):
            result.extend([num] * counts[num])
        return result
# 1128. Number of Equivalent Domino Pairs
class Solution:
    def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
        """Count pairs (i, j), i < j, of dominoes equal up to rotation.
        Group by the sorted (low, high) key, then sum C(count, 2) per group."""
        counts = {}
        for first, second in dominoes:
            key = (first, second) if first <= second else (second, first)
            counts[key] = counts.get(key, 0) + 1
        return sum(c * (c - 1) // 2 for c in counts.values())
class Solution:
    def numEquivDominoPairs(self, dominoes: List[List[int]]) -> int:
        """Single-pass variant: each new domino forms a pair with every
        equivalent domino counted so far."""
        seen = {}
        pairs = 0
        for first, second in dominoes:
            key = (first, second) if first <= second else (second, first)
            previously = seen.get(key, 0)
            pairs += previously
            seen[key] = previously + 1
        return pairs
# 1133. Largest Unique Number
class Solution:
    def largestUniqueNumber(self, A: List[int]) -> int:
        """Largest value occurring exactly once in A, or -1 if none."""
        counts = {}
        for num in A:
            counts[num] = counts.get(num, 0) + 1
        unique = [num for num, c in counts.items() if c == 1]
        return max(unique) if unique else -1
# 1134. Armstrong Number
class Solution:
    def isArmstrong(self, N: int) -> bool:
        """True if N equals the sum of its digits, each raised to the number
        of digits (a narcissistic / Armstrong number)."""
        digits = str(N)
        power = len(digits)
        return sum(int(d) ** power for d in digits) == N
# 1137. N-th Tribonacci Number
class Solution:
    def tribonacci(self, n: int) -> int:
        """Iterative O(1)-space Tribonacci: T0=0, T1=1, T2=1,
        Tn = Tn-1 + Tn-2 + Tn-3."""
        if n == 0:
            return 0
        if n in (1, 2):
            return 1
        a, b, c = 0, 1, 1
        for _ in range(n - 2):
            a, b, c = b, c, a + b + c
        return c
"lijingyu.rg@gmail.com"
] | lijingyu.rg@gmail.com |
cad475eef10b3d96e45bef653a7b8069696be5a3 | 448b7ff400c0537ddd6ab8343a7d721d13d58b58 | /apps/users/forms.py | eccef96e27dd64d2a88d3c9cc692309c4a84e6cf | [] | no_license | leonhj17/muxue | 1f88662c0681ab0ca3eb3aae783d6eafc4b7a052 | bf588a6f1aa6022a5245fabbf413300223416195 | refs/heads/master | 2021-01-20T10:10:02.382183 | 2017-08-10T06:29:16 | 2017-08-10T06:29:16 | 90,328,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # _*_ encoding:utf-8 _*_
from django import forms
from captcha.fields import CaptchaField
class LoginForm(forms.Form):
    # Login credentials; password must be at least 5 characters.
    username = forms.CharField(required=True)
    password = forms.CharField(required=True,min_length=5)
class RegisterForm(forms.Form):
    # Registration by email address; the captcha error message is
    # user-facing Chinese text ("captcha is wrong") and must stay as-is.
    email = forms.EmailField(required=True)
    password = forms.CharField(required=True, min_length=5)
    captcha = CaptchaField(error_messages={'invalid': u'验证码错误'})
| [
"huangjian17@outlook.com"
] | huangjian17@outlook.com |
class Car:
    """A simple car: engine on/off state, speed clamped to [0, max_speed],
    fixed acceleration per step, and braking by tyre friction.  All three
    numeric parameters must be strictly positive."""

    def __init__(self, max_speed, acceleration, tyre_friction, color=None):
        # Validate before storing; each check raises ValueError on bad input.
        self.is_valid_data("max_speed", max_speed)
        self.is_valid_data("acceleration", acceleration)
        self.is_valid_data("tyre_friction", tyre_friction)
        self._max_speed = max_speed
        self._acceleration = acceleration
        self._tyre_friction = tyre_friction
        self._color = color
        self._is_engine_started = False
        self._current_speed = 0

    def start_engine(self):
        if self._is_engine_started:
            print("Stop the engine to start_engine")
            return
        self._is_engine_started = True

    def accelerate(self):
        """Increase speed by one acceleration step, capped at max_speed."""
        if not self._is_engine_started:
            print("Start the engine to accelerate")
            return
        self._current_speed = min(self._max_speed,
                                  self._current_speed + self._acceleration)

    def apply_brakes(self):
        """Reduce speed by tyre friction, never below zero."""
        if not self._is_engine_started:
            print("Start the engine to apply_breaks")
            return
        self._current_speed = max(0, self._current_speed - self._tyre_friction)

    def sound_horn(self):
        if not self._is_engine_started:
            print("Start the engine to sound_horn")
            return
        print("Beep Beep")

    def stop_engine(self):
        # Note: speed is intentionally left unchanged (matches assignment spec).
        if not self._is_engine_started:
            print("Start the engine to stop_engine")
            return
        self._is_engine_started = False

    @property
    def max_speed(self):
        return self._max_speed

    @property
    def acceleration(self):
        return self._acceleration

    @property
    def tyre_friction(self):
        return self._tyre_friction

    @property
    def color(self):
        return self._color

    @property
    def is_engine_started(self):
        return self._is_engine_started

    @property
    def current_speed(self):
        return self._current_speed

    @staticmethod
    def is_valid_data(args, value):
        """Raise ValueError unless `value` is strictly positive."""
        if value <= 0:
            raise ValueError(f"Invalid value for {args}")
        return True
class Truck(Car):
    """Car that also carries cargo up to max_cargo_weight; loading and
    unloading are only allowed while stationary."""

    def __init__(self, max_speed, acceleration, tyre_friction, max_cargo_weight, color=None):
        super().__init__(max_speed, acceleration, tyre_friction, color)
        self.is_valid_data("max_cargo_weight", max_cargo_weight)
        self._max_cargo_weight = max_cargo_weight
        self._weight_in_cargo = 0

    def sound_horn(self):
        if not self._is_engine_started:
            print("Start the engine to sound_horn")
            return
        print("Honk Honk")

    def load(self, cargo_weight):
        """Add cargo while stationary; rejects loads exceeding the limit."""
        self.is_valid_data("cargo_weight", cargo_weight)
        if self._current_speed:
            print("Cannot load cargo during motion")
            return
        if self._weight_in_cargo + cargo_weight > self._max_cargo_weight:
            print(f"Cannot load cargo more than max limit: {self._max_cargo_weight}")
            return
        self._weight_in_cargo += cargo_weight

    def unload(self, cargo_weight):
        """Remove cargo while stationary; rejects going below zero."""
        self.is_valid_data("cargo_weight", cargo_weight)
        if self._current_speed:
            print("Cannot unload cargo during motion")
            return
        if self._weight_in_cargo - cargo_weight < 0:
            print(f"Cannot unload cargo less than min limit: {0}")
            return
        self._weight_in_cargo -= cargo_weight

    @property
    def max_cargo_weight(self):
        return self._max_cargo_weight

    @property
    def weight_in_cargo(self):
        return self._weight_in_cargo
class RaceCar(Car):
    # Car with a nitro boost: braking at high speed charges nitro, which is
    # then spent to accelerate ~30% harder (capped at max_speed).
    def __init__(self, max_speed, acceleration, tyre_friction, color = None):
        super().__init__(max_speed, acceleration, tyre_friction,color)
        self._nitro = 0
    def accelerate(self):
        import math
        super().accelerate()
        # NOTE(review): the boost is applied whenever nitro > 0 even if the
        # engine is off (super() only prints a warning then), so a stopped
        # engine does not prevent the speed increase — confirm intended.
        if self._nitro:
            self._current_speed += math.ceil(self._acceleration * 0.3)
            self._nitro -= 10
            if self._current_speed > self._max_speed:
                self._current_speed = self._max_speed
    def apply_brakes(self):
        # Braking above half of max speed charges 10 nitro before slowing.
        if self._current_speed > (0.5 * self._max_speed):
            self._nitro += 10
        super().apply_brakes()
    def sound_horn(self):
        if self._is_engine_started:
            print("Peep Peep\nBeep Beep")
        else:
            print("Start the engine to sound_horn")
    @property
    def nitro(self):
        # Remaining nitro charge.
        return self._nitro
"g.kranthi2507@gmail.com"
] | g.kranthi2507@gmail.com |
a7719bec1ea22f590bd6c01cb8ac40b983df3eda | 25fac06a7b6ad96681390a97a3e3909f5fabee20 | /about/urls.py | 088ddae564f971e162317175f1c2814f253c0f98 | [] | no_license | zynpnd/Restaurant | b68a81960d6b672994a2df46341b148f96cd6921 | 97f8dbdf15c927b367b5e86aecec5a0e3a46baae | refs/heads/master | 2023-06-10T03:45:24.495885 | 2021-06-26T18:35:45 | 2021-06-26T18:35:45 | 380,490,759 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | from django.urls import path
from about import views
# Routes /about/ to the about view.
urlpatterns = [
    path('about/', views.about),
]
"zsarican997@gmail.com"
] | zsarican997@gmail.com |
cd4b1c12109167b270b0c0cf0d8698ee6af197c5 | eb7e1ad96a713213c2dbbae4f53ba6ae4d619f91 | /data_loaders.py | 678f858ef3ace0ea97d42c029654047fc99f4f83 | [] | no_license | omarsayed7/Deep-Emotion | 8e8c2699ee3781407b0c06858c7fb598f4eb9669 | 7b9e45c087813b4339ad9b7030b790802e59ca9f | refs/heads/master | 2022-07-26T17:20:12.404318 | 2022-07-21T12:29:06 | 2022-07-21T12:29:06 | 240,705,758 | 199 | 86 | null | 2022-07-21T12:29:07 | 2020-02-15T12:23:56 | Python | UTF-8 | Python | false | false | 2,248 | py | import os
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
class Plain_Dataset(Dataset):
    def __init__(self,csv_file,img_dir,datatype,transform):
        '''
        PyTorch Dataset over pre-cropped face images and their emotion labels.
        params:-
            csv_file : path of the csv file holding an 'emotion' column
                       (train, validation, test)
            img_dir  : directory of the images; NOTE it must end with a path
                       separator because image paths are built by plain
                       string concatenation below
            datatype : filename prefix used when locating images
                       (train, val, test)
            transform: pytorch transformation applied to each image
        return :-
            (image, label) pairs via __getitem__
        '''
        self.csv_file = pd.read_csv(csv_file)
        # NOTE(review): attribute name 'lables' (sic) kept for compatibility.
        self.lables = self.csv_file['emotion']
        self.img_dir = img_dir
        self.transform = transform
        self.datatype = datatype
    def __len__(self):
        # One sample per csv row.
        return len(self.csv_file)
    def __getitem__(self,idx):
        # DataLoader may pass a tensor index; convert to a plain int/list.
        if torch.is_tensor(idx):
            idx = idx.tolist()
        # Images are expected at <img_dir><datatype><idx>.jpg.
        img = Image.open(self.img_dir+self.datatype+str(idx)+'.jpg')
        lables = np.array(self.lables[idx])
        # Labels as int64 tensors, as expected by CrossEntropyLoss.
        lables = torch.from_numpy(lables).long()
        if self.transform :
            img = self.transform(img)
        return img,lables
#Helper function
def eval_data_dataloader(csv_file, img_dir, datatype, sample_number, transform=None):
    '''
    Helper used to eyeball the Dataset class: loads one sample, prints its
    label and shows the image.
    params:-
        csv_file      : path of the csv file (train, validation, test)
        img_dir       : directory of the images (train, validation, test)
        datatype      : string for searching along the image_dir (train, val, test)
        sample_number : index of the sample to be shown
        transform     : optional transform; defaults to ToTensor + Normalize
    '''
    # BUG FIX: `plt` was used below but matplotlib was never imported anywhere
    # in this module, so the function always crashed with NameError.
    import matplotlib.pyplot as plt
    if transform is None:
        transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])
    dataset = Plain_Dataset(csv_file=csv_file, img_dir=img_dir, datatype=datatype, transform=transform)
    label = dataset.__getitem__(sample_number)[1]
    print(label)
    img = dataset.__getitem__(sample_number)[0]
    # Tensor (C, H, W) -> 2-D numpy array for display (grayscale images).
    img = img.numpy().squeeze()
    plt.imshow(img)
    plt.show()
| [
"sayedomar74@gmail.com"
] | sayedomar74@gmail.com |
de7ce52b41660eee7eea8ff7603241674cd09c47 | 9da8754002fa402ad8e6f25659978bd269bbcec8 | /src/622A/cdf_622A.py | 696901db63211acbb043bb8a0098147f0db843e9 | [
"MIT"
] | permissive | kopok2/CodeforcesSolutionsPython | a00f706dbf368ba0846c8ae86d4145b5dd3e1613 | 35bec0dbcff47765b123b5fe60476014376153df | refs/heads/master | 2023-02-02T03:08:22.097651 | 2020-12-17T22:00:50 | 2020-12-17T22:00:50 | 196,035,812 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | import math
class CodeforcesTask622ASolution:
    """Codeforces 622A: value at position n of 1, 1,2, 1,2,3, 1,2,3,4, ..."""

    def __init__(self):
        self.result = ''
        self.n = 0

    def read_input(self):
        """Read the 1-based position n from stdin."""
        self.n = int(input())

    def process_task(self):
        """Find the last complete block ending before position n; the answer
        is n's offset inside the following block."""
        level = int(math.sqrt(self.n))
        # (level + level**2) / 2 is the index of the last element of block
        # `level` (float arithmetic kept as in the original).
        while (level + level ** 2) / 2 < self.n:
            level += 1
        level -= 1
        offset = self.n - (level + level ** 2) / 2
        self.result = str(int(offset))

    def get_result(self):
        """Answer as a string."""
        return self.result
if __name__ == "__main__":
Solution = CodeforcesTask622ASolution()
Solution.read_input()
Solution.process_task()
print(Solution.get_result())
| [
"oleszek.karol@gmail.com"
] | oleszek.karol@gmail.com |
7047569f62d60dc9c4e5bdb7e3177aabf9c79323 | 0ebe27ae590942f0efe56078ec4ee4c56c0312ad | /configV2.py | 360f188ce9aa7841bcc018de148adbdc1c1740e3 | [] | no_license | 7Osman7/BiClassifierInsects | ef265c548365adb0da3cf99700a6778a8a97e8e8 | dcdd6e4cd00b39e29137f703972aa38e708637eb | refs/heads/master | 2022-12-08T03:19:11.475401 | 2020-08-26T08:06:23 | 2020-08-26T08:06:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | #This is where hyper-parameters and paths are edited
#Hyper-parameters
EPOCHS = 200
BATCH_SIZE = 128
LEARNING_RATE = 0.0001
#Paths
# NOTE(review): absolute per-machine Windows paths; consider moving these to
# environment variables or a local, untracked config file.
TRAIN_PATH = 'C:\\Users\\MONB1\\Desktop\\Gluxkind_CNN\\training'
VALID_PATH = 'C:\\Users\\MONB1\\Desktop\\Gluxkind_CNN\\validation'
TEST_PATH = 'C:\\Users\\MONB1\\Desktop\\Gluxkind_CNN\\testing'
MODEL_STORE_PATH = 'C:\\Users\\MONB1\\Desktop\\Gluxkind_CNN\\'
| [
"noreply@github.com"
] | 7Osman7.noreply@github.com |
975273c217e092f254fb0412e8511e805ff5f3e7 | 3616a4046ec50c77eb5b117a678a7b233db18aac | /Python/Delete Node in a BST/main.py | 1930e78a896d875be38a85ced0217fdca145ffbe | [] | no_license | briansu2004/MyLeet | fe38cd10928d5faa3f449a65f13b2b87415d960e | 233d12deca34f51c3bb0406831cc07f3b72b50cf | refs/heads/master | 2023-06-01T05:04:07.685603 | 2021-07-01T17:57:40 | 2021-07-01T17:57:40 | 360,516,302 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,792 | py | """
https://leetcode.com/explore/learn/card/introduction-to-data-structure-binary-search-tree/141/basic-operations-in-a-bst/1006/
Delete Node in a BST
Given a root node reference of a BST and a key, delete the node with the given key in the BST. Return the root node reference (possibly updated) of the BST.
Basically, the deletion can be divided into two stages:
Search for a node to remove.
If the node is found, delete the node.
Follow up: Can you solve it with time complexity O(height of tree)?
Example 1:
Input: root = [5,3,6,2,4,null,7], key = 3
Output: [5,4,6,2,null,null,7]
Explanation: Given key to delete is 3. So we find the node with value 3 and delete it.
One valid answer is [5,4,6,2,null,null,7], shown in the above BST.
Please notice that another valid answer is [5,2,6,null,4,null,7] and it's also accepted.
Example 2:
Input: root = [5,3,6,2,4,null,7], key = 0
Output: [5,3,6,2,4,null,7]
Explanation: The tree does not contain a node with value = 0.
Example 3:
Input: root = [], key = 0
Output: []
Constraints:
The number of nodes in the tree is in the range [0, 104].
-105 <= Node.val <= 105
Each node has a unique value.
root is a valid binary search tree.
-105 <= key <= 105
"""
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        # Standard LeetCode node: a value plus optional left/right children.
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def deleteNode(self, root: TreeNode, key: int) -> TreeNode:
        """Delete `key` from the BST rooted at `root` and return the
        (possibly new) root.  Runs in O(height of tree)."""
        if root is None:
            return None
        if key < root.val:
            # Key lies in the left subtree.
            root.left = self.deleteNode(root.left, key)
            return root
        if key > root.val:
            # Key lies in the right subtree.
            root.right = self.deleteNode(root.right, key)
            return root
        # This is the node to delete.
        if root.left is None or root.right is None:
            # Zero or one child: splice the node out.
            return root.left if root.left else root.right
        # Two children: overwrite with the in-order successor (smallest node
        # of the right subtree), then delete that successor from the right.
        successor = root.right
        while successor.left:
            successor = successor.left
        root.val = successor.val
        root.right = self.deleteNode(root.right, successor.val)
        return root
if __name__ == "__main__":
    # Bug fix: the original passed a plain Python list as `root` (and the
    # Solution *class* as `self`), so `root.val` raised AttributeError.
    # Build the BST [5, 3, 6, 2, 4, null, 7] from real TreeNode objects.
    root = TreeNode(5, TreeNode(3, TreeNode(2), TreeNode(4)),
                    TreeNode(6, None, TreeNode(7)))
    key = 0

    def _inorder(node):
        # In-order traversal for a readable dump of the resulting tree.
        return _inorder(node.left) + [node.val] + _inorder(node.right) if node else []

    result = Solution().deleteNode(root, key)
    print("deleteNode(root, {0}): inorder {1}".format(key, _inorder(result)))
# 01
# 91 / 91 test cases passed.
# Status: Accepted
# Runtime: 64 ms
# Memory Usage: 18.3 MB
| [
"brian.su@cplcloud.com"
] | brian.su@cplcloud.com |
f424a1af31ed5989adad0175193e06d4087950fb | 95a1711ae2c903e10cf8c752341c2780f9674738 | /implementation3.py | 70ebe5eebb189c4e12fc099a650f693d11cfe4af | [] | no_license | aaronsamuel137/legal-scrape | fb305e7575bdcc41899850edbf5c70fc895beaac | ae06a7a68c8e159a670a8837878bc4ef5f74e02e | refs/heads/master | 2016-09-05T09:24:25.006822 | 2014-04-16T16:50:39 | 2014-04-16T16:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,371 | py | from multiprocessing import Process
from multiprocessing.managers import BaseManager
from concurrentqueue import ConcurrentQueue
from spider import Spider
from BeautifulSoup import BeautifulSoup
import os
import random
import json
import requests
import re
import time
import redis
import pickle
from pymongo import MongoClient
# Number of worker processes used for parsing (one of them also crawls).
NUM_PROCESSES = 4

# open an error log
# NOTE(review): this module-level handle stays open for the process lifetime
# and is shared unsynchronised by all worker processes -- confirm intended.
log = open('error.log', 'w')

# set up database connection (local MongoDB on the default port)
client = MongoClient('localhost', 27017)
db = client.legal_db
collection = db.legal

# regex for getting the links to the actual data from the main link
link_re = re.compile("http://www\.legis\.state\.pa\.us//WU01/LI/LI/CT/HTM/[0-9]+/[0-9].*\'")
def parse_urls(red_serve):
    """Drain the shared ``urls`` list held by the redis server *red_serve*.

    Each queued item is a pickled dict carrying a ``link`` url.  For every
    item: fetch the page, locate the statute link via ``link_re``, scrape the
    statute text (the ``<pre>`` body when present, otherwise every ``<p>``
    tag), store it under ``item['data']`` and insert the item into the
    ``legal`` Mongo collection.  Problems are appended to the error log.
    """
    s = requests.Session()
    # NOTE(review): llen-then-rpop is not atomic; with several workers the
    # list can empty between the two calls and rpop may return None.
    while int(red_serve.llen('urls')) > 0:
        item = red_serve.rpop('urls')
        item = pickle.loads(item)
        # keep dequing items until the queue is empty
        # print 'process {} parsing url {}'.format(os.getpid(), item['link'])
        # print 'queue size is', q.get_size()
        r = s.get(item['link'])
        # try to find link to data using regex
        matches = link_re.findall(r.text)
        if len(matches) > 0:
            link = matches[0].replace("'", '')
            r2 = s.get(link)
            soup = BeautifulSoup(r2.text)
            # try to find text surrounded by pre tag
            # this applies to some documents and not others
            pre = soup.find('pre')
            if pre is not None:
                data = pre.getText()
                item['data'] = data.strip()
            # otherwise, just get all the p tags
            else:
                try:
                    ps = soup.findAll('p')
                    text = ''
                    for p in ps:
                        text += (p.getText() + '\n')
                    item['data'] = text.strip()
                except Exception as e:
                    log.write('error occured: ' + str(e))
                    log.write('url is ' + str(link))
            try:
                collection.insert(item)
            except:
                # NOTE(review): bare except silently drops the document;
                # only this log line records the failure.
                log.write('error adding item to database')
        else:
            log.write('data link not found in page ' + item['link'])
def crawl(url, r_server):
    """Crawl the domain starting at *url*, pushing every useful link onto the
    shared redis queue via :class:`Spider`.  Once the spider finishes, this
    process joins the other workers by parsing the queued urls itself.
    """
    Spider().crawl(url, r_server)
    parse_urls(r_server)
def main(url="http://www.legis.state.pa.us/cfdocs/legis/LI/Public/cons_index.cfm"):
    """Crawl the PA legislature site and parse the queued urls in parallel.

    Bug fix: this function is invoked below as ``main()`` with no argument,
    but ``url`` was a required parameter (which the body then immediately
    shadowed with a hard-coded value), so the script crashed with a
    TypeError before doing any work.  The hard-coded url is now the default
    value, so both ``main()`` and ``main(other_url)`` work.
    """
    # connect to the local redis instance that backs the shared url queue
    r = redis.StrictRedis(host='localhost', port=6379, db=0)
    # one process crawls for urls, then joins the parsing work itself
    crawler = Process(target=crawl, args=(url, r))
    crawler.start()
    # the remaining processes only parse
    workers = [Process(target=parse_urls, args=(r,)) for _ in range(NUM_PROCESSES - 1)]
    # wait until the crawler has queued at least one url so the workers'
    # while-loop does not exit immediately
    while int(r.llen('urls')) < 1:
        pass
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    crawler.join()


main()
| [
"aaron.davis.samuel@gmail.com"
] | aaron.davis.samuel@gmail.com |
249ce324bde793fd41492fa2f8d1d0c2ce88c9cd | ed97fb5c71da7ed89235432e3971bb0ef6064f8b | /algorithms/python/290.py | 3c1bbff0685733f3cd42f905b78b0d011cbfcd85 | [
"MIT"
] | permissive | viing937/leetcode | 8241be4f8bc9234a882b98ada2e5d13b0ebcca68 | b07f7ba69f3d2a7e294f915934db302f43c0848f | refs/heads/master | 2023-08-31T18:25:06.443397 | 2023-08-31T15:31:38 | 2023-08-31T15:31:38 | 37,374,931 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | class Solution(object):
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
arr = str.split(' ')
if len(pattern) != len(arr):
return False
hashmap = {}
for i in range(len(pattern)):
if pattern[i] in hashmap.keys() and hashmap[pattern[i]] != arr[i]:
return False
hashmap[pattern[i]] = arr[i]
if hashmap.values().count(arr[i]) > 1:
return False
return True
| [
"viing937@gmail.com"
] | viing937@gmail.com |
b6d4e48bf52987ff27d8fbcd9b632965f79c0c57 | 84b05c3de823110c73d8a408ba646fdda0d6471d | /model/input_embedding.py | f0356ae3dd8d2ac478d35ed6c617a0d20be23064 | [
"MIT"
] | permissive | hyzcn/pmn_demo | dfe69a23a9cd0139ba0e14c2d7002b03173c4b67 | c3c60e55927e982f90cfa10dffd42584bf712616 | refs/heads/master | 2020-04-26T23:32:29.460862 | 2019-01-30T15:34:04 | 2019-01-30T15:34:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 755 | py | import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import math
from torch.autograd import Variable
import sys, os
sys.path.insert(0, '..')
class InputEmbedding(nn.Module):
    """Word-embedding lookup layer, optionally initialised from GloVe vectors."""

    def __init__(self, vocab_size, vec_dim=300, glove_wemb=None):
        super(InputEmbedding, self).__init__()
        self.name = 'InputEmbedding'
        self.vocab_size = vocab_size
        self.vec_dim = vec_dim
        # One extra row beyond vocab_size (index vocab_size is usable too).
        self.wembed = nn.Embedding(self.vocab_size + 1, self.vec_dim)
        if glove_wemb is not None:
            # Copy the pretrained weight matrix into the embedding table.
            self.wembed.weight.data.copy_(torch.from_numpy(glove_wemb))

    def forward(self, inds):
        # Indices are cast to LongTensor and moved to GPU 0 before lookup.
        lookup_inds = inds.type(torch.LongTensor).cuda(0)
        return self.wembed(lookup_inds)
| [
"seungwookkim@Seungs-MacBook-Pro.local"
] | seungwookkim@Seungs-MacBook-Pro.local |
f61dda7d7991beabe13b928194d318a49672a28e | 05b265cf6359e99f3953a827cc6b1f7c74418ef7 | /predict.py | 25b91d2500f57fb3fa0b3c4461abdd4878c0a53e | [] | no_license | ChunChiehHuang18/Image-Sentiment-Classification | e42259c4d3014aad7cd5dd1d13dd131241fb7183 | bbf99823bc31c2e7866f9350d3953b1f5acaebe4 | refs/heads/master | 2020-04-13T15:17:15.984956 | 2018-12-27T11:35:19 | 2018-12-27T11:35:19 | 163,286,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,089 | py | from tensorflow import keras
import pandas as pd
import numpy as np
import csv
# Path of the trained Keras model checkpoint to evaluate.
TARGET_MODEL = 'Best/64%_0.9799_VGG-7/cp-005-0.65.h5'

print('Loading ' + TARGET_MODEL + ' model')
model = keras.models.load_model(TARGET_MODEL)
model.summary()

print('Predict test data')

# Read Test CSV: each row is "<index>,<space-separated 48x48 pixel values>".
test_csv_set = pd.read_csv('test.csv', names=('index', 'features'))
test_csv_set = test_csv_set[1:]  # drop the header row that was read as data

test_data = []
for data in test_csv_set['features']:
    # Fix: np.int was deprecated and removed in NumPy 1.24; the builtin int
    # dtype is the documented replacement and behaves identically here.
    test_data.append(np.array(data.split(' ')).astype(int))
test_data = np.array(test_data)
# Reshape to (N, 48, 48, 1) grayscale images and normalise into [0, 1].
test_data = test_data.reshape([-1, 48, 48, 1]) / 255.0

predict_output = model.predict(test_data)
# argmax over the class probabilities gives the predicted label per sample.
test_output = list()
for output in predict_output:
    test_output.append(np.argmax(output))

# Write one "id,label" row per test sample, ids starting at 1.
with open('predict.csv', 'w', newline='') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['id', 'label'])
    i = 1
    for output in test_output:
        writer.writerow([i, output])
        i += 1
| [
"ChunChieh_Huang@asus.com"
] | ChunChieh_Huang@asus.com |
101a8894fb2fcb5f7139da788cb6cf2e8c2aaa6d | 407238eb7639325caddeb87d056fac2e32e707ec | /cli/parser/volt/volt.py | dcf9a30e84c3204023bba6c62a890353cdca4bc7 | [] | no_license | penlooktmp/cmd | a6383a8b0a08977acfd47adb51d8857474c0f3da | c63802b866e3c15a2ba24609241bebaad1a3451e | refs/heads/master | 2021-01-21T02:21:23.925366 | 2015-09-06T08:03:10 | 2015-09-06T08:03:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | #!/usr/bin/python
#
# Pengo Project
#
# Copyright (c) 2015 Penlook Development Team
#
# --------------------------------------------------------------------
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program.
# If not, see <http://www.gnu.org/licenses/>.
#
# --------------------------------------------------------------------
#
# Authors:
# Loi Nguyen <loint@penlook.com>
from php import *
import re
class Volt:
    """Compiler for Volt-style view templates.

    ``renderString`` substitutes ``{{ var }}`` placeholders, ``compileAll``
    turns a pre-processed ``.html`` file containing ``<?cpp ... ?>`` blocks
    into a C++ view function, and ``generateHeader`` emits the matching
    ``view.h`` declarations.
    """

    def __init__(self):
        # Stateless until setData() injects the template data.
        pass

    def renderString(self, template, data):
        """Replace every ``{{ name }}`` placeholder in *template* with
        ``data[name]`` and return the result.

        Placeholders whose name is missing from *data* are silently removed.
        """
        content = ''
        start = 0
        end = -1
        for match in re.finditer(r"\{\{[a-zA-Z0-9_\s]+\}\}", template):
            end = match.start()
            if start < end:
                # Copy the literal text preceding this placeholder.
                content += template[start:end]
            start = end + 1  # NOTE(review): overwritten below -- dead assignment?
            # "{{ var }}" splits on whitespace into ['{{', 'var', '}}'].
            var_block = template[match.start():match.end()]
            var = re.split("\s+", var_block)[1]
            if var in data:
                content += data[var]
            start = match.end()
        # Append the tail after the last placeholder.
        content += template[start:]
        # NOTE(review): '\"' equals '"' in Python source, so this replace is
        # a no-op; it looks intended to escape quotes ('\\"') -- confirm.
        content = content.replace('"', '\"')
        return content

    def compileAll(self, cppHTMLPath):
        """Translate the HTML/C++ hybrid file at *cppHTMLPath* into a pure C++
        source file written to the same path minus the ``.html`` suffix.

        Text inside ``<?cpp ... ?>`` markers is emitted as raw C++; text
        outside becomes ``view->stream += "...";`` statements.  Variables
        registered through setData() are materialised as typed locals pulled
        from the view's data bag.
        """
        templateCPP = """// AUTO GENERATED
{{ viewHeader }}
namespace app {
namespace view {
void {{ funcName }}(View *view) {
{{ variableHeader }}
{{ htmlContent }}
}\n}\n}"""
        # Concatenate the whole input file, dropping blank lines and
        # per-line leading/trailing whitespace.
        cppEmbedded = "";
        with open(cppHTMLPath, "r") as lines:
            for line in lines:
                line = line.strip()
                if len(line) > 0:
                    cppEmbedded += line
        # Prepend an empty cpp block so the split below always produces a
        # well-formed leading segment.
        cppEmbedded = "<?cpp ?>" + cppEmbedded
        cppSegments = cppEmbedded.split("<?cpp")
        cppContent = ""
        for cppSegment in cppSegments:
            # Each segment is "<c++ code>?><html>"; part 0 is raw C++.
            cppArr = cppSegment.split("?>");
            cppContent += cppArr[0].strip() + '\n';
            if len(cppArr) == 2:
                # HTML after the closing marker is streamed out verbatim,
                # with double quotes escaped for the C++ string literal.
                cppContent += 'view->stream += "' + cppArr[1].strip().replace('"', '\\"') + '";\n'
        cppPath = cppHTMLPath.split(".html")[0]
        cpp = open(cppPath, 'w')
        variableHeader = ""
        for variable in self.getData()["variables"]:
            # Each variable becomes: <Type> <Name> = view->getData()->get<Type>("Name");
            variableHeader += variable['Type'] + ' ' + variable['Name'] + ' = view->getData()->get<' + variable['Type'] + '>("' + variable['Name'] + '");\n'
        cpp.write(self.renderString(templateCPP, {
            'htmlContent' : cppContent.strip(),
            'variableHeader' : variableHeader.strip(),
            'viewHeader' : self.data["viewHeader"],
            'funcName' : self.data["funcName"]
        }))
        cpp.close()

    def setData(self, data):
        # Template data; compileAll expects at least the keys 'variables',
        # 'viewHeader' and 'funcName'.
        self.data = data

    def getData(self):
        return self.data

    def generateHeader(self, viewPath, viewStack):
        """Write ``view.h`` under *viewPath*, declaring one
        ``void <name>(View *view);`` prototype per entry of *viewStack*."""
        viewHeaderTemplate = """// AUTO GENERATED
#include <sys/type.h>
#include <sys/func.h>
#include <app/view.h>
namespace app {
namespace view {
{{ headerContent }}
}\n}"""
        cpp = open(viewPath + "/view.h", 'w')
        headerContent = ''
        for viewHeader in viewStack:
            headerContent += 'void ' + viewHeader + "(View *view);\n"
        cpp.write(self.renderString(viewHeaderTemplate, {
            'headerContent' : headerContent
        }))
        cpp.close()
def compile(self, target, dest):
php = PHP("")
code = '(new Volt\Compiler())->compileFile'
code += "('" + target +"', '" + dest + "');"
php.get_raw(code)
self.compileAll(dest) | [
"loint@penlook.com"
] | loint@penlook.com |
f5fccd5cf37b249aa0bd6ec0df11050ccceac4ba | 226b1c73a706f4734834196d18305d4d2c873589 | /synlib/descriptions/INVX12.py | d9548d8ad876fcb48facd24d7fd0a2450a47ae9a | [] | no_license | ocakgun/vlsistuff | 43b4b07ae186b8d2360d11c57cd10b861e96bcbe | 776c07f5d0c40fe7d410b5c85e7381017d4dab64 | refs/heads/master | 2022-06-13T14:40:22.641310 | 2020-05-08T11:09:00 | 2020-05-08T11:09:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | Desc = cellDescClass("INVX12")
# Standard-cell library description for INVX12 (inverter, drive strength 12).
Desc.properties["cell_leakage_power"] = "3253.878540"
Desc.properties["cell_footprint"] = "inv"
Desc.properties["area"] = "43.243200"
Desc.pinOrder = ['A', 'Y']
# Combinational timing arc from input A to output Y.
Desc.add_arc("A","Y","combi")
Desc.set_job("inv") # (!A)
Desc.add_param("area",43.243200);
Desc.add_pin("A","input")
Desc.add_pin("Y","output")
Desc.add_pin_func("Y","unknown")
# Register the cell in the global library table.
CellLib["INVX12"]=Desc
| [
"greenblat@mac.com"
] | greenblat@mac.com |
26c9c98daf102bdfdc926aacc59124df803a03cd | d1ef7fb88284dc8e108a9e38023b863f2b8d605e | /apps/demo/models.py | c90779b8bbab12d5ddc1e594db26763756665749 | [] | no_license | jcaladlean-tech/lean_tech_backend_test | 27cbb2803cca1b37fc4411f45aa7e9d63409e0f8 | 82faea2500af97756061fc7d5297d00494ac0eea | refs/heads/master | 2021-09-30T00:05:37.572493 | 2020-04-06T01:41:44 | 2020-04-06T01:41:44 | 253,083,319 | 0 | 0 | null | 2021-09-22T18:49:55 | 2020-04-04T19:32:25 | Python | UTF-8 | Python | false | false | 1,090 | py | from django.db import models
class Carrier(models.Model):
    """A freight carrier identified by its SCAC code and registration numbers."""

    scac = models.CharField(max_length=30)  # Standard Carrier Alpha Code
    name = models.CharField(max_length=30)
    MC = models.IntegerField(null=True, blank=True)    # Motor Carrier number
    DOT = models.IntegerField(null=True, blank=True)   # Dept. of Transportation number
    FEIN = models.IntegerField(null=True, blank=True)  # Federal Employer ID number

    def __str__(self):
        # Human-readable label for the Django admin and shell (the original
        # placeholder docstring gave the model no useful representation).
        return '{} ({})'.format(self.name, self.scac)
class Shipment(models.Model):
    """A load moved by a Carrier from an origin to a destination location."""

    date = models.DateTimeField(auto_now_add=True)  # record creation time
    origin_country = models.CharField(max_length=30)
    origin_state = models.CharField(max_length=30)
    origin_city = models.CharField(max_length=30)
    destination_country = models.CharField(max_length=30)
    destination_state = models.CharField(max_length=30)
    destination_city = models.CharField(max_length=30)
    pick_up_date = models.DateTimeField(null=True, blank=True)
    delivery_date = models.DateTimeField(null=True, blank=True)
    status = models.CharField(max_length=20)
    carrier_rate = models.DecimalField(max_digits=20, decimal_places=2)
    carrier_id = models.ForeignKey(Carrier, on_delete=models.CASCADE)

    def __str__(self):
        # Human-readable label for the Django admin and debugging output
        # (replaces the original placeholder docstring with a real one and
        # adds the conventional __str__).
        return 'Shipment {}: {} -> {} ({})'.format(
            self.pk, self.origin_city, self.destination_city, self.status)
| [
"juanpablo.calad@gmail.com"
] | juanpablo.calad@gmail.com |
66cba7b1d697df1b112e0741f078b2d82f7853cf | a0801d0e7325b31f0383fc68517e208680bb36d6 | /ProjectEuler/113.py | 67180d3f1e179379f2c22641ec3d5bb209b71d03 | [] | no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 523 | py | # Problem 113
def cnt(length, inc):
    """Count `length`-digit numbers (no leading zero) whose digits never
    increase left-to-right (inc=True) or never decrease (inc=False).

    Rolling-row digit DP: prev[d] holds the number of valid prefixes whose
    constraint digit is d; each step extends every prefix by one digit.
    """
    stop, step = (10, 1) if inc else (-1, -1)
    prev = [1] * 10
    prev[0] = 0  # a number may not start with 0
    for _ in range(length - 1):
        cur = [0] * 10
        for digit in range(10):
            cur[digit] = sum(prev[nxt] for nxt in range(digit, stop, step))
        prev = cur
    return sum(prev)


# Non-bouncy numbers below 10^100: monotone in either direction, minus the
# 9 constant-digit numbers per length counted by both passes.
print(sum(cnt(i, True) + cnt(i, False) - 9 for i in range(1, 101)))
| [
"conormccauley1999@gmail.com"
] | conormccauley1999@gmail.com |
214a8a89374011f125649cb61730f186d5928be4 | 6ea2ed800fd3d014dbc713f89d4f1de73a9047a8 | /app/user/user_comments/views.py | d4fa26f3c8721226de8a8564849a1df7bac47286 | [] | no_license | kishoresvk21/tech_blog_flask | d805f7d773cfd00f9c0b891b73c43c4b56567b89 | 6f16450ca09d726d49faf33abbc67f1c37fc5f3a | refs/heads/master | 2023-08-15T16:41:47.607908 | 2021-10-22T19:23:25 | 2021-10-22T19:23:25 | 419,964,851 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,078 | py | from flask import request,jsonify
from app import app,db
from datetime import datetime
from flask_restplus import Resource
from app.authentication import authentication
from app.models_package.models import User, Queries, Comments
from app.serializer import comments_serializer
from app.pagination import get_paginated_list
from app.authentication import get_user_id
class CommentCRUD(Resource):
    """REST resource for creating, editing, deleting and listing comments."""

    @authentication
    def post(self):
        """Create a comment on a query.

        Expects JSON with ``query_id``, ``user_id`` and ``comment``; both the
        query and the user must already exist.
        """
        data = request.get_json() or {}
        if not data:
            app.logger.info("No input(s)")
            return jsonify(status=400, message="No input(s)")
        query_id = data.get('query_id')
        user_id = data.get('user_id')
        queries_check = Queries.query.filter_by(id=query_id).first()
        user_check = db.session.query(User).filter_by(id=user_id).first()
        if not (queries_check and user_check):
            app.logger.info("Query_id or user_id not found or not entered")
            return jsonify(status=404, message="Query_id or user_id not found or not entered")
        comment = data.get('comment')
        if not (query_id and user_id and comment):
            app.logger.info("query_id,user_id and comment are required")
            return jsonify(status=400, message="query_id,user_id and comment are required")
        # Created-at and updated-at timestamps are identical on insert.
        today = datetime.now()
        date_time_obj = today.strftime('%Y/%m/%d %H:%M:%S')
        comm = Comments(user_id, query_id, comment, date_time_obj, date_time_obj)
        db.session.add(comm)
        db.session.commit()
        app.logger.info("comment inserterd succesfully")
        return jsonify(status=200, message="comment inserterd succesfully")

    @authentication
    def put(self):
        """Edit an existing comment (apparently restricted to the comment's
        author or an admin -- see review notes below)."""
        data = request.get_json() or {}
        if not data:
            app.logger.info("No input(s)")
            return jsonify(status=400, message="No input(s)")
        try:
            query_id = data.get('query_id')
            user_id = data.get('user_id')
            comment_id = data.get('comment_id')
            edit_comment_by_id = db.session.query(Comments).filter_by(id=comment_id).first()
            check_user = db.session.query(User).filter_by(id=user_id).first()
            check_queries_auth = db.session.query(Queries).filter_by(u_id=user_id).first()
        except:
            # NOTE(review): bare except hides real DB errors; narrow it.
            app.logger.info("comment/user/query not found")
            return jsonify("comment/user/query not found")
        edited_comment = data.get('edited_comment')
        if not (query_id and user_id and edited_comment and comment_id):
            app.logger.info("query_id , user_id , edited_comment and comment_id are required fields")
            return jsonify(status=400, message="query_id , user_id , edited_comment and comment_id are required fields")
        # NOTE(review): `check_user != 1` compares a User object with an int
        # and is therefore always True, so this guard can never fire;
        # presumably `check_user.role != 1` was intended -- confirm.
        if not (check_queries_auth or check_user != 1):
            app.logger.info("cant edit comment")
            return jsonify(status=404, message="cant edit comment")
        if not edit_comment_by_id:
            app.logger.info("Comment not found")
            return jsonify(status=400, message="Comment not found")
        # NOTE(review): this permits editing when the caller owns the comment
        # OR has role != 1; if role 1 denotes admin the check looks inverted
        # (it blocks admins and allows everyone else) -- confirm semantics.
        if not ((edit_comment_by_id.u_id == user_id) or check_user.role != 1):
            app.logger.info("User not allowed to edit")
            return jsonify(status=404, message="User not allowed to edit")
        edit_comment_by_id.msg = edited_comment
        db.session.commit()
        app.logger.info("Comment edited")
        return jsonify(status=200, message="Comment edited",
                       data={"query_id": query_id, "comment_id": comment_id, "edited_comment": edited_comment})

    @authentication
    def delete(self):
        """Delete a comment after validating user, query and comment ids."""
        data = request.get_json() or {}
        if not data:
            app.logger.info("No input(s)")
            return jsonify(status=400, message="No input(s)")
        query_id = data.get('query_id')
        user_id = data.get('user_id')
        comment_id = data.get('comment_id')
        if not (query_id and user_id and comment_id):
            app.logger.info("comment_id , user_id and query_id are required")
            return jsonify(status=200, message="Query_id , user_id and query_id are required")
        query_check = Queries.query.filter_by(id=query_id).first()
        user_check = User.query.filter_by(id=user_id).first()
        if not user_check:
            app.logger.info("User not found")
            return jsonify(status=400, message="User not found")
        if not query_check:
            app.logger.info("Query not found")
            return jsonify(status=400, message="Query not found")
        comment_check = Comments.query.filter_by(id=comment_id).first()
        if not comment_check:
            app.logger.info("Comment not found")
            return jsonify(status=400, message="Comment not found")
        # NOTE(review): uses `user_check.roles` while put() uses `.role`;
        # one of the two attribute names is presumably wrong.  The condition
        # mirrors put(): owner OR roles != 1 may delete -- confirm intent.
        if not ((comment_check.u_id == user_id) or user_check.roles != 1):
            app.logger.info("User not allowed to delete")
            return jsonify(status=404, message="User not allowed to delete")
        db.session.delete(comment_check)
        db.session.commit()
        app.logger.info("Comment deleted successfully")
        return jsonify(status=200, message="Comment deleted successfully")

    def get(self):  # send all the comments based on comment_id or u_id or q_id or send all
        """Return all comments ordered by last update, paginated, with each
        author's display name attached."""
        order_by_comment_obj = db.session.query(Comments).order_by(Comments.updated_at)
        if not order_by_comment_obj:
            app.logger.info("No Comments in DB")
            return jsonify(status=404, message="No comments in DB")
        c_list = []
        for itr in order_by_comment_obj:
            # Resolve the commenting user's name for display.
            user_name = User.query.filter_by(id=itr.u_id).first()
            dt = comments_serializer(itr, itr.u_id)
            dt['name'] = user_name.name
            c_list.append(dt)
        app.logger.info("Return comments data")
        return jsonify(status=200, data=get_paginated_list(c_list, '/comment', start=request.args.get('start', 1),
                       limit=request.args.get('limit', 3), with_params=False),
                       message="Returning comments data")
class GetCommentByQuery(Resource):
    """GET /getcomments/query?query_id=<id>: paginated comments for one query."""

    @authentication
    def get(self):
        # The authenticated caller's id is passed to the serializer
        # (presumably to flag the caller's own comments -- confirm).
        user_id = get_user_id(self)
        query_id = request.args.get('query_id')
        comment_obj = Comments.query.filter_by(q_id=query_id).all()
        if not comment_obj:
            app.logger.info("No Comments found")
            return jsonify(status=404, message="No comments found")
        comment_list = []
        # Base url used by the paginator to build next/prev links.
        page = f"/getcomments/query?query_id={query_id}"
        for itr in comment_obj:
            dt = comments_serializer(itr, int(user_id))
            comment_list.append(dt)
        app.logger.info("Return comments data")
        return jsonify(status=200, data=get_paginated_list(comment_list, page, start=request.args.get('start', 1),
                       limit=request.args.get('limit', 3), with_params=True), message="Returning queries data")
#My Contributions
class GetCommentsByUserId(Resource):
    """GET /getcomments/user/<user_id>: paginated comments by one user."""

    def get(self, user_id):  # send all the comments based on user_id
        try:
            c_list = []
            comments_obj = Comments.query.filter_by(u_id=user_id).all()
            if not comments_obj:
                app.logger.info("No Comments in DB")
                return jsonify(status=404, message="No comments in DB")
            for itr in comments_obj:
                # NOTE(review): the query already filters by u_id, so this
                # inner check is redundant (and relies on user_id being an
                # int while the route may pass a str) -- confirm.
                if itr.u_id == user_id:
                    dt = comments_serializer(itr, itr.u_id)
                    c_list.append(dt)
            user_id_str = str(user_id)
            page = '/getcomments/user/' + user_id_str
            app.logger.info("Return comments data")
            return jsonify(status=200, data=get_paginated_list(c_list, page, start=request.args.get('start', 1),
                           limit=request.args.get('limit', 3), with_params=False),
                           message="Returning comments data")
        except:
            # NOTE(review): bare except converts any serializer/pagination
            # error into a misleading "No inputs found" response -- narrow it.
            return jsonify(status=400, message="No inputs found")
| [
"svkrishnakishore2000@gmail.com"
] | svkrishnakishore2000@gmail.com |
b4957067aa8c6473420b3275cc28d7451f8b51dc | 3b74f69fb2e244df8758c7f658c49a27ba404a4c | /media/migrations/0001_initial.py | 200a385c80e25d067bd27ef201dbda32e67235b9 | [] | no_license | rummansadik/Social-Media | f5eb82985f551d99c5990930402e47db01f2f47c | d3e3b0b5e979a3943154235c569f36af5d6b9d43 | refs/heads/master | 2023-08-28T08:21:32.130743 | 2021-10-03T06:32:27 | 2021-10-03T06:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 910 | py | # Generated by Django 3.2.5 on 2021-07-17 05:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Initial migration for this app: creates the Post table.

    initial = True

    dependencies = [
        # Post.author references the project's configured user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=100)),
                ('content', models.TextField()),
                ('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"rummansadik@gmail.com"
] | rummansadik@gmail.com |
e5752efd326b351a01e04ea38dbc3983966cfb74 | a7cc210b0a8b3b7526e1364c7304c07824437f17 | /graphdata/serializers.py | 2e4842c49b52993e419f962a332f5079cd72fec9 | [] | no_license | aryamanpsingh/Stat-Comparison-Stat90 | ac8c9a25878436e6abbe62338614b985890e1366 | a5d2dc52b93e85352d39fcfcb57f39b0ca51e247 | refs/heads/master | 2022-12-14T02:27:22.577667 | 2021-03-30T21:03:58 | 2021-03-30T21:03:58 | 206,934,842 | 1 | 0 | null | 2022-04-22T22:21:21 | 2019-09-07T07:51:21 | JavaScript | UTF-8 | Python | false | false | 200 | py | from rest_framework import serializers, permissions
from .models import Player
class PlayerSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Player model."""

    class Meta:
        model = Player
        fields = '__all__'
| [
"aps010@ucsd.edu"
] | aps010@ucsd.edu |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.