blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2c1805d0145a779f1ed3d32b9e1c05c6bc8a5136 | 579b0d3df09af1341f3c83bf68d46212aa98dbc6 | /src/CsvDataImport.py | 6d0ce182546d044a7a568218e1c80cc374647db8 | [] | no_license | phowson/financialtsgan | 9234b18865eb9608a3e2425f5c8bdfdd0fdbd0a5 | eaf9bf689c1ab8bf1856c2b0601c3ad6169a4e32 | refs/heads/master | 2021-08-17T00:59:10.769735 | 2017-11-20T16:23:16 | 2017-11-20T16:23:16 | 108,844,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | py | '''
Created on 30 Oct 2017
@author: phil
'''
import csv
from datetime import datetime
def loadCsv(fileName):
    """Load a (Date, Price) time series from a CSV file.

    The file must have a header row; the 'Date' and 'Price' columns are
    located by name (falling back to column 0 when a header is missing,
    preserving the original behaviour).  Dates use the '%b %d, %Y' format,
    e.g. 'Oct 30, 2017'.  Reading stops at the first row whose date cell is
    empty.  Returns the series sorted chronologically as a list of
    (datetime, float) tuples.
    """
    timeSeries = []
    with open(fileName) as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
        # Use the next() builtin instead of calling __next__() directly.
        topRow = next(spamreader)
        dateCol = 0
        priceCol = 0
        for x, y in enumerate(topRow):
            if y == 'Date':
                dateCol = x
            if y == 'Price':
                priceCol = x
        #print("Date column = " + str(dateCol));
        #print("Price column = " + str(priceCol));
        for row in spamreader:
            ds = row[dateCol]
            if len(ds.strip()) == 0:
                break
            d = datetime.strptime(ds, '%b %d, %Y')
            p = float(row[priceCol])
            timeSeries.append((d, p))
    return sorted(timeSeries)
| [
"phil@howson.net"
] | phil@howson.net |
d5e6beb44c4d3eabfbc1f90c7e6154546b5390be | 3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9 | /RecoMuon/TrackingTools/python/__init__.py | 46c2d8c095c7740e0d099c19bde145fc026b6c15 | [] | no_license | sextonkennedy/cmssw-ib | c2e85b5ffa1269505597025e55db4ffee896a6c3 | e04f4c26752e0775bd3cffd3a936b288ee7b0268 | HEAD | 2016-09-01T20:09:33.163593 | 2013-04-26T12:05:17 | 2013-04-29T16:40:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 196 | py | #Automatically created by SCRAM
# SCRAM-generated package stub: extend this package's __path__ so the
# generated cfi Python configs under cfipython/ are importable alongside
# the CMSSW source tree.
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/RecoMuon/TrackingTools/',1)[0])+'/cfipython/slc6_amd64_gcc480/RecoMuon/TrackingTools')
| [
"giulio.eulisse@cern.ch"
] | giulio.eulisse@cern.ch |
248163892715e31f2251a0a927171b66305529c7 | 58861c1f59ecfdd234a0c6e926e50a3797570fe8 | /users/templatetags/teacher_extras.py | 9c94f70595a35331073a1b89183c796b46532ee0 | [] | no_license | edgeofstorm/django-sis | 6e4dec61b807e6be5b625999917cdd275de95270 | 4e5177c1da36a702634b954924b58e6ba2a34200 | refs/heads/master | 2022-10-24T15:06:52.244324 | 2020-06-09T06:53:28 | 2020-06-09T06:53:28 | 269,437,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 311 | py | from django import template
register = template.Library()
@register.filter
def label(value):
    """Map a single-letter branch code to its Turkish display name."""
    branch_names = {
        'M': 'Matematik',
        'S': 'Sosyal',
        'F': 'Fen',
        'T': 'Turkce',
    }
    # Unknown codes fall back to 'Brans yok' ("no branch").
    return branch_names.get(value, 'Brans yok')
"enesekinci1907@gmail.com"
] | enesekinci1907@gmail.com |
bf7af35152b7b2dba2930ac7e466f08228b4a99b | 5bbb1d717733ce136f696f0216451730d94a4117 | /script/link/rotate_link.py | 0b04dd7d799bf56cf0c631472e0cb116b449726f | [] | no_license | hongliang5623/vanyar | 8e5ffd2c2422c8bb844ce09bfccc54261e84c5be | 8945ae4353f9d32d387b102effa1202a9dfcea4c | refs/heads/master | 2020-12-14T01:17:24.352024 | 2019-10-28T15:17:39 | 2019-10-28T15:17:39 | 45,191,805 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | # -*- coding: utf-8 -*-
from utils import ListNode
# Build the sample singly linked list 2 -> 3 -> 3 -> 4 -> 4 -> 9 used by the
# rotation demos below.
head1 = ListNode(2)
n1 = ListNode(3)
n2 = ListNode(3)
n3 = ListNode(4)
n4 = ListNode(4)
n5 = ListNode(9)
head1.next = n1
n1.next = n2
n2.next = n3
n3.next = n4
n4.next = n5
# Walk the list once and print every value (Python 2 print syntax).
print 'source------------->>'
source = head1
while source:
    print source.val
    source = source.next
print 'run ------------->>'
def rotateRight(head, k):
    """Rotate the list right by k places by closing it into a ring and
    cutting at the new tail; returns the new head.

    Fixes two defects in the original: (1) when k was a multiple of the
    list length it returned the list still joined into a ring, and
    (2) it overwrote the new tail's value with a debug constant (100000)
    and emitted debug prints while walking.
    """
    if head is None:
        return head
    dummy = ListNode(0)
    dummy.next = head
    p = dummy
    count = 0
    while p.next:
        p = p.next
        count += 1
    p.next = dummy.next  # temporarily close the list into a ring
    step = count - (k % count)
    if k == 0 or step == 0:
        p.next = None  # reopen the ring before returning the unchanged list
        return head
    for i in range(0, step):
        p = p.next
    head = p.next
    p.next = None  # cut the ring; p is the new tail
    return head
def rotateRight2(head, k):
    """Rotate the list right by k places using two pointers.

    :type head: ListNode
    :type k: int
    :rtype: ListNode

    Fix: removed the debug prints from the original — one used Python 2
    `print '...'` syntax (a SyntaxError under Python 3) while the others
    printed tuples under Python 2; neither was intended output.
    """
    if not head:
        return None
    if not head.next:
        return head
    # Count the nodes so the rotation amount can be reduced modulo length.
    pointer = head
    count = 1
    while pointer.next:
        pointer = pointer.next
        count += 1
    rotateTimes = k % count
    if not k or not rotateTimes:
        return head
    # Advance `fast` so that when it reaches the last node, `slow` sits on
    # the new head and `tmp` on the new tail.
    slow = fast = head
    for i in range(rotateTimes - 1):
        fast = fast.next
    tmp = ListNode(0)
    while fast.next:
        tmp = slow
        slow = slow.next
        fast = fast.next
    tmp.next = None   # detach the new tail from the new head
    fast.next = head  # old tail now points at the old head
    return slow
# Demo: rotate the sample list right by 2 and print the resulting values.
result = rotateRight2(head1, 2)
print 'result---------->>>>'
while result:
    print result.val
    result = result.next
| [
"zhanghliang5623@126.com"
] | zhanghliang5623@126.com |
30cf962896b81a1ff5a5aea7a5879beda8bb74af | d79da91814263cd6e6c926ed857628c63f6069bf | /Instalador.py | faf3949a9faae20067b6f001bd507fe63eccc97c | [] | no_license | xXYouTuberXx/Install | ee536a5f308421e1e8646dd515d7a69645dbcee8 | 253cb7870bf6bd573156dbca1a7080078badda5a | refs/heads/master | 2020-08-21T23:50:12.002581 | 2019-10-19T21:54:17 | 2019-10-19T21:54:17 | 216,275,127 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import os
import colorama
from colorama import Fore
os.system("cls")
print(Fore.CYAN + "Bienvenido A La Tool Para Descargar Python3")
os.system("yum install centos-release-scl -y")
os.system("clear")
os.system("yum info rh-python35")
os.system("clear")
os.system("yum install rh-python35 -y")
os.system("clear")
os.system("scl enable rh-python35 bash")
os.system("clear")
os.system("scl -l")
os.system("clear")
os.system("pip3 install colorama")
os.system("clear")
os.system("pip3 install cfscrape")
os.system("clear")
| [
"noreply@github.com"
] | xXYouTuberXx.noreply@github.com |
f11d53867f5e9df655f10499cc0eab4526895b30 | a8c4e40349f357dae3c0f2398504d3354762ef26 | /scripts/xsBrReader.py | c9d49a90efc156cc0cec8a8bdaec4887e2543200 | [] | no_license | andreypz/nwu-dalitz-analysis | 3bab901f5b30d7e74c7ee0954906bd9d1c36c564 | b2c15ba08910bc6298319ba7b5b59d34249e7d7a | refs/heads/master | 2020-04-15T07:50:20.678794 | 2016-02-04T16:56:40 | 2016-02-04T16:56:40 | 11,186,577 | 0 | 0 | null | 2018-07-25T12:52:27 | 2013-07-04T21:05:07 | C | UTF-8 | Python | false | false | 2,442 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import csv
#from array import *
class AutoVivification(dict):
    """Implementation of perl's autovivification feature.

    Accessing a missing key creates (and stores) a new nested
    ``AutoVivification`` at that key, so ``d['a']['b'] = 1`` works without
    pre-creating ``d['a']``.
    """

    def __missing__(self, item):
        # dict.__getitem__ calls __missing__ on a key miss; storing the new
        # child here is the idiomatic replacement for the original
        # try/except override of __getitem__.
        value = self[item] = type(self)()
        return value
# Nested autovivified dicts keyed as [YR][TeV][production mode][mass]:
# central cross sections, scale uncertainties and PDF uncertainties.
xsDict = AutoVivification()
xsScaleErrDict = AutoVivification()
xsPDFErrDict = AutoVivification()
YR = 'YR3'
TeV = '8TeV'
xsPrecision = '%.4f'
# Python 2 style: the CSV is opened in binary mode and never explicitly closed.
spamReader = csv.reader(open('../data/Higgs_XSBR_'+YR+'_SM_'+TeV+'.csv', 'rb'), delimiter=',')
for i, row in enumerate(spamReader):
    #if i<10: print i, row[0],row[1],row[8],row[15],row[24],row[32]
    #if i<10: print i, row[0],row[1],row[2],row[3],row[4],row[5]
    #if i<10: print i, row[0],row[8],row[9],row[10],row[11],row[12]
    #if i<10: print i, row[0],row[15],row[16],row[17],row[18],row[19]
    #if i<10: print i, row[0],row[24],row[25],row[26],row[27],row[28]
    # Skip the first 5 header rows and stop after row 289 (last data row).
    if i < 5: continue
    if i > 289: break
    mass = '%.1f'% float(row[0])
    # Per production mode: central value column, then +/- scale and +/- PDF
    # percent errors converted to multiplicative down/up factors.
    xsDict[YR][TeV]['ggF'][mass] = xsPrecision % float(row[1])
    xsScaleErrDict[YR][TeV]['ggF'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[3])),1+0.01*abs(float(row[2])))
    xsPDFErrDict[YR][TeV]['ggF'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[5])),1+0.01*abs(float(row[4])))
    xsDict[YR][TeV]['VBF'][mass] = xsPrecision % float(row[8])
    xsScaleErrDict[YR][TeV]['VBF'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[10])),1+0.01*abs(float(row[9])))
    xsPDFErrDict[YR][TeV]['VBF'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[12])),1+0.01*abs(float(row[11])))
    xsDict[YR][TeV]['WH'][mass] = xsPrecision % float(row[15])
    xsScaleErrDict[YR][TeV]['WH'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[17])),1+0.01*abs(float(row[16])))
    xsPDFErrDict[YR][TeV]['WH'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[19])),1+0.01*abs(float(row[18])))
    xsDict[YR][TeV]['ZH'][mass] = xsPrecision % float(row[24])
    xsScaleErrDict[YR][TeV]['ZH'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[26])),1+0.01*abs(float(row[25])))
    xsPDFErrDict[YR][TeV]['ZH'][mass] = '%.3f/%.3f' % (1-0.01*abs(float(row[28])),1+0.01*abs(float(row[27])))
    xsDict[YR][TeV]['ttH'][mass] = xsPrecision % float(row[32])
print 'The end of CSV file reader'
if __name__ == "__main__":
    # Smoke test: print the stored values for WH production at mH = 125 GeV.
    sig = 'WH'
    m = '125.0'
    print 'xs = ', xsDict[YR][TeV][sig][m]
    print 'scale = ', xsScaleErrDict[YR][TeV][sig][m]
    print 'pdf = ', xsPDFErrDict[YR][TeV][sig][m]
| [
"Andrey.Pozdnyakov@cern.ch"
] | Andrey.Pozdnyakov@cern.ch |
df19118034c3cc0c1d7999accde9e65493bf98dd | 137330c75d485f5a258514f377c17b04616a9fe0 | /todo/migrations/0002_auto_20170705_1011.py | cedc9faeb7ebbb3060f413b4276289b72ac3d807 | [] | no_license | asamolion/django-todo | 5aca2a99d85f52e9c2680f2c96ce01bc8ba75477 | 9ffa93c75f27de8facaca2df20f9d21102237a0c | refs/heads/master | 2021-05-23T06:07:41.163813 | 2019-02-15T18:43:29 | 2019-02-15T18:43:29 | 94,775,564 | 0 | 0 | null | 2019-02-15T18:43:30 | 2017-06-19T12:43:06 | Python | UTF-8 | Python | false | false | 589 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-05 10:11
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Alters TodoItem.user to add related_name='todoitems' (reverse accessor
    # becomes user.todoitems instead of the default todoitem_set), keeping
    # the CASCADE on-delete behaviour.

    dependencies = [
        ('todo', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todoitem',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='todoitems', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"osama.arshad@arbisoft.com"
] | osama.arshad@arbisoft.com |
afa46d68ecf6d61c6df7864fbb08ae004dd62027 | 47a3a59288792f654309bfc9ceb6cbfa890720ef | /ramda/pick_all_test.py | c1dcb75e4e093273553227d190646a1b2ddff6d4 | [
"MIT"
] | permissive | jakobkolb/ramda.py | 9531d32b9036908df09107d2cc19c04bf9544564 | 982b2172f4bb95b9a5b09eff8077362d6f2f0920 | refs/heads/master | 2023-06-23T00:46:24.347144 | 2021-02-01T16:47:51 | 2021-02-01T16:48:25 | 388,051,418 | 0 | 0 | MIT | 2021-07-21T16:31:45 | 2021-07-21T08:40:22 | null | UTF-8 | Python | false | false | 317 | py | from ramda import *
from ramda.private.asserts import *
def pick_all_test():
    """pick_all returns every requested key, filling missing ones with None."""
    sample = {"a": 1, "b": 2, "c": 3, "d": 4}
    assert_equal(pick_all(["a", "d"], sample), {"a": 1, "d": 4})
    assert_equal(
        pick_all(["a", "e", "f"], sample),
        {"a": 1, "e": None, "f": None},
    )
| [
"slava.ganzin@gmail.com"
] | slava.ganzin@gmail.com |
c0451feba3b9104098b17797439c15ddcf7359b4 | eab8b9d305a6f77c206853bed6495ab5d5ecba87 | /eosweb/redditauth/main.py | a2ec860918c419cd05436d4ab30087b50d2b4b98 | [] | no_license | AnswerMeNow1/Gaiaren | 0ea1ce2ab5fb9ffa03dfd66b6fa92555b42c32ab | 4b5177e16b1436f3abed93e668eafad5b9027934 | refs/heads/master | 2021-05-12T04:33:28.791287 | 2018-01-11T13:12:27 | 2018-01-11T13:12:27 | 117,166,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | # Eos - Verifiable elections
# Copyright © 2017 RunasSudo (Yingtong Li)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from flask_oauthlib.client import OAuth
import flask
from eos.redditauth.election import *
import base64
import uuid
def main(app):
	# Register the Reddit OAuth2 login flow on the Flask app.  The remote app
	# authenticates to Reddit's token endpoint with HTTP Basic client
	# credentials and requests only the 'identity' scope.
	oauth = OAuth()
	reddit = oauth.remote_app('Reddit',
		request_token_url=None,
		authorize_url='https://www.reddit.com/api/v1/authorize.compact',
		request_token_params={'duration': 'temporary', 'scope': 'identity'},
		access_token_url='https://www.reddit.com/api/v1/access_token',
		access_token_method='POST',
		access_token_headers={
			'Authorization': 'Basic ' + base64.b64encode('{}:{}'.format(app.config['REDDIT_OAUTH_CLIENT_ID'], app.config['REDDIT_OAUTH_CLIENT_SECRET']).encode('ascii')).decode('ascii'),
			'User-Agent': app.config['REDDIT_USER_AGENT']
		},
		consumer_key=app.config['REDDIT_OAUTH_CLIENT_ID'],
		consumer_secret=app.config['REDDIT_OAUTH_CLIENT_SECRET']
	)
	
	@app.route('/auth/reddit/login')
	def reddit_login():
		# Send the user to Reddit's authorize page with a fresh UUID as state.
		# NOTE(review): the state value is not checked in the callback below —
		# confirm CSRF protection is handled elsewhere.
		return reddit.authorize(callback=app.config['BASE_URI'] + flask.url_for('reddit_oauth_authorized'), state=uuid.uuid4())
	
	@reddit.tokengetter
	def get_reddit_oauth_token():
		# flask_oauthlib calls this to fetch the stored token for API requests.
		return (flask.session.get('user').oauth_token, '')
	
	@app.route('/auth/reddit/oauth_callback')
	def reddit_oauth_authorized():
		resp = reddit.authorized_response()
		if resp is None:
			# Request denied
			return flask.redirect(flask.url_for('login_cancelled'))
		
		# Store a RedditUser carrying the access token in the session, then
		# resolve the username via Reddit's identity endpoint.
		user = RedditUser()
		user.oauth_token = resp['access_token']
		flask.session['user'] = user
		
		me = reddit.get('https://oauth.reddit.com/api/v1/me', headers={
			'User-Agent': app.config['REDDIT_USER_AGENT']
		})
		user.username = me.data['name']
		
		return flask.redirect(flask.url_for('login_complete'))
| [
"runassudo@yingtongli.me"
] | runassudo@yingtongli.me |
e185236b6376cf931550d58de7dbc40d13c29ad2 | 1e0e610166b36e5c73e7ff82c4c0b8b1288990bf | /scrapy/scrapy28.py | 6f5341d26d3402dea78b84c8a0d584f884360945 | [] | no_license | PythonOpen/PyhonProjects | 4ef1e70a971b9ebd0eb6a09e63e22581ad302534 | ede93314009564c31aa586d2f89ed8b1e4751c1b | refs/heads/master | 2022-05-20T23:21:03.536846 | 2020-04-27T00:59:32 | 2020-04-27T00:59:32 | 250,142,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 141 | py | import re
'''
findall
'''
hello = u'您好,世界!'
s = r'[\u4e00-\u9fa5]+'
pattern = re.compile(s)
m = pattern.findall(hello)
print(m)
| [
"1083138609@qq.com"
] | 1083138609@qq.com |
52bdb915c73f58f6aa5005d772274b3a67ecca7b | fe6ff9ab3f409a9c66748ded944109cdb342a9e1 | /tenspark/tenspark.py | 59ddf7918303641d45a84ec391adfa9f8121416b | [
"Apache-2.0"
] | permissive | saurabh-deochake/Tenspark | 88a2fec80a9580e0f0fa2cbddf1110e7c4afd74c | 68b0ed74bff7aeb564a19aedf14e47948aed66de | refs/heads/master | 2021-08-28T10:50:30.846085 | 2017-12-12T01:40:34 | 2017-12-12T01:40:34 | 109,049,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,154 | py | import numpy as np
import os
import urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
# Open the default webcam (device 0); frames are read in the detection loop
# at the bottom of the script.
cap = cv2.VideoCapture(0)
# This is needed since the notebook is stored in the object_detection folder.
sys.path.append("..")
# ## Object detection imports
# Here are the imports from the object detection module.
# In[3]:
import utils
# from utils import visualization_utils as vis_util
# # Model preparation
# ## Variables
#
# Any model exported using the `export_inference_graph.py` tool can be loaded here simply by changing `PATH_TO_CKPT` to point to a new .pb file.
#
# By default we use an "SSD with Mobilenet" model here. See the [detection model zoo](https://github.com/tensorflow/models/blob/master/object_detection/g3doc/detection_model_zoo.md) for a list of other models that can be run out-of-the-box with varying speeds and accuracies.
# In[4]:
# What model to download.
MODEL_NAME = 'ssd_mobilenet_v1_coco_11_06_2017'
MODEL_FILE = MODEL_NAME + '.tar.gz'
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/'
# Path to frozen detection graph. This is the actual model that is used for the object detection.
PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
# List of the strings that is used to add correct label for each box.
PATH_TO_LABELS = "/home/sdeochake/projects/Tenspark/tenspark/object_detection/data/mscoco_label_map.pbtxt" #os.path.join('data', 'mscoco_label_map.pbtxt')
NUM_CLASSES = 90
# ## Download Model
# In[5]:
# NOTE(review): urllib.FancyURLopener is the Python 2 location of this class;
# under Python 3 it lives at urllib.request.FancyURLopener — confirm which
# interpreter this script targets.
opener = urllib.FancyURLopener()
opener.retrieve(DOWNLOAD_BASE + MODEL_FILE, MODEL_FILE)
# Extract only the frozen inference graph from the downloaded tarball.
tar_file = tarfile.open(MODEL_FILE)
for file in tar_file.getmembers():
  file_name = os.path.basename(file.name)
  if 'frozen_inference_graph.pb' in file_name:
    tar_file.extract(file, os.getcwd())
# ## Load a (frozen) Tensorflow model into memory.
# In[6]:
detection_graph = tf.Graph()
with detection_graph.as_default():
  od_graph_def = tf.GraphDef()
  with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
    serialized_graph = fid.read()
    od_graph_def.ParseFromString(serialized_graph)
    tf.import_graph_def(od_graph_def, name='')
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
# In[7]:
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
# ## Helper code
# In[8]:
def load_image_into_numpy_array(image):
    """Convert a PIL-style image into an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
# # Detection
# In[9]:
# For the sake of simplicity we will use only 2 images:
# image1.jpg
# image2.jpg
# If you want to test the code with your images, just add path to the images to the TEST_IMAGE_PATHS.
PATH_TO_TEST_IMAGES_DIR = 'test_images'
TEST_IMAGE_PATHS = [ os.path.join(PATH_TO_TEST_IMAGES_DIR, 'image{}.jpg'.format(i)) for i in range(1, 3) ]
# Size, in inches, of the output images.
IMAGE_SIZE = (12, 8)
# NOTE(review): TEST_IMAGE_PATHS and IMAGE_SIZE are not referenced by the
# detection loop below, which reads webcam frames instead of still images.
# In[10]:
with detection_graph.as_default():
  with tf.Session(graph=detection_graph) as sess:
    # Main loop: grab a webcam frame, run one detection pass, draw the boxes
    # and show the frame; press 'q' to quit.
    while True:
      ret, image_np = cap.read()
      # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
      image_np_expanded = np.expand_dims(image_np, axis=0)
      image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
      # Each box represents a part of the image where a particular object was detected.
      boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
      # Each score represent how level of confidence for each of the objects.
      # Score is shown on the result image, together with the class label.
      scores = detection_graph.get_tensor_by_name('detection_scores:0')
      classes = detection_graph.get_tensor_by_name('detection_classes:0')
      num_detections = detection_graph.get_tensor_by_name('num_detections:0')
      # Actual detection.
      (boxes, scores, classes, num_detections) = sess.run(
          [boxes, scores, classes, num_detections],
          feed_dict={image_tensor: image_np_expanded})
      # Visualization of the results of a detection.
      vis_util.visualize_boxes_and_labels_on_image_array(
          image_np,
          np.squeeze(boxes),
          np.squeeze(classes).astype(np.int32),
          np.squeeze(scores),
          category_index,
          use_normalized_coordinates=True,
          line_thickness=8)
      cv2.imshow('object detection', cv2.resize(image_np, (800,600)))
      if cv2.waitKey(25) & 0xFF == ord('q'):
        cv2.destroyAllWindows()
        break
"noreply@github.com"
] | saurabh-deochake.noreply@github.com |
c40ac47c517727668db2d5ecdab88a29f78e49cd | df7f13ec34591fe1ce2d9aeebd5fd183e012711a | /hata/discord/application_command/application_command_option_metadata/tests/test__ApplicationCommandOptionMetadataFloat__magic.py | 5f59bf9f28ed267aba57cdf79310534c0cbe8d27 | [
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | HuyaneMatsu/hata | 63e2f6a2d7a7539fd8f18498852d9d3fe5c41d2e | 53f24fdb38459dc5a4fd04f11bdbfee8295b76a4 | refs/heads/master | 2023-08-20T15:58:09.343044 | 2023-08-20T13:09:03 | 2023-08-20T13:09:03 | 163,677,173 | 3 | 3 | Apache-2.0 | 2019-12-18T03:46:12 | 2018-12-31T14:59:47 | Python | UTF-8 | Python | false | false | 2,571 | py | import vampytest
from ...application_command_option_choice import ApplicationCommandOptionChoice
from ..float import ApplicationCommandOptionMetadataFloat
def test__ApplicationCommandOptionMetadataFloat__repr():
    """
    Tests whether ``ApplicationCommandOptionMetadataFloat.__repr__`` works as intended.
    """
    option_metadata = ApplicationCommandOptionMetadataFloat(
        required = True,
        autocomplete = True,
        choices = [ApplicationCommandOptionChoice('19', 19.0), ApplicationCommandOptionChoice('18', 18.0)],
        max_value = 10.0,
        min_value = 20.0,
    )
    
    output = repr(option_metadata)
    vampytest.assert_instance(output, str)
def test__ApplicationCommandOptionMetadataFloat__hash():
    """
    Tests whether ``ApplicationCommandOptionMetadataFloat.__hash__`` works as intended.
    """
    option_metadata = ApplicationCommandOptionMetadataFloat(
        required = True,
        autocomplete = True,
        choices = [ApplicationCommandOptionChoice('19', 19.0), ApplicationCommandOptionChoice('18', 18.0)],
        max_value = 10.0,
        min_value = 20.0,
    )
    
    output = hash(option_metadata)
    vampytest.assert_instance(output, int)
def test__ApplicationCommandOptionMetadataFloat__eq():
    """
    Tests whether ``ApplicationCommandOptionMetadataFloat.__eq__`` works as intended.
    """
    keyword_parameters = {
        'required': True,
        'autocomplete': True,
        'choices': [ApplicationCommandOptionChoice('19', 19.0), ApplicationCommandOptionChoice('18', 18.0)],
        'max_value': 10.0,
        'min_value': 20.0,
    }
    
    option_metadata = ApplicationCommandOptionMetadataFloat(**keyword_parameters)
    
    # Identity / different-type comparisons.
    vampytest.assert_eq(option_metadata, option_metadata)
    vampytest.assert_ne(option_metadata, object())
    
    # Altering any single field must break equality.
    for altered_field_name, altered_field_value in (
        ('required', False),
        ('autocomplete', False),
        ('choices', None),
        ('max_value', 11.0),
        ('min_value', 12.0),
    ):
        altered_metadata = ApplicationCommandOptionMetadataFloat(
            **{**keyword_parameters, altered_field_name: altered_field_value},
        )
        vampytest.assert_ne(option_metadata, altered_metadata)
| [
"re.ism.tm@gmail.com"
] | re.ism.tm@gmail.com |
1cb3e56193bf9836e2e816dd830b90e36338db8b | 490ffe1023a601760ae7288e86723f0c6e366bba | /kolla-docker/python-zunclient/zunclient/osc/v1/images.py | 4af97886c8048a0d97904f5372e7928a39594407 | [
"Apache-2.0"
] | permissive | bopopescu/Cloud-User-Management | 89696a5ea5d2f95191327fbeab6c3e400bbfb2b8 | 390988bf4915a276c7bf8d96b62c3051c17d9e6e | refs/heads/master | 2022-11-19T10:09:36.662906 | 2018-11-07T20:28:31 | 2018-11-07T20:28:31 | 281,786,345 | 0 | 0 | null | 2020-07-22T21:26:07 | 2020-07-22T21:26:06 | null | UTF-8 | Python | false | false | 5,747 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from osc_lib.command import command
from osc_lib import utils
from zunclient.common import utils as zun_utils
def _image_columns(image):
return image._info.keys()
def _get_client(obj, parsed_args):
    # Log the invocation, then hand back the shared container-service client
    # from the OSC client manager.
    obj.log.debug("take_action(%s)" % parsed_args)
    return obj.app.client_manager.container
class ListImage(command.Lister):
    """List available images"""

    log = logging.getLogger(__name__ + ".ListImage")

    def get_parser(self, prog_name):
        parser = super(ListImage, self).get_parser(prog_name)
        parser.add_argument(
            '--marker',
            default=None,
            metavar='<marker>',
            help='The last image UUID of the previous page; '
                 'displays list of images after "marker".')
        parser.add_argument(
            '--limit',
            type=int,
            metavar='<limit>',
            help='Maximum number of images to return')
        parser.add_argument(
            '--sort-key',
            metavar='<sort-key>',
            help='Column to sort results by')
        parser.add_argument(
            '--sort-dir',
            choices=['desc', 'asc'],
            metavar='<sort-dir>',
            help='Direction to sort. "asc" or "desc".')
        return parser

    def take_action(self, parsed_args):
        client = _get_client(self, parsed_args)
        # Strip the options the user did not supply before querying.
        opts = zun_utils.remove_null_parms(
            marker=parsed_args.marker,
            limit=parsed_args.limit,
            sort_key=parsed_args.sort_key,
            sort_dir=parsed_args.sort_dir,
        )
        images = client.images.list(**opts)
        columns = ('uuid', 'image_id', 'repo', 'tag', 'size')
        rows = (utils.get_item_properties(image, columns) for image in images)
        return (columns, rows)
class PullImage(command.ShowOne):
    """Pull specified image"""

    log = logging.getLogger(__name__ + ".PullImage")

    def get_parser(self, prog_name):
        parser = super(PullImage, self).get_parser(prog_name)
        parser.add_argument(
            'image',
            metavar='<image>',
            help='Name of the image')
        return parser

    def take_action(self, parsed_args):
        client = _get_client(self, parsed_args)
        # Ask the service to pull the image identified by its repository name.
        image = client.images.create(repo=parsed_args.image)
        columns = _image_columns(image)
        return columns, utils.get_item_properties(image, columns)
class SearchImage(command.Lister):
    """Search specified image"""

    log = logging.getLogger(__name__ + ".SearchImage")

    def get_parser(self, prog_name):
        parser = super(SearchImage, self).get_parser(prog_name)
        parser.add_argument(
            '--image-driver',
            metavar='<image-driver>',
            help='Name of the image driver')
        parser.add_argument(
            'image_name',
            metavar='<image_name>',
            help='Name of the image')
        parser.add_argument(
            '--exact-match',
            default=False,
            action='store_true',
            help='exact match image name')
        return parser

    def take_action(self, parsed_args):
        client = _get_client(self, parsed_args)
        # Unset options are stripped so the service only sees supplied filters.
        opts = zun_utils.remove_null_parms(
            image_driver=parsed_args.image_driver,
            image=parsed_args.image_name,
            exact_match=parsed_args.exact_match,
        )
        images = client.images.search_image(**opts)
        columns = ('ID', 'Name', 'Tags', 'Status', 'Size', 'Metadata')
        rows = (utils.get_item_properties(image, columns) for image in images)
        return (columns, rows)
class ShowImage(command.ShowOne):
    """Describe a specific image"""

    log = logging.getLogger(__name__ + ".ShowImage")

    def get_parser(self, prog_name):
        parser = super(ShowImage, self).get_parser(prog_name)
        parser.add_argument(
            'uuid',
            metavar='<uuid>',
            help='UUID of image to describe')
        return parser

    def take_action(self, parsed_args):
        client = _get_client(self, parsed_args)
        # Fetch the single image record by UUID.
        image = client.images.get(id=parsed_args.uuid)
        columns = _image_columns(image)
        return columns, utils.get_item_properties(image, columns)
class DeleteImage(command.Command):
    """Delete specified image"""

    log = logging.getLogger(__name__ + ".DeleteImage")

    def get_parser(self, prog_name):
        parser = super(DeleteImage, self).get_parser(prog_name)
        parser.add_argument(
            'uuid',
            metavar='<uuid>',
            # FIX: help text said "describe" (copy-paste from ShowImage).
            help='UUID of image to delete')
        return parser

    def take_action(self, parsed_args):
        # Best-effort delete: failures are reported to the user, not raised,
        # matching the other commands' CLI style.
        client = _get_client(self, parsed_args)
        img_id = parsed_args.uuid
        try:
            client.images.delete(img_id)
            # FIX: the original wrapped this message in _() but no translation
            # function is imported in this module, so the success path raised
            # NameError.  Print the plain message instead.
            print('Request to delete image %s has been accepted.' % img_id)
        except Exception as e:
            print("Delete for image %(image)s failed: %(e)s" %
                  {'image': img_id, 'e': e})
| [
"Mr.Qinlichao@hotmail.com"
] | Mr.Qinlichao@hotmail.com |
c451e110590706a8ce83b4af109c550c22964a4e | c1f951f3f5bb0569e114ccf6dd35e40e168e7c08 | /pix2pix_freq_time_domain.py | 41e4ca4677d4fbdb854b701e6993a72a15d57ccf | [] | no_license | uaiu/GAN | b7cf71ca49f78f606761041702a3ac70e1f015cc | d31817a1739e0b5623edff117d157d7ba98efe8d | refs/heads/main | 2023-06-27T13:57:49.962105 | 2021-08-06T07:44:09 | 2021-08-06T07:44:09 | 389,370,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,237 | py | #%%
import tensorflow as tf
import pandas as pd
import os
import time
from matplotlib import pyplot as plt
from IPython import display
import librosa
import numpy as np
#%% GPU availability/configuration check for Spyder runs; not needed on Colab.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    try:
        # Currently, memory growth needs to be the same across GPUs
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
    except RuntimeError as e:
        # Memory growth must be set before GPUs have been initialized
        print(e)
#%% 여기는 clean과 noise 데이터를 불러와서 파일명의 리스트를 변수로 설정해주는 코드
clean_path = 'C:/Users/NGV32/.spyder-py3/hyundai_project/data/clean_wav/'
noise_path = 'C:/Users/NGV32/.spyder-py3/hyundai_project/data/mix_wav/'
clean_list_arrange = np.array(os.listdir(clean_path))
noise_list_arrange = np.array(os.listdir(noise_path))
#%% 지금처럼 데이터가 많아졌을때, 여러 차종의 데이터가 있기 때문에 이를 섞기 위해서 섞을 인덱스를 설정
idx = np.arange(clean_list_arrange.shape[0])
np.random.shuffle(idx)
#%% 여기는 인덱스는 항상 랜덤배치이기에 프로그램을 다시실행할때마다 새로설정되는데 이전 인덱스를 불러오고 싶을때 csv파일로 저장한다.
idx_df = pd.DataFrame(idx)
idx_df.to_csv('C:/Users/NGV32/.spyder-py3/hyundai_project/idx.csv', index = False)
#%% 해당 csv파일을 불러온다.
idx_recall = np.array(pd.read_csv('C:/Users/NGV32/.spyder-py3/hyundai_project/idx.csv'))
#%% 랜덤 배치한 idx를 통해서 clean과 noise를 동일한 순서로 재배치 한다.
clean_list = clean_list_arrange[idx_recall[:, 0]]
noise_list = noise_list_arrange[idx_recall[:, 0]]
#%%
def normalization(noise, clean, a, b, Max, Min):
    """Min-max scale the noise and clean arrays from [Min, Max] into [a, b]."""
    span = b - a
    clean_normal = (span * (clean - Min)) / (Max - Min) + a
    noise_normal = (span * (noise - Min)) / (Max - Min) + a
    return noise_normal, clean_normal
def denormalization(data, a, b, Max, Min):
    """Invert min-max scaling: map data from [a, b] back into [Min, Max]
    (applied to GAN output before converting back from the STFT domain)."""
    span = Max - Min
    return ((data - a) * span) / (b - a) + Min
#%%
hop_length = 257 # number of samples per time-step in spectrogram
n_fft = 510
# Preallocate (N, 256, 256, 2) float32 tensors: channel 0 holds the
# normalised log-magnitude STFT, channel 1 the raw waveform arranged as a
# 256x256 picture (filled in the loading loop below).
noise = np.zeros((len(noise_list), 256, 256, 2)).astype('float32')
clean = np.zeros((len(clean_list), 256, 256, 2)).astype('float32')
#%%
class ConvTasNetParam:
    """Framing parameters for slicing a waveform into (That, L) blocks."""

    __slots__ = 'causal', 'That', 'L', 'overlap'

    def __init__(self,
                 causal: bool = False,
                 That: int = 256,
                 L: int = 16,
                 overlap: int = 0):
        # Adjacent frames may overlap by at most half the frame length.
        if 2 * overlap > L:
            raise ValueError('`overlap` cannot be greater than half of `L`!')
        self.causal = causal
        self.That = That
        self.L = L
        self.overlap = overlap
#%%
def wave_to_spec(audio, param = ConvTasNetParam()):
    """Pack a 1-D waveform into a 2-D (That, num_portions * L) array.

    The signal is cut into frames of L samples (consecutive frames share
    `overlap` samples); That frames form one portion, and portions are laid
    out side by side along axis 1.  Trailing samples that do not fill a
    whole portion are discarded.

    NOTE(review): the default `param` instance is created once at function
    definition time and shared between calls; harmless here because it is
    never mutated.
    """
    num_samples = audio.shape[0]
    # Number of whole portions that fit into the signal.
    num_portions = (num_samples - param.overlap) // (param.That * (param.L - param.overlap))
    num_samples_output = num_portions * param.That * (param.L - param.overlap)
    num_samples = num_samples_output + param.overlap
    # Drop the remainder that does not fill a whole portion.
    audio = audio[:num_samples]
    model_input = np.zeros((num_portions, param.That, param.L))
    for i in range(num_portions):
        for j in range(param.That):
            # Start of frame j of portion i, advancing by (L - overlap) samples.
            begin = (i * param.That + j) * (param.L - param.overlap)
            end = begin + param.L
            model_input[i][j] = audio[begin:end]
    # Concatenate the portions horizontally into a single "picture".
    whole_picture = np.zeros((param.That, num_portions*param.L))
    for i in range(num_portions):
        whole_picture[:param.That, i*param.L:i*param.L+param.L] = model_input[i]
    return whole_picture
#%%
mag_max = []
mag_min = []
for i in range(len(noise)):
noi, sr = librosa.load(noise_path + noise_list[i], sr=16384)
cle, sr = librosa.load(clean_path + clean_list[i], sr=16384)
stft_noise = librosa.stft(noi, n_fft=n_fft, hop_length=hop_length)
stft_clean = librosa.stft(cle, n_fft=n_fft, hop_length=hop_length)
magnitude_noise = np.abs(stft_noise)
magnitude_clean = np.abs(stft_clean)
log_magnitude_noise = librosa.amplitude_to_db(magnitude_noise)
log_magnitude_clean = librosa.amplitude_to_db(magnitude_clean)
mag_max.append(np.max((log_magnitude_noise, log_magnitude_clean)))
mag_min.append(np.min((log_magnitude_noise, log_magnitude_clean)))
log_magnitude_noise, log_magnitude_clean = normalization(log_magnitude_noise,
log_magnitude_clean,
-1, 1,
np.max((log_magnitude_noise, log_magnitude_clean)),
np.min((log_magnitude_noise, log_magnitude_clean)))
noise[i,:,:,0] = log_magnitude_noise.astype('float32')
clean[i,:,:,0] = log_magnitude_clean.astype('float32')
noise[i,:,:,1] = wave_to_spec(noi).astype('float32')
clean[i,:,:,1] = wave_to_spec(cle).astype('float32')
#%%
BUFFER_SIZE = 400
BATCH_SIZE = 1
IMG_WIDTH = 256
IMG_HEIGHT = 256
#%%
def train_test_divide(data, threshold):
    """Split a 4-D array into a leading train part and a trailing test part
    of `threshold` samples.  Works for threshold == 0 as well (empty test)."""
    split_at = len(data) - threshold
    train = data[:split_at, :, :, :]
    test = data[split_at:len(data), :, :, :]
    return train, test
#%% 원하는 개수만큼 test set으로 따로 분류해둔다.
test_count = 40
noise_train, noise_test = train_test_divide(noise, test_count)
clean_train, clean_test = train_test_divide(clean, test_count)
mag_max_test = mag_max[len(mag_max)-test_count:len(mag_max)]
mag_min_test = mag_min[len(mag_min)-test_count:len(mag_min)]
#%%
train_dataset = tf.data.Dataset.from_tensor_slices((noise_train,clean_train))
print(train_dataset)
#train_dataset = train_dataset.shuffle(BUFFER_SIZE)
train_dataset = train_dataset.batch(BATCH_SIZE)
#%%
test_dataset = tf.data.Dataset.from_tensor_slices((noise_test,clean_test))
print(test_dataset)
# test_dataset = test_dataset.shuffle(BUFFER_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
#%%
OUTPUT_CHANNELS = 2
#%%
def downsample(filters, size, apply_batchnorm=True):
    """Return a Sequential block: stride-2 Conv2D (halves height and width),
    optional BatchNormalization, then LeakyReLU."""
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential()
    conv = tf.keras.layers.Conv2D(filters, size, strides=2, padding='same',
                                  kernel_initializer=initializer,
                                  use_bias=False)
    block.add(conv)
    if apply_batchnorm:
        block.add(tf.keras.layers.BatchNormalization())
    block.add(tf.keras.layers.LeakyReLU())
    return block
#%%
def upsample(filters, size, apply_dropout=False):
    """Return a Sequential block: stride-2 Conv2DTranspose (doubles height
    and width), BatchNormalization, optional Dropout(0.5), then ReLU."""
    initializer = tf.random_normal_initializer(0., 0.02)
    block = tf.keras.Sequential()
    deconv = tf.keras.layers.Conv2DTranspose(filters, size, strides=2,
                                             padding='same',
                                             kernel_initializer=initializer,
                                             use_bias=False)
    block.add(deconv)
    block.add(tf.keras.layers.BatchNormalization())
    if apply_dropout:
        block.add(tf.keras.layers.Dropout(0.5))
    block.add(tf.keras.layers.ReLU())
    return block
#%%
def Generator():
    """Build the pix2pix U-Net generator for 256x256 inputs with 2 channels
    (channel 0: log-magnitude spectrogram, channel 1: packed waveform).

    Eight stride-2 downsampling blocks reduce the input to 1x1, seven
    upsampling blocks with skip connections restore the resolution, and a
    final tanh Conv2DTranspose emits OUTPUT_CHANNELS channels in [-1, 1].
    """
    inputs = tf.keras.layers.Input(shape=[256,256,2])
    down_stack = [
        downsample(64, 4, apply_batchnorm=False), # (bs, 128, 128, 64)
        downsample(128, 4), # (bs, 64, 64, 128)
        downsample(256, 4), # (bs, 32, 32, 256)
        downsample(512, 4), # (bs, 16, 16, 512)
        downsample(512, 4), # (bs, 8, 8, 512)
        downsample(512, 4), # (bs, 4, 4, 512)
        downsample(512, 4), # (bs, 2, 2, 512)
        downsample(512, 4), # (bs, 1, 1, 512)
    ]
    up_stack = [
        upsample(512, 4, apply_dropout=True), # (bs, 2, 2, 1024)
        upsample(512, 4, apply_dropout=True), # (bs, 4, 4, 1024)
        upsample(512, 4, apply_dropout=True), # (bs, 8, 8, 1024)
        upsample(512, 4), # (bs, 16, 16, 1024)
        upsample(256, 4), # (bs, 32, 32, 512)
        upsample(128, 4), # (bs, 64, 64, 256)
        upsample(64, 4), # (bs, 128, 128, 128)
    ]
    initializer = tf.random_normal_initializer(0., 0.02)
    last = tf.keras.layers.Conv2DTranspose(OUTPUT_CHANNELS, 4,
                                         strides=2,
                                         padding='same',
                                         kernel_initializer=initializer,
                                         activation='tanh') # (bs, 256, 256, 2)
    x = inputs
    # Downsampling through the model
    skips = []
    for down in down_stack:
        x = down(x)
        skips.append(x)
    # Drop the innermost activation: it is the bottleneck, not a skip input.
    skips = reversed(skips[:-1])
    # Upsampling and establishing the skip connections
    for up, skip in zip(up_stack, skips):
        x = up(x)
        x = tf.keras.layers.Concatenate()([x, skip])
    x = last(x)
    return tf.keras.Model(inputs=inputs, outputs=x)
#%%
generator = Generator()
tf.keras.utils.plot_model(generator, show_shapes=True, dpi=64)
#%%
inp, _ = noise_train, clean_train
gen_output = generator(inp[0][tf.newaxis,...], training=False)
plt.imshow(gen_output[0,:,:,1])
#%%
LAMBDA = 100
loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
#%%
def generator_loss(disc_generated_output, gen_output, target):
    """Adversarial BCE loss (discriminator fooled toward 1) plus the
    LAMBDA-weighted L1 distance between generated output and target.
    Returns (total, adversarial, l1)."""
    adversarial_loss = loss_object(tf.ones_like(disc_generated_output),
                                   disc_generated_output)
    # mean absolute error against the ground truth
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_loss = adversarial_loss + LAMBDA * l1_loss
    return total_loss, adversarial_loss, l1_loss
#%%
def Discriminator():
initializer = tf.random_normal_initializer(0., 0.02)
inp = tf.keras.layers.Input(shape=[256, 256, 2], name='input_image')
tar = tf.keras.layers.Input(shape=[256, 256, 2], name='target_image')
x = tf.keras.layers.concatenate([inp, tar]) # (bs, 256, 256, channels*2)
down1 = downsample(64, 4, False)(x) # (bs, 128, 128, 64)
down2 = downsample(128, 4)(down1) # (bs, 64, 64, 128)
down3 = downsample(256, 4)(down2) # (bs, 32, 32, 256)
zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3) # (bs, 34, 34, 256)
conv = tf.keras.layers.Conv2D(512, 4, strides=1,
kernel_initializer=initializer,
use_bias=False)(zero_pad1) # (bs, 31, 31, 512)
batchnorm1 = tf.keras.layers.BatchNormalization()(conv)
leaky_relu = tf.keras.layers.LeakyReLU()(batchnorm1)
zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu) # (bs, 33, 33, 512)
last = tf.keras.layers.Conv2D(1, 4, strides=1,
kernel_initializer=initializer)(zero_pad2) # (bs, 30, 30, 1)
return tf.keras.Model(inputs=[inp, tar], outputs=last)
#%%
discriminator = Discriminator()
tf.keras.utils.plot_model(discriminator, show_shapes=True, dpi=64)
#%%
def discriminator_loss(disc_real_output, disc_generated_output):
    """BCE loss pushing real patches toward 1 and generated patches toward 0."""
    loss_on_real = loss_object(tf.ones_like(disc_real_output), disc_real_output)
    loss_on_fake = loss_object(tf.zeros_like(disc_generated_output),
                               disc_generated_output)
    return loss_on_real + loss_on_fake
#%%
generator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
#%%
checkpoint_dir = '.\\training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator)
#%%
def generate_images(model, test_input, tar):
    """Visualise the generator output next to its input and ground truth.

    One figure per channel: channel 0 holds the log-magnitude spectrogram,
    channel 1 the packed time-domain signal (see wave_to_spec).
    `training=True` is intentional: it keeps BatchNorm/Dropout in the same
    mode used during training, as in the pix2pix tutorial.
    """
    prediction = model(test_input, training=True)
    # The two channels only differed by the title of the first panel, so the
    # previously duplicated plotting code is folded into one loop.
    for channel, input_title in enumerate(['Input Magnitude', 'Input Time Domain']):
        plt.figure(figsize=(15, 15))
        display_list = [test_input[0][:, :, channel],
                        tar[0][:, :, channel],
                        prediction[0][:, :, channel]]
        titles = [input_title, 'Ground Truth', 'Predicted Image']
        for i in range(3):
            plt.subplot(1, 3, i + 1)
            plt.title(titles[i])
            # getting the pixel values between [0, 1] to plot it.
            plt.imshow(display_list[i] * 0.5 + 0.5)
            plt.axis('off')
        plt.show()
#%%
for example_input, example_target in test_dataset.take(5):
generate_images(generator, example_input, example_target)
#%%
EPOCHS = 100
#%%
import datetime
log_dir="logs/"
summary_writer = tf.summary.create_file_writer(
log_dir + "fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
#%%
@tf.function
def train_step(input_image, target, epoch):
    """One pix2pix optimisation step.

    Runs the forward pass under two gradient tapes (one per network),
    computes generator and discriminator losses, applies one optimizer
    update to each network, and logs the scalar losses to TensorBoard
    (step=epoch, so values within an epoch overwrite each other).
    """
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)
        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output], training=True)
        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)
    # Gradients are taken outside the tape context, one tape per network.
    generator_gradients = gen_tape.gradient(gen_total_loss,
                                            generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(disc_loss,
                                                 discriminator.trainable_variables)
    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))
    with summary_writer.as_default():
        tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)
        tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)
        tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)
        tf.summary.scalar('disc_loss', disc_loss, step=epoch)
#%%
def fit(train_ds, epochs, test_ds):
for epoch in range(epochs):
start = time.time()
display.clear_output(wait=True)
for example_input, example_target in test_ds.take(1):
generate_images(generator, example_input, example_target)
print("Epoch: ", epoch)
# Train
for n, (input_image, target) in train_ds.enumerate():
#print('.', end='')
#if (n+1) % 100 == 0:
# print()
train_step(input_image, target, epoch)
#print()
# saving (checkpoint) the model every 20 epochs
if (epoch + 1) % 20 == 0:
checkpoint.save(file_prefix = checkpoint_prefix)
print ('Time taken for epoch {} is {} sec\n'.format(epoch + 1,
time.time()-start))
checkpoint.save(file_prefix = checkpoint_prefix)
#%%
fit(train_dataset, EPOCHS, test_dataset)
#%%
checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
#%%
# Run the trained model on a few examples from the test dataset
for inp, tar in test_dataset.take(40):
generate_images(generator, inp, tar)
#%%
def return_to_audio(whole_picture, param = ConvTasNetParam()):
    """Inverse of wave_to_spec: unpack a (That, num_portions * L) array back
    into a 1-D float32 waveform.

    Generalized: the original hard-coded a total of 65536 samples (exactly
    16 portions with the default That=256, L=16).  Sizes are now derived
    from `whole_picture` and `param`, which reduces to 65536 in the default
    configuration, so existing callers are unaffected.
    """
    num_portions = whole_picture.shape[1] // param.L
    data = np.zeros((num_portions, param.That, param.L)).astype('float32')
    for i in range(num_portions):
        data[i] = whole_picture[:, i * param.L:i * param.L + param.L]
    # Same sample-count formula as wave_to_spec.
    num_samples = num_portions * param.That * (param.L - param.overlap) + param.overlap
    re_audio = np.zeros((num_samples)).astype('float32')
    for i in range(num_portions):
        for j in range(param.That):
            begin = (i * param.That + j) * (param.L - param.overlap)
            end = begin + param.L
            re_audio[begin:end] = data[i][j]
    return re_audio
#%%
def mag_ang_to_stft(mag, ang):
    """Recombine a magnitude array and a phase-angle array into a complex
    (256, 256) STFT matrix.

    NOTE(review): np.angle() already returns principal values in (-pi, pi],
    and mag*cos(ang) + i*mag*sin(ang) is valid on that whole range, so the
    per-quadrant reflections below look redundant -- confirm before
    simplifying; preserved as-is here.
    """
    pi = 3.1415927  # single-precision pi, slightly below math.pi
    half = pi / 2
    # Boolean masks for the four angle regions.
    over = ang > half
    lower = ang < -half
    plus = ((0 <= ang) & (ang <= half))
    minus = ((0 > ang) & (ang >= -half))
    ang_list = [plus, minus, over, lower]
    stft = np.zeros((256,256)).astype('complex64')
    for idx, i in enumerate(ang_list):
        # All four candidate real/imag pairs are computed for every mask;
        # only one pair is used per branch below.
        real = mag[i]*np.cos(ang[i])
        imag = mag[i]*np.sin(ang[i])*complex(0,1)
        real_1 = mag[i]*np.cos(-ang[i])
        imag_1 = mag[i]*np.sin(-ang[i])*complex(0,1)
        real_2 = mag[i]*np.cos(pi - ang[i])
        imag_2 = mag[i]*np.sin(pi - ang[i])*complex(0,1)
        real_3 = mag[i]*np.cos(pi + ang[i])
        imag_3 = mag[i]*np.sin(pi + ang[i])*complex(0,1)
        if idx == 0:
            stft[i] = real + imag
        elif idx == 1:
            stft[i] = real_1 - imag_1
        elif idx == 2:
            stft[i] = -real_2 + imag_2
        elif idx == 3:
            stft[i] = -real_3 - imag_3
    return stft
#%%
import soundfile
count = 0
for inp, tar in test_dataset.take(40):
prediction = generator(inp, training = True)
max_mag_clean = mag_max_test[count]
min_mag_clean = mag_min_test[count]
gen_mag = np.array(prediction[0][:,:,0])
gt_mag = np.array(tar[0][:,:,0])
gen_mag_denorm = denormalization(gen_mag, -1, 1, max_mag_clean, min_mag_clean)
gt_mag_denorm = denormalization(gt_mag, -1, 1, max_mag_clean, min_mag_clean)
gen_db_amp = librosa.db_to_amplitude(gen_mag_denorm)
gt_db_amp = librosa.db_to_amplitude(gt_mag_denorm)
re_audio = return_to_audio(prediction[0][:,:,1])
ground_truth = return_to_audio(tar[0][:,:,1])
pre_stft = librosa.stft(re_audio, n_fft=n_fft, hop_length=hop_length)
gt_stft = librosa.stft(ground_truth, n_fft=n_fft, hop_length=hop_length)
pre_angle = np.angle(pre_stft)
gt_angle = np.angle(gt_stft)
gen_stft = mag_ang_to_stft(gen_db_amp, pre_angle)
gt_stft = mag_ang_to_stft(gt_db_amp, gt_angle)
gen_wav = librosa.istft(gen_stft, hop_length = hop_length, win_length=n_fft)
gt_wav = librosa.istft(gt_stft, hop_length = hop_length, win_length=n_fft)
soundfile.write('C:/Users/NGV32/.spyder-py3/hyundai_project/output/1060ea_prediction/gen_wav_{}.wav' .format(count), gen_wav, sr)
soundfile.write('C:/Users/NGV32/.spyder-py3/hyundai_project/output/1060ea_ground_truth/gt_wav_{}.wav' .format(count), gt_wav, sr)
count += 1
#%%
import soundfile
count = 0
for inp, tar in test_dataset.take(40):
prediction = generator(inp, training = True)
re_audio = return_to_audio(prediction[0][:,:,1])
Input = return_to_audio(inp[0][:,:,1])
ground_truth = return_to_audio(tar[0][:,:,1])
soundfile.write('C:/Users/NGV32/.spyder-py3/hyundai_project/output/1060ea_prediction_timedomain/gen_wav_{}.wav' .format(count),
re_audio, sr)
plt.plot(Input)
plt.plot(re_audio, 'r')
plt.plot(ground_truth, 'g')
plt.show()
count += 1 | [
"noreply@github.com"
] | uaiu.noreply@github.com |
45915aa5951cf36b45dc79072c4204df2fb738b4 | ff4c9cceea7dad10af253aad4a5f243a8ac7e0f3 | /myPackage/__init__.py | ef64eef0f1d1a8f98d7aa93a92a3e538bb1385aa | [
"MIT"
] | permissive | EneoBovino/custom_lib | f6393d041eda5a8ac1d566c7b8d3448dbd452f6f | ee37f3477bb095cf5c7348f190915c6a0200646d | refs/heads/master | 2020-03-17T07:34:19.400607 | 2018-05-14T18:26:25 | 2018-05-14T18:26:25 | 133,401,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18 | py | import somePython
| [
"noreply@github.com"
] | EneoBovino.noreply@github.com |
c28cbafe1065d591073c1e76a2a96cc35eb02723 | c4284c650b259bb94c9509b0030410d6c371ab4b | /train/semantic_segmentation/utils/image.py | 001ee63c8f6ed84332b1aaf46a2bc5a26bc18fe4 | [
"MIT",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | delair-ai/DISIR | 38ffdc1c9a7d9582be7181a73fdf6c9e869c0a09 | b8f6735c1125ef651841d52a94a8b3fc48ee77b7 | refs/heads/master | 2023-02-08T16:20:49.618716 | 2022-04-11T12:19:38 | 2022-04-11T12:19:38 | 241,604,320 | 41 | 9 | MIT | 2023-02-08T00:44:30 | 2020-02-19T11:25:24 | Python | UTF-8 | Python | false | false | 1,155 | py | import itertools
import buzzard as buzz
import cv2 as cv
import numpy as np
def sliding_window(top, step=10, window_size=(20, 20)):
    """Slide a window of `window_size` across a channels-first image with
    stride `step`, yielding (x, y, width, height) tuples.  Windows that
    would overrun an edge are clamped to it, so the last positions along an
    axis may repeat."""
    win_w, win_h = window_size
    max_x = top.shape[1] - win_w
    max_y = top.shape[2] - win_h
    for x in range(0, top.shape[1], step):
        x = min(x, max_x)
        for y in range(0, top.shape[2], step):
            y = min(y, max_y)
            yield x, y, win_w, win_h
def grouper(n, iterable):
    """Yield successive chunks of up to n items from `iterable` as tuples;
    the final chunk may be shorter."""
    it = iter(iterable)
    # Two-argument iter(): call the lambda until it returns the empty-tuple
    # sentinel, i.e. until the underlying iterator is exhausted.
    for chunk in iter(lambda: tuple(itertools.islice(it, n)), ()):
        yield chunk
def from_coord_to_patch(img, coords):
    """Extract copies of channels-first image patches at `coords`, an
    iterable of (x, y, width, height) tuples (e.g. one chunk produced by
    grouper(n, sliding_window(...))).  Returns an ndarray of patches."""
    patch_list = [np.copy(img[:, x:x + w, y:y + h]) for x, y, w, h in coords]
    return np.asarray(patch_list)
| [
"gaston.lenczner@gmail.com"
] | gaston.lenczner@gmail.com |
df89646257218da18e688a4ddbda8144b57a3f87 | 29791610aed7b1f29dbe41650c907cbd9a0cc0bd | /python_projects/loginRegistration/main/migrations/0001_initial.py | 9a665b845494de843659414f1384b21b677d38cd | [] | no_license | Marvin-Gerodias/PROJECTS | ef7d8e812fe748a9d962aa5c7e026c8753bf9e07 | b2694722d3893fb704d7f397237441b7034cb71c | refs/heads/main | 2023-05-04T01:39:37.215681 | 2021-05-24T16:28:33 | 2021-05-24T16:28:33 | 344,958,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,342 | py | # Generated by Django 3.2.2 on 2021-05-21 01:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('fname', models.CharField(max_length=23)),
('lname', models.CharField(max_length=23)),
('email', models.EmailField(max_length=254, unique=True)),
('password', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Ticket',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status', models.CharField(max_length=255)),
('issue_type', models.CharField(max_length=255)),
('comment', models.CharField(max_length=255)),
('priority_level', models.CharField(max_length=255)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('assigned_to', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tickets', to='main.user')),
],
),
migrations.CreateModel(
name='Chat',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('chat_comment', models.CharField(max_length=500)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('assigned_ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='chat_log', to='main.ticket')),
('mentioned_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_comment', to='main.user')),
],
),
]
| [
"marvin.gabriel@gmail.com"
] | marvin.gabriel@gmail.com |
605b9fe4d1a7957cb2c55e30fa0363e03ff3f7eb | 8bde826917476ba95bd3e9b4c33d4b28284c1774 | /bin/fasta2phylip.py | 20c4f9be477fe93b9186aee62a7a87c516416368 | [] | no_license | rpetit3-education/ibs594-phylogenetics | 2935d2ea3ba0ab41967cb0ddf42a2850328034e4 | f39048354d636300ba1a2067a0bdc5f0c6bddc95 | refs/heads/master | 2020-05-29T12:27:21.973218 | 2014-10-08T01:42:56 | 2014-10-08T01:42:56 | 24,832,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 294 | py | #! /usr/bin/env python
'''
'''
import sys
from Bio import AlignIO
input_handle = open(sys.argv[1], "rU")
output_handle = open(sys.argv[2], "w")
alignments = AlignIO.parse(input_handle, "fasta")
AlignIO.write(alignments, output_handle, "phylip")
output_handle.close()
input_handle.close()
| [
"robbie.petit@gmail.com"
] | robbie.petit@gmail.com |
ee8146afae548b1c71be4263cc8daf4dd4d4be02 | 2dee8118560e51cb5ec93a078cf1353085f40b67 | /main_app/errors/routes.py | e78e6849cc7b243cdb186beddd38147bf84ca1dd | [] | no_license | TerraX3000/webapp-mslogin | d8d8a1805dbf40f489dbfaf1db8258a83b4ebdcf | 596a8182b290d0bf853f3c3bf9be963f0c31dad1 | refs/heads/main | 2023-02-15T01:00:17.549315 | 2021-01-02T14:14:45 | 2021-01-02T14:14:45 | 326,041,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 806 | py | from flask import render_template
from . import bp
from main_app.log import logger, wrap, entering, exiting
# Reference: Python Flask Tutorials: Tutorial 12: Custom Error Pages
# https://coreyms.com/development/python/python-flask-tutorials-full-series
# Each handler renders its own template and returns the matching HTTP status
# code as the second element of the (body, status) tuple.
@bp.app_errorhandler(404)
@wrap(entering, exiting)
def error_404(error):
    # Not Found
    return render_template("404.html", title="404 Error"), 404
@bp.app_errorhandler(401)
@wrap(entering, exiting)
def error_401(error):
    # Unauthorized
    return render_template("401.html", title="401 Error"), 401
@bp.app_errorhandler(403)
@wrap(entering, exiting)
def error_403(error):
    # Forbidden
    return render_template("403.html", title="403 Error"), 403
@bp.app_errorhandler(500)
@wrap(entering, exiting)
def error_500(error):
    # Internal Server Error
    return render_template("500.html", title="500 Error"), 500
| [
"ken.kranz57@gmail.com"
] | ken.kranz57@gmail.com |
39e25297ceed5f44f9099eab6cfa216a86b008be | 9f7f397a61cd163ae1c3974fb1ce54ee99b7f6bb | /work3/plactice/postman.py | bed8f6e309393f10630bda98f49c50a0161fce90 | [] | no_license | ring-ring097/Optimization | c41b8c53447f5e2c8626d1b25143d9f564f5c86a | a7c7cd22bf3dc112b846e66af68c34cfc0e32bce | refs/heads/main | 2023-06-20T12:40:10.570618 | 2021-07-21T04:48:14 | 2021-07-21T04:48:14 | 387,990,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,324 | py | import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
from itertools import combinations
# 問題のグラフ生成と表示
np.random.seed(1000)
G = nx.grid_2d_graph(4,3)
for (u,v) in G.edges():
G[u][v]['weight'] = np.random.randint(1,6)
nx.draw_networkx(G,
pos={v:v for v in G.nodes()},
node_color='lightgray',
node_size=1500,
width=1)
nx.draw_networkx_edge_labels(G,
edge_labels={(u,v):G[u][v]['weight'] for (u,v) in G.edges()},
pos={v:v for v in G.nodes()},)
plt.axis('off')
plt.show()
# 全ての奇点間の最短路の長さを計算
# 奇点の点集合作成
Vodd = [v for v in G.nodes() if G.degree(v)%2 == 1]
#dist[vodd1][vodd2]に計算結果を格納
dist = dict(nx.all_pairs_dijkstra_path_length(G))
# 頂点がVoddの完全グラフを作成(重みは最短路長)
K = nx.Graph()
K.add_weighted_edges_from([(u,v,dist[u][v])
for (u,v) in combinations(Vodd, 2)])
nx.draw_networkx(K,
pos={v:v for v in K.nodes()},
node_color='lightgray',
node_size=1500,
width=1)
nx.draw_networkx_edge_labels(K,
pos={v:v for v in K.nodes()},
edge_labels={(u,v):K[u][v]['weight'] for (u,v) in K.edges()})
plt.axis('off')
plt.show()
# 重み最小の完全マッチングを求める(偶数個の頂点からなる完全グラフは必ず最適解を持つ)
# 重みを汎化して重み最大マッチングを求めることで重み最小マッチングを得る
CK = K.copy()
wm = max(CK[u][v]['weight'] for (u,v) in CK.edges())
for (u,v) in K.edges():
CK[u][v]['weight'] = wm - CK[u][v]['weight'] + 1
m = nx.max_weight_matching(CK, maxcardinality=True)
md = dict(m)
mm = []
for (u,v) in md.items():
if (u,v) not in mm and (v,u) not in mm:
mm.append((u,v))
nx.draw_networkx(CK,
pos={v:v for v in CK.nodes()},
node_color='lightgray',
node_size=1500,
width=1)
nx.draw_networkx_edge_labels(CK,
pos={v:v for v in CK.nodes()},
edge_labels={(u,v):CK[u][v]['weight'] for (u,v) in CK.edges()})
nx.draw_networkx_edges(CK,
pos={v:v for v in CK.nodes()},
edgelist=mm,
width=5)
plt.axis('off')
plt.show()
# マッチング(最短路)に沿って,枝を重複させてオイラー閉路を求める
CG = G.copy()
for (u,v) in mm:
dp = nx.dijkstra_path(G, u, v)
for i in range(len(dp)-1):
(ux, uy) = dp[i]
(vx, vy) = dp[i+1]
if ux == vx:
wx = ux + 0.3
wy = (uy + vy) / 2.0
else:
wx = (ux + vx) / 2.0
wy = uy + 0.3
CG.add_edges_from([((ux,uy), (wx,wy)), ((wx,wy), (vx,vy))])
nx.draw_networkx(CG,
pos={v:v for v in CG.nodes()},
node_color='lightgray',
node_size=1500,
width=1)
plt.axis('off')
plt.show()
# できたグラフからオイラー閉路を作成
ec = nx.eulerian_circuit(CG)
for (i,j) in ec:
print(i, end='->')
print('end')
| [
"tosa.rinto.td@alumni.tsukuba.ac.jp"
] | tosa.rinto.td@alumni.tsukuba.ac.jp |
44b7c6f55aadf4b72a82ef6100f9975da1f53c8f | 734b5ba4980bf382a9c2126dba6d19e9621ff564 | /oshi/grammar.py | ae1e522fdef9e6d40a01d5ad0e0a338c5938f46c | [] | no_license | sorashi/oshi | e8329c666c979bd81bb1b12fe71b7a6db0a81ba2 | 6e6bb5ab534b630b3ba46f8d8022397786388ea5 | refs/heads/master | 2022-04-04T00:25:23.512272 | 2020-02-11T15:10:53 | 2020-02-11T15:10:53 | 227,387,870 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,183 | py | import re
from os import path
from fnmatch import fnmatch # fnmatch(test_string, glob_pattern)
from typing import List
import database
RULE_REGEX = r'^(\S+)\s*(\S*)\s+〜(\S*)\s*(\S*)\s+for\s+(\S*)\s+〜(\S*) +((?:[ \t]*\S+)+)\s*$'
CURRENT_DIRECTORY = path.dirname(__file__)
RULES_FILENAME = path.join(CURRENT_DIRECTORY, 'grammar.rules')
VOWEL_KATAKANA = list("アイウエオ")
SOUND_CHANGE = {
"": [""]*9,
"ア": list("さかがまばならわた"),
"イ": list("しきぎみびにりいち"),
"ウ": list("すくぐむぶぬるうつ"),
"エ": list("せけげめべねれえて"),
"オ": list("そこごもぼのろおと")
}
class Rule:
    """A single grammar rule parsed from grammar.rules.

    Line format:
        RULE [ROLE] PATTERN [POS] for TARGET TARGET_PATTERN POS-GLOB...
    Example:
        potential plain 〜エる v1 for plain 〜ウ v5[^r]* v5r vs-c
    """
    def __init__(self, rule: str, role: str, pattern: str, pos: str, target: str,
                 target_pattern: str, pos_globs: List[str]):
        assert type(pos_globs) is list
        self.rule = rule
        self.role = role
        self.pattern = pattern
        self.pos = pos
        # NOTE: attribute name "traget" is a historical typo, kept because
        # other code in this module (e.g. lookup) reads rule.traget.
        self.traget = target
        self.target_pattern = target_pattern
        self.pos_globs = pos_globs

    def __str__(self):
        pieces = [self.rule]
        if self.role:
            pieces.append(self.role)
        pieces.append("~" + self.pattern)
        if self.pos:
            pieces.append(self.pos)
        pieces.append("for {} ~{} {}".format(self.traget, self.target_pattern,
                                             " ".join(self.pos_globs)))
        return " ".join(pieces)

    def __repr__(self):
        return "{} {} ~{} {} for {} ~{} {}".format(
            self.rule, self.role, self.pattern, self.pos,
            self.traget, self.target_pattern, " ".join(self.pos_globs))
def apply_rule_backward(expression: str, rule: Rule):
    """Undo a conjugation step: drop rule.pattern from the end of
    `expression` and append rule.target_pattern instead."""
    stem = expression[:len(expression) - len(rule.pattern)]
    return stem + rule.target_pattern
def apply_rule_forward(expression: str, rule: Rule):
    """Apply a conjugation step: drop rule.target_pattern from the end of
    `expression` and append rule.pattern instead."""
    stem = expression[:len(expression) - len(rule.target_pattern)]
    return stem + rule.pattern
def parse_rules(filename=RULES_FILENAME):
    """Parse the grammar-rules file into a list of Rule objects.

    Each non-comment, non-empty line must match RULE_REGEX:
        RULE [ROLE] ~PATTERN [POS] for TARGET ~TARGET_PATTERN POS-GLOB...
    A pattern whose first character is a vowel katakana (e.g. ~ウ) is a
    template and is expanded into nine concrete rules, one per consonant
    row, using the SOUND_CHANGE table.

    Raises SyntaxError (with the offending line number) on malformed input.
    """
    rules = []
    with open(filename, 'r', encoding='utf-8') as f:
        line_number = 0
        for line in f:
            line_number += 1
            if re.match(r'^\s*#.*$', line): # skip comment lines
                continue
            if re.match(r'^\s*$', line): # skip empty lines
                continue
            line = re.sub(r'#.*', "", line) # remove comments
            match = re.match(RULE_REGEX, line)
            if not match:
                raise SyntaxError("Error parsing line {}: {}".format(line_number, line))
            rule, role, pattern, pos, target, target_pattern, pos_globs = match.groups()
            pos_globs = pos_globs.split()
            # role and pos are optional
            # pattern and target_pattern can be empty
            # rule, target and globs are required
            if "" in [rule, target] or len(pos_globs) <= 0:
                raise SyntaxError("Error parsing line {}: {}".format(line_number, line))
            role = None if role == "" else role
            pos = None if pos == "" else pos
            if len(pattern) <= 0 or pattern[0] not in VOWEL_KATAKANA:
                rules.append(Rule(rule, role, pattern, pos, target, target_pattern, pos_globs))
            else:
                # expansion required
                expansion_regex = r'^([アイウエオ]?).*$'
                sound = re.match(expansion_regex, pattern)[1]
                assert sound != "" # is guaranteed by previous if statement
                # sound_target can be empty
                sound_target = re.match(expansion_regex, target_pattern)[1]
                assert 9 == len(SOUND_CHANGE[sound]) == len(SOUND_CHANGE[sound_target])
                # One concrete rule per consonant row, substituting the vowel
                # placeholder with the row's kana on both sides.
                for i in range(9):
                    rules.append(Rule(rule, role,
                                      re.sub(expansion_regex, SOUND_CHANGE[sound][i], pattern),
                                      pos, target,
                                      re.sub(expansion_regex, SOUND_CHANGE[sound_target][i], target_pattern),
                                      pos_globs))
    return rules
def lookup(rules: List[Rule], expression: str, db: database.Database = None,
           tags: List[str] = None, role: str = None, path: List[Rule] = None,
           verbous = False):
    """
    Recursively looks up what form the expression is in
    Top level call: lookup(rules, expression, database)
    rules - list of grammar rules to use for lookup
    expression - a conjugated Japanese expression
    db - a database.Database
    tags - list of glob patterns for possible tags of the expression
           (None means any tag, i.e. ["*"])
    role - for example "plain", "past", or None = any role
    path - holds the traversed path to the current expression (internal)
    verbous - whether should print() information about the search
    returns a tuple (path, database entry) or None if nothing was found
    """
    # Bug fix: the defaults were mutable lists (tags=["*"], path=[]) shared
    # across calls; use None sentinels and build fresh objects instead.
    if tags is None:
        tags = ["*"]
    if path is None:
        path = []
    if not isinstance(db, database.Database):
        raise ValueError("Database invalid")
    if len(path) <= 0:
        # Top-level call: the expression may already be in dictionary form.
        entry = db.find_exact(expression)
        if entry:
            return path, entry
    if verbous:
        print("{}::({}, {}, {})::::".format(" " * len(path), expression, " ".join(tags), role))
    if len(path) > 20:
        raise RuntimeError("Possible recursion loop (depth limit reached)")
    # find applicable rules
    applicable = []
    for rule in rules:
        if rule in path:
            continue # skip rules that have already been used
        if role not in [None, rule.rule, rule.role]:
            continue # skip rules with unmatching role
        if not expression.endswith(rule.pattern):
            continue # skip rules with unmatching pattern
        # take rules that contain a matching tag
        # the rule has a definitive pos
        if rule.pos and any(fnmatch(rule.pos, tag_glob) for tag_glob in tags):
            applicable.append(rule)
            continue
        for pos_glob in rule.pos_globs:
            if any(fnmatch(pos_glob, tag_glob) for tag_glob in tags):
                applicable.append(rule)
                break
    if len(applicable) == 0:
        # no applicable rules found
        if verbous:
            print(" "*len(path) + "dead end")
        return None
    for rule in applicable:
        # new expression is built by removing the suffix in the pattern and replacing it with
        # target_pattern, for example 書いてた -> 書いてる
        new_expression = apply_rule_backward(expression, rule)
        if verbous:
            print(" "*len(path) + str(rule))
        if rule.traget == "plain":
            entry = db.find_exact(new_expression)
            if entry:
                # this node is the result
                return path + [rule], entry
        result = lookup(rules, new_expression, db,
                        rule.pos_globs, rule.traget, path + [rule], verbous)
        # if a result was found we return all the way up from this branch
        if result:
            return result
        # else the next rule branch will be explored
    return None # none of the applicable rules are correct
| [
"prazak.dennis@gmail.com"
] | prazak.dennis@gmail.com |
ea98f309572f9a831500b02f4be4bf1c76855e92 | c3655bc6c3c6941e8bec64345af879585f0ce8f1 | /ex06.py | 03d9e1d13ea78e3caada46f275ebfeb0c7d3afac | [] | no_license | Jean-Alesi/Exercicios | 5935eccb99813994f0d18c126ea728bafdc5d5e2 | 5fa88d067c287090147febdc9f7e7cb59c6bb7a1 | refs/heads/master | 2020-12-19T08:32:45.968972 | 2020-02-01T00:21:51 | 2020-02-01T00:21:51 | 235,681,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | n = int(input('digite um número:'))
dobro = (n * 2)
triplo = (n * 3)
raiz = (n ** (1/2))
print('você digitou o número {} \n o dobro é {} \n o triplo é {} \n e a raiz é {:.2f}'.format(n, dobro, triplo, raiz)) | [
"noreply@github.com"
] | Jean-Alesi.noreply@github.com |
a8a8b14820fcddd5bd55fd019f52ee57c7ff3a51 | de9bd97adcbe4d278a1bf1d5f9107e87b94366e1 | /coding_solutions/Day9(28-05-2020)/Program2.py | 31f78a9bb4008fe640973829d9b4d7bc8af22e25 | [] | no_license | alvas-education-foundation/ANUSHA_4AL17CS007 | 6ed2957d6d1b1b1f2172de3c8ba6bfd3f886aab9 | 031849369448dd60f56769abb630fc7cf22fe325 | refs/heads/master | 2022-10-29T10:49:23.188604 | 2020-06-15T15:50:00 | 2020-06-15T15:50:00 | 267,532,875 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 366 | py | #Python program to find digital root of a #number
n = int(input("Enter the digit\n"))
def digital_root(n):
m = len(str(n))
s=0
for i in range(m):
s = s+ n%10
n = n//10
print(s)
if(len(str(s))>1):
return(digital_root(s))
print(digital_root(n))
Output:
Enter the digit
162536738292
54
9
Enter the digit
0
0
| [
"noreply@github.com"
] | alvas-education-foundation.noreply@github.com |
6841b5f8f2844439542581821019d5fd11329764 | 07f837d8c5236fe5e75ef510cd296814452370ce | /py/four_hour_cloud.py | 220c212f5dfc9d69fd0d4fccfd0d8b1a5659a04f | [
"Apache-2.0"
] | permissive | vkuznet/h2o | 6f9006a5186b964bac266981d9082aec7bc1067c | e08f7014f228cbaecfb21f57379970e6a3ac0756 | refs/heads/master | 2021-08-28T11:37:52.099953 | 2021-08-10T22:43:34 | 2021-08-10T22:43:34 | 20,032,996 | 0 | 0 | Apache-2.0 | 2021-08-10T22:43:35 | 2014-05-21T18:46:27 | Java | UTF-8 | Python | false | false | 3,693 | py | #!/usr/bin/python
import unittest, time, sys, random, datetime
sys.path.extend(['.','..','py','../h2o/py','../../h2o/py'])
import h2o, h2o_hosts, h2o_cmd, h2o_browse as h2b
import h2o_print as h2p
beginning = time.time()
def log(msg):
print "\033[92m[0xdata] \033[0m", msg
CHECK_WHILE_SLEEPING = False
print "Don't start a test yet..."
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
global SEED, localhost
SEED = h2o.setup_random_seed()
localhost = h2o.decide_if_localhost()
if (localhost):
# h2o.nodes[0].delete_keys_at_teardown should cause the testdir_release
# tests to delete keys after each test completion (not cloud teardown)
h2o.build_cloud(3, create_json=True, java_heap_GB=4, delete_keys_at_teardown=False)
else:
h2o_hosts.build_cloud_with_hosts(create_json=True, delete_keys_at_teardown=False)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_build_for_clone(self):
# python gets confused about which 'start' if I used start here
elapsed = time.time() - beginning
print "\n%0.2f seconds to get here from start" % elapsed
# might as well open a browser on it? (because the ip/port will vary
# maybe just print the ip/port for now
## h2b.browseTheCloud()
maxTime = 4*3600
totalTime = 0
incrTime = 60
h2p.purple_print("\nSleeping for total of", (maxTime+0.0)/3600, "hours.")
print "Will check h2o logs every", incrTime, "seconds"
print "Should be able to run another test using h2o-nodes.json to clone cloud"
print "i.e. h2o.build_cloud_with_json()"
print "Bad test if a running test shuts down the cloud. I'm supposed to!\n"
h2p.green_print("To watch cloud in browser follow address:")
h2p.green_print(" http://{0}:{1}/Cloud.html".format(h2o.nodes[0].http_addr, h2o.nodes[0].port))
h2p.blue_print("You can start a test (or tests) now!")
h2p.blue_print("Will Check cloud status every %s secs and kill cloud if wrong or no answer" % incrTime)
if CHECK_WHILE_SLEEPING:
h2p.blue_print("Will also look at redirected stdout/stderr logs in sandbox every %s secs" % incrTime)
h2p.red_print("No checking of logs while sleeping, or check of cloud status")
h2p.yellow_print("So if H2O stack traces, it's up to you to kill me if 4 hours is too long")
h2p.yellow_print("ctrl-c will cause all jvms to die(thru psutil terminate, paramiko channel death or h2o shutdown...")
while (totalTime<maxTime): # die after 4 hours
h2o.sleep(incrTime)
totalTime += incrTime
# good to touch all the nodes to see if they're still responsive
# give them up to 120 secs to respond (each individually)
h2o.verify_cloud_size(timeoutSecs=120)
if CHECK_WHILE_SLEEPING:
print "Checking sandbox log files"
h2o.check_sandbox_for_errors(cloudShutdownIsError=True)
else:
print str(datetime.datetime.now()), h2o.python_cmd_line, "still here", totalTime, maxTime, incrTime
# don't do this, as the cloud may be hung?
if 1==0:
print "Shutting down cloud, but first delete all keys"
start = time.time()
h2i.delete_keys_at_all_nodes()
elapsed = time.time() - start
print "delete_keys_at_all_nodes(): took", elapsed, "secs"
if __name__ == '__main__':
h2o.unit_main()
| [
"kevin@0xdata.com"
] | kevin@0xdata.com |
55e42acf5a6c8f25fc0cc3d9ce43ac6c75cbe07c | c4accc95f2dddb90d357afbc3deebd56ded79dd3 | /proyecto/pv.py | 024990f115ac3cd61a9f4cdd598ce646d63cf743 | [] | no_license | cesaraul/python_codes | 2ab7b5091e210068676ab7e97486384bfdbee356 | 5d9601f52120ea7b731f50a816d3f9298c18b343 | refs/heads/master | 2020-06-30T16:07:01.444430 | 2019-08-07T17:12:49 | 2019-08-07T17:12:49 | 200,879,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | import click
from clients import commands as clients_commands
@click.group() # para indicar que es nuestro punto de entrada
@click.pass_context #crear contexto
def cli():
pass
cli.add_commands(clients_commands.all)
| [
"cesaraulcg@gmail.com"
] | cesaraulcg@gmail.com |
9345fcb14424dc41dfd8843cc32a6055649879f9 | 630e037b7d416a3bfc18c30b7c950ad63b416b10 | /TesCase/TC03b (Bonus).py | d415b2541fd137d92119a1df270257d0a57269bc | [] | no_license | kevinrizki1019/python-interpreter-using-cfg | 82db1738846a6a0e5699454336f0934d9633553e | d3f7b3c5a15bdb6fc207a5a3d2ad4d5ac2196067 | refs/heads/master | 2021-01-01T14:35:56.055825 | 2019-11-29T10:35:41 | 2019-11-29T10:35:41 | 239,322,185 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 606 | py | def get_rule_category(rule: dict) -> str:
''' Get rule category in string. This category is also a key for its corresponding dictionary.
Input(dict) = rule
Output(str) = category of the rule
Example:
Input = {'producer': 'N', 'product': ['people']}
Output = 'terminal'
'''
rule_product = rule[PRODUCT_KEY]
if len(rule_product) == 0:
return EPSILON_RULE_KEY
elif len(rule_product) == 1:
if rule_product[0].islower:
return TERMINAL_RULE_KEY
else:
return UNARY_RULE_KEY
elif len(rule_product) == 2:
return BINARY_RULE_KEY
else:
return N_ARIES_RULE_KEY | [
"kevinrizkimohammad@gmail.com"
] | kevinrizkimohammad@gmail.com |
51d94bb10630aafec0640620432c3ebe407246a3 | f5b5a6e3f844d849a05ff56c497638e607f940e0 | /capitulo 06/06.32 - Programa 6.10 Transformacao de range em uma lista.py | 51cd535815fbd14cb32451d366d9badde5012cb3 | [] | no_license | alexrogeriodj/Caixa-Eletronico-em-Python | 9237fa2f7f8fab5f17b7dd008af215fb0aaed29f | 96b5238437c88e89aed7a7b9c34b303e1e7d61e5 | refs/heads/master | 2020-09-06T21:47:36.169855 | 2019-11-09T00:22:14 | 2019-11-09T00:22:14 | 220,563,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 770 | py | ##############################################################################
# Parte do livro Introdução à Programação com Python
# Autor: Nilo Ney Coutinho Menezes
# Editora Novatec (c) 2010-2019
# Primeira edição - Novembro/2010 - ISBN 978-85-7522-250-8
# Segunda edição - Junho/2014 - ISBN 978-85-7522-408-3
# Terceira edição - Janeiro/2019 - ISBN 978-85-7522-718-3
# Site: http://python.nilo.pro.br/
#
# Arquivo: listagem3\capítulo 06\06.32 - Programa 6.10 – Transformação de range em uma lista.py
# Descrição: Programa 6.10 – Transformação de range em uma lista
##############################################################################
# Programa 6.10 – Transformação de range em uma lista
L = list(range(100, 1100, 50))
print(L)
| [
"noreply@github.com"
] | alexrogeriodj.noreply@github.com |
9d4a18c25ed901eedcfbd1f71a321317da85edc2 | 93441af33526d7b1c335c32e31c130926d373ff0 | /website/migrations/0001_initial.py | ca0a099e7b13d757906148a79383aafb422f31f1 | [] | no_license | Bunny303/MealPlanner | b435e6b99298718d964cd05cd126b7f3784facae | 9d836f8ce1bbf8300707a53cac1c6c9f889d4d0f | refs/heads/master | 2020-04-13T22:10:08.849328 | 2017-07-17T09:21:54 | 2017-07-17T09:21:54 | 54,634,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,251 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-06-15 20:31
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Day',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(choices=[(b'1', b'Monday'), (b'2', b'Tuesday'), (b'3', b'Wednesday'), (b'4', b'Thursday'), (b'5', b'Friday'), (b'6', b'Saturday'), (b'7', b'Sunday')], max_length=3)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('unit', models.CharField(choices=[(b'g', b'Gram'), (b'kg', b'Kilogram'), (b'number', b'Number')], default=b'kg', max_length=6)),
],
),
migrations.CreateModel(
name='Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50)),
('description', models.CharField(blank=True, max_length=550)),
('image', models.ImageField(null=True, upload_to=b'static')),
('portion_number', models.IntegerField(default=1)),
('category', models.IntegerField(blank=True, choices=[(1, b'Main Dish'), (2, b'Salad'), (3, b'Soup'), (4, b'Desert')], default=None, null=True)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='RecipeIngredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.FloatField(default=1.0)),
('ingredient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='website.Ingredient')),
('recipe', models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='website.Recipe')),
],
),
migrations.AddField(
model_name='recipe',
name='ingredients',
field=models.ManyToManyField(blank=True, through='website.RecipeIngredient', to='website.Ingredient'),
),
migrations.AddField(
model_name='recipe',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='day',
name='recipe',
field=models.ManyToManyField(blank=True, to='website.Recipe'),
),
]
| [
"andrey.andreev303@gmail.com"
] | andrey.andreev303@gmail.com |
addb680709e766e2bcc6ebdbd7e1d07e4cb983ba | 5caa089421d1de7b1e6a22eab5e5ad37a6cc45c2 | /파이썬 알고리즘 문제풀이(코딩테스트 대비)/섹션 3/2. 숫자만 추출/AA.py | b3c6d1741348d882e2634e07237b09d25f5214fa | [] | no_license | leeyjwinter/Algorythm-Python | 00055ca969dce2e1b56fac0ebdd6c380b0dda0f0 | 0fc87507c4c7d43a378bb298301a2fa3ad01b1bb | refs/heads/master | 2023-08-15T22:58:39.971771 | 2021-09-24T14:08:44 | 2021-09-24T14:08:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | #x.isdecimal() : x가 0~9까지면 true 반환
string_a = input()
number = []
for i in range(len(string_a)):
if string_a[i]<'A' and string_a[i]>='0':
number.append(string_a[i])
while number[0]=='0':
number.pop(0)
number=int(''.join(number))
def yaksu(x):
cnt = 0
if type(x**(1/2))=='int':
for j in range(1,x**(1/2)+1):
if x%j==0:
cnt+=1
cnt=cnt*2+1
else:
for j in range(1,round(x**(1/2))+1):
if x%j==0:
cnt+=1
cnt=cnt*2
return cnt
print(number)
print(yaksu(number))
| [
"james_22@naver.com"
] | james_22@naver.com |
0f8f10c674666f80a8dfb7dc011afc0af5ca45d6 | 41e69a518ff146ef299e9b807a7a96428effd958 | /test/test_full_operations_operation.py | ef25b6c9c5cb0a0793bc2948de26f0953af9ea2b | [] | no_license | daxslab/enzona-payment-python | 40797a8aea7d7185ad04fe401c4f699cb1d93309 | 9a7721445cc1331e14687374df872f911a565305 | refs/heads/master | 2022-07-13T15:34:53.171246 | 2020-05-16T04:18:51 | 2020-05-16T04:18:51 | 264,357,269 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,007 | py | # coding: utf-8
"""
PaymentAPI
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import enzona_payment
from enzona_payment.models.full_operations_operation import FullOperationsOperation # noqa: E501
from enzona_payment.rest import ApiException
class TestFullOperationsOperation(unittest.TestCase):
"""FullOperationsOperation unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testFullOperationsOperation(self):
"""Test FullOperationsOperation"""
# FIXME: construct object with mandatory attributes with example values
# model = enzona_payment.models.full_operations_operation.FullOperationsOperation() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"ccesar@daxslab.com"
] | ccesar@daxslab.com |
247f95d2dd1ba8c30dc05f432d9df8ab67e809ea | cd995dbf44647858e092f22fd99228b3aadc138a | /16 트라이/56 트라이 구현.py | a9e65658e8ed722ea191413bbcd928d182b72dc5 | [] | no_license | Wooyongjeong/python-algorithm-interview | ed970ae046bd65ac46fd4f42aaa386f353f97233 | c134dbb1aaff5f4c7aa930be79fcdf1dfad62cec | refs/heads/master | 2023-08-17T04:51:19.727919 | 2021-10-07T14:29:24 | 2021-10-07T14:29:24 | 398,216,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,336 | py | import collections
# 풀이 1. 딕셔너리를 이용해 간결한 트라이 구현
# 트라이의 노드
class TrieNode:
def __init__(self):
self.word = False
self.children = collections.defaultdict(TrieNode)
class Trie:
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = TrieNode()
# 단어 삽입
def insert(self, word: str) -> None:
"""
Inserts a word into the trie.
"""
node = self.root
for char in word:
node = node.children[char]
node.word = True
# 단어 존재 여부 판별
def search(self, word: str) -> bool:
"""
Returns if the word is in the trie.
"""
node = self.root
for char in word:
if char not in node.children:
return False
node = node.children[char]
return node.word
# 문자열로 시작 단어 존재 여부 판별별
def startsWith(self, prefix: str) -> bool:
"""
Returns if there is any word in the trie that starts with the given prefix.
"""
node = self.root
for char in prefix:
if char not in node.children:
return False
node = node.children[char]
return True
| [
"wooyong.dev@gmail.com"
] | wooyong.dev@gmail.com |
a1fc1832b01fb19aa877c1623da3ec49d6093447 | 46bd01358d2f112772a276617534b9869db64722 | /Ejercicios/raices.py | 3940f83979faa042e4c5067e695c6ba7f5cec6b6 | [] | no_license | ncardona10/Metodos_Computacionales | 84e8b2056680bd654b308bdbf6216f2a14bc0a61 | 45f70a08a416e64058f4da0e572a72d0b121f027 | refs/heads/master | 2020-03-18T13:14:33.497388 | 2018-11-06T02:32:27 | 2018-11-06T02:32:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 949 | py | import numpy as np
import matplotlib.pyplot as plt
g=9.8
def deriva(fun,v,h,d,theta):
h=0.01
delta=theta+h
df=(fun(v,h,d,delta)-fun(v,h,d,theta))/h
return df
def fun(v,h,d,theta):
rta= d*np.tan(theta) - (g*d**2)/(2*v**2*np.cos(theta)**2) -h
return rta
theta= np.linspace(0.1, np.pi/3, 10)
f_theta=[]
def grafica(fun,v,h,d):
for i in theta:
temp= fun(v,h,d,i)
f_theta.append(temp)
def apunta(fun,deriva,v,h,d):
cont=0.0
epsilon=0.000001
theta=1.0
fx=abs(fun(v,h,d,theta))
while(cont<1000 and epsilon<fx):
print("fx: ", fx)
delta_theta=-(fun(v,h,d,theta)/deriva(fun,v,h,d,theta))
theta+=delta_theta
fx=abs(fun(v,h,d,theta))
cont+=1
if(epsilon < fx):
return "No existe"
return theta
grafica(fun, 100.0, 4.0, 6.0)
plt.plot(theta, f_theta)
plt.savefig("funcion.png")
respuesta = apunta(fun,deriva,100.0, 4.0, 6.0)
print respuesta
| [
"n.cardonac@uniandes.edu.co"
] | n.cardonac@uniandes.edu.co |
c3f38e56682732a9af210122091dc6e960545e4e | 48dd9584497f24ea496fd5e1cc1441021676fcad | /Analizador_Sentimientos.py | a373e0620b4397e12918ecd67d79fded5ee50acf | [] | no_license | LeonardoSeveroJ/Analizador_sentimientos | f2ba3df5b92289e8b13cbf2211848e0cbc2e2383 | 442ba7fdfaf1b647c337048bb4414491ec0809aa | refs/heads/main | 2023-05-03T23:10:36.326967 | 2021-05-20T16:35:38 | 2021-05-20T16:35:38 | 369,269,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,271 | py | from textblob import TextBlob
pos_count =0
pos_correct=0
with open("positivas.txt", "r") as f:
for line in f.read().split('\n'):
analysis= TextBlob(line)
#print(line)
try:
eng=analysis.translate(to='en')
if eng.sentiment.polarity>0:
pos_correct += 1
pos_count += 1
except:
#mostramos este mensaje en caso de que se presente algun problema
print("El elemento no esta presente")
neg_count =0
neg_correct =0
with open("negativas.txt", "r") as f:
for line in f.read().split('\n'):
analysis= TextBlob(line)
#print(line)
try:
eng=analysis.translate(to='en')
if eng.sentiment.polarity>0:
neg_correct += 1
neg_count += 1
except:
#mostramos este mensaje en caso de que se presente algun problema
print("El elemento no esta presente")
print("Precision positviva= {}% via {} ejemplos".format(pos_correct/pos_count*100.0, pos_count))
print("Precision negativa= {}% via {} ejemplos".format(neg_correct/neg_count*100.0, neg_count))
| [
"noreply@github.com"
] | LeonardoSeveroJ.noreply@github.com |
48dbaefec286b4052925f830bcaff20de9f4aa31 | be460e66f05c0259cf45e6c0cdb653fc2913972d | /acm/Online-Judge/leetcode/code/3sum-closest.py | da6eefe4f323394f550962fb9cd8dec90bce8c80 | [] | no_license | Salvare219/CodeLibrary | 3247aee350402dac3d94e059a8dc97d5d5436524 | 8961a6d1718c58d12c21a857b23e825c16bdab14 | refs/heads/master | 2021-06-16T18:38:21.693960 | 2017-05-09T12:47:36 | 2017-05-09T12:47:36 | 81,569,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 747 | py | class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def threeSum(self, num):
mp={}
n=len(num)
ans=[]
num.sort()
l1=-3214421
for i in xrange(n):
if l1!=num[i]:
l1=num[i]
l2=-312414
j=i+1
p=n-1
while j<p:
if l2!=num[j]:
l2=num[j]
while p>j and num[i]+num[j]+num[p]>0:
p=p-1
if p>j and num[i]+num[j]+num[p]==0:
ans.append([num[i],num[j],num[p]])
j=j+1
return ans | [
"635149007@qq.com"
] | 635149007@qq.com |
ace8389eaa08ece8e57061bce6d27225fb7361e4 | 8cec0d4cd23fcbc66a12de7e63b42be106c241fc | /react-app/node_modules/chokidar/node_modules/fsevents/build/config.gypi | 5c144079588994d957d70d3e267e777667f06d13 | [
"MIT"
] | permissive | zoefyz12/FoodOnlineOrdering | 2ebbd9bbd10729f46a92ba2e0ca235ff82dc058d | 07279dc59f9e60325f646e29dbbbdbcd8cada2ef | refs/heads/master | 2020-05-18T21:15:42.559720 | 2019-05-10T23:48:58 | 2019-05-10T23:48:58 | 184,656,054 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,848 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"debug_nghttp2": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_data_in": "../../deps/icu-small/source/data/in/icudt62l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_locales": "en,root",
"icu_path": "deps/icu-small",
"icu_small": "true",
"icu_ver_major": "62",
"llvm_version": "0",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "true",
"node_module_version": 64,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_large_pages": "false",
"node_use_openssl": "true",
"node_use_pch": "false",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "64.dylib",
"target_arch": "x64",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_typed_array_max_size_in_heap": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "7.0",
"nodedir": "/Users/zoezhao/.node-gyp/10.15.2",
"standalone_static_library": 1,
"fallback_to_build": "true",
"module": "/Users/zoezhao/Desktop/CS 157B/dsf/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v64-darwin-x64/fse.node",
"module_name": "fse",
"module_path": "/Users/zoezhao/Desktop/CS 157B/dsf/node_modules/chokidar/node_modules/fsevents/lib/binding/Release/node-v64-darwin-x64",
"napi_version": "3",
"node_abi_napi": "napi",
"save_dev": "",
"legacy_bundling": "",
"dry_run": "",
"viewer": "man",
"only": "",
"commit_hooks": "true",
"browser": "",
"also": "",
"sign_git_commit": "",
"rollback": "true",
"usage": "",
"audit": "true",
"globalignorefile": "/usr/local/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"metrics_registry": "https://registry.npmjs.org/",
"timing": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"preid": "",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"noproxy": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/usr/local/etc/npmrc",
"prefer_online": "",
"logs_max": "10",
"always_auth": "",
"global_style": "",
"cache_lock_retries": "10",
"update_notifier": "true",
"heading": "npm",
"audit_level": "low",
"searchlimit": "20",
"read_only": "",
"offline": "",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"allow_same_version": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/zoezhao/.npmrc",
"init_module": "/Users/zoezhao/.npm-init.js",
"cidr": "",
"user": "",
"node_version": "10.15.2",
"save": "true",
"ignore_prepublish": "",
"editor": "vi",
"auth_type": "legacy",
"tag": "latest",
"script_shell": "",
"progress": "true",
"global": "",
"searchstaleness": "900",
"optional": "true",
"ham_it_up": "",
"save_prod": "",
"force": "",
"bin_links": "true",
"searchopts": "",
"node_gyp": "/usr/local/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"depth": "Infinity",
"sso_poll_frequency": "500",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"sso_type": "oauth",
"save_prefix": "^",
"ca": "",
"scripts_prepend_node_path": "auto",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"prefer_offline": "",
"cache_lock_stale": "60000",
"otp": "",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/zoezhao/.npm",
"color": "always",
"package_lock": "true",
"package_lock_only": "",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/6.4.1 node/v10.15.2 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"send_metrics": "",
"save_bundle": "",
"umask": "0022",
"node_options": "",
"init_version": "1.0.0",
"init_author_name": "",
"git": "git",
"scope": "",
"unsafe_perm": "true",
"tmp": "/var/folders/fp/x2w3v0xd04dgl529y59fl4hr0000gn/T",
"onload_script": "",
"prefix": "/usr/local",
"link": ""
}
}
| [
"zoefyz12@gmail.com"
] | zoefyz12@gmail.com |
8d5a07180aa031738e5bf580d60d297259850bd9 | eea1a7ef7fd6ca57d80bcb0d1210ac606726cfa6 | /matchingSite/matchingSite/wsgi.py | c156ee23ca4f1ad4f1f1ffcc8194d771df7dfcbb | [] | no_license | anthonytrann/Hobbies-Matching-Site | 642936caac55a6614c616ac0ea502f35c1c7916d | 2b3bf3a39cef1959d0f32feacd7343fce7630560 | refs/heads/master | 2022-03-07T04:09:06.720527 | 2019-10-10T10:29:44 | 2019-10-10T10:29:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | """
WSGI config for matchingSite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'matchingSite.settings')
application = get_wsgi_application()
| [
"33399044+anthonytran227@users.noreply.github.com"
] | 33399044+anthonytran227@users.noreply.github.com |
86ebc96a4d695c7c8ffbc7a2ec66ded29f647ee0 | 708e17ad98f3143abaf811357883e680991d711f | /python2/numOf1Bits.py | bc66d46f27d4da1f9187624f97807e78ac955bb5 | [] | no_license | yichuanma95/leetcode-solns | a363cc8e85f2e8cdd5d2cde6e976cd76d4c4ea93 | 6812253b90bdd5a35c6bfba8eac54da9be26d56c | refs/heads/master | 2021-05-24T18:05:02.588481 | 2020-10-08T00:39:58 | 2020-10-08T00:39:58 | 253,690,413 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | '''
Problem 191: Number of 1 Bits
Write a function that takes an unsigned integer and return the number of '1' bits it has (also
known as the Hamming weight).
Example 1:
Input: 00000000000000000000000000001011
Output: 3
Explanation: The input binary string 00000000000000000000000000001011 has a total of 3 '1' bits.
Example 2:
Input: 00000000000000000000000010000000
Output: 1
Explanation: The input binary string 00000000000000000000000010000000 has a total of 1 '1' bit.
Example 3:
Input: 11111111111111111111111111111101
Output: 31
Explanation: The input binary string 11111111111111111111111111111101 has a total of 31 '1' bits.
Try counting them.
Note: Note that in some languages such as Java, there is no unsigned integer type. In this case, the input will be given as signed integer type and should not affect your implementation, as the internal binary representation of the integer is the same whether it is signed or unsigned.
In Java, the compiler represents the signed integers using 2's complement notation. Therefore, in Example 3 above the input represents the signed integer -3.
Follow up: If this function is called many times, how would you optimize it?
'''
class Solution(object):
"""
:type n: int
:rtype: int
"""
def hammingWeight(self, n):
count = 0
while n > 0:
count += (n & 1)
n >>= 1
return count
| [
"ma.yich@husky.neu.edu"
] | ma.yich@husky.neu.edu |
18a6ebf81ae410eb0568d77b07495d5acfa35a1f | eb1771e8eb026e5cafe9cd6211a1bcc8f81ce117 | /dataproc/isolatepac_ancil.py | 1f3abdfdbb68498e769cf52e62cb776b1cd310b7 | [] | no_license | tammasloughran/analysis | a9c9200a15f0190caae0b1d22d9721511025b89d | 22884c83cc04625d29165e962b7868bc3a3dbd31 | refs/heads/master | 2021-09-20T12:46:15.299083 | 2018-08-10T01:55:00 | 2018-08-10T01:55:00 | 45,821,412 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,351 | py | """isolatepac_ancil.py uses HadISST SSTs to construct a composite of sst anomalies for
ENSO and IOD excluding each basin.
"""
import numpy as np
import netCDF4 as nc
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.dates import YearLocator, MonthLocator, DateFormatter
from mpl_toolkits.basemap import Basemap
import mpl_toolkits.basemap as bm
from scipy import signal
# Load data
hadisst_file = '/media/Jupiter/observations/HadISST/sst/HadISST_sst.nc'
hadisstnc = nc.Dataset(hadisst_file)
sst_ncvar = hadisstnc.variables['sst']
sst = sst_ncvar[:]
time_ncvar = hadisstnc.variables['time']
time = time_ncvar[:]
lats = hadisstnc.variables['lat'][:]
lons = hadisstnc.variables['lon'][:]
# Dates
time[-1] = time[-2] + (time[-2]-time[-3])
dates = nc.netcdftime.num2date(time, time_ncvar.units)
dates2 = pd.date_range(dates[0],dates[-1]+(dates[1]-dates[0]),freq='M')
# Select base period
base_period_sst = sst[(dates2.year>=1971)&(dates2.year<=2000),...]
base_period_dates = dates2[(dates2.year>=1971)&(dates2.year<=2000)]
# Calculate climatology
sst_clim = np.ones((12,)+base_period_sst.shape[-2:])*np.nan
for imonth in xrange(0,12):
sst_clim[imonth,...] = base_period_sst[base_period_dates.month==(imonth+1),...].mean(axis=0)
# Subtract climatology
month = 0
sst_absolute = sst.copy()
for imonth in xrange(0,dates2.shape[0]):
sst[imonth,...] = sst[imonth,...] - sst_clim[month,...]
if month<11: month += 1
else: month = 0
# Detrend
satera = dates2.year>=1970
sst = sst[satera] # Select 1970 and later
dates2 = dates2[satera]
mask = np.sum(sst.mask,axis=0)>0 # Create a mask for where data extsts for all time.
mask = np.broadcast_to(mask,sst.shape) # Broacast to shape of sst
sst = signal.detrend(sst,axis=0) # Detrend
sst = np.ma.array(sst,mask=mask) # Reapply mask
# Calculate Nino34 index
nino34_sst = sst[:,(lats<=5)&(lats>=-5),:]
nino34_sst = nino34_sst[:,:,(lons>=-170)&(lons<=-120)]
nino34 = np.zeros(nino34_sst.shape[0])
wght = lats[(lats<=5)&(lats>=-5)]
wght = np.broadcast_to(wght, (50,10)).T
wght = np.broadcast_to(wght, (540,10,50))
wght = np.cos(np.deg2rad(wght))
nino34_sst = nino34_sst*wght
# --- Nino 3.4 index -------------------------------------------------------
# Area-weighted mean SST for each time step.
# NOTE(review): assumes nino34_sst and wght were defined and masked to the
# Nino 3.4 box earlier in this script (not visible here) -- confirm upstream.
for i in xrange(nino34_sst.shape[0]):
    nino34[i] = np.sum(nino34_sst[i])/np.sum(wght[0])
# Plot Nino34
fig, ax = plt.subplots()
rnge = 528  # last 528 months of the record
ax.plot_date(dates[-rnge:], nino34[-rnge:], '-')
ax.plot_date(dates[-rnge:], np.zeros(rnge), 'k')  # zero reference line
ax.plot_date(dates[-rnge:], np.ones(rnge)*nino34.std(), 'r--')   # +1 std dev threshold
ax.plot_date(dates[-rnge:], -np.ones(rnge)*nino34.std(), 'g--')  # -1 std dev threshold
ax.xaxis.set_major_locator(YearLocator())
ax.xaxis.set_major_formatter(DateFormatter('%Y'))
ax.autoscale_view()
ax.grid(True)
ax.set_xlabel('Date')
ax.set_ylabel('Nino 3.4 ($^\circ$C)')
fig.autofmt_xdate()
#plt.show()
# Define the years that have nonlinear lanina summers. The given year is the year containing January.
nlln_years = [2014, 2011, 2008, 2004, 1989, 1986, 1976, 1974]
nnllnyears = len(nlln_years)
# Disabled composite calculation kept for reference:
#nlln_composite = np.zeros((24,)+sst_absolute.shape[1:])
#for year in nlln_years:
# sst_iyear = sst[(dates2.year==year)|(dates2.year==(year-1))]
# nlln_composite += sst_iyear
#nlln_composite /= nnllnyears
# Plot a JJA composite
#nllnpic = nlln_composite[5:8,...].mean(axis=0)
#nllnpic[nllnpic < -5.] = np.nan
#m = Basemap(projection='robin', lon_0=180.)
#m.drawcoastlines()
#parallels = np.arange(-90., 120., 30.)
#meridians = np.arange(0., 360., 30.)
#m.drawparallels(parallels,labels=[True,False,False,False], linewidth=0.3)
#meridians = m.drawmeridians(meridians,labels=[False,False,False,True], linewidth=0.3)
#for mr in meridians:
# try: meridians[mr][1][0].set_rotation(50)
# except: pass
#nllnpic, lons = bm.shiftgrid(1, nllnpic, lons)
#xx,yy = np.meshgrid(lons, lats)
#x,y = m(xx,yy)
#fcont = m.contourf(x,y,nllnpic,cmap='seismic',levels=np.arange(-1.5,1.7,0.2))
#m.contour(x,y,nllnpic,colors='k',levels=[0])
#m.fillcontinents(color='gray',lake_color='gray')
#cbar = plt.colorbar(fcont, orientation='horizontal')
#cbar.ax.get_xaxis().set_ticks([])
#for j, lab in enumerate([str(i) for i in np.arange(-1.5,1.7,0.2)]):
# cbar.ax.text((1/15.)*j, -0.5, lab, ha='center', va='center')
#plt.title('JJA SSTA Eastern Pacific La Nina Composite')
#plt.show()
# Load the anomalies
ensonc = nc.Dataset('/srv/ccrc/data35/z5032520/ancilforge/modal_anomalies.nc','r')
elnino = np.array(ensonc.variables['elnino_sst'][:])
elnino[elnino>100] = 0  # zero out fill/missing values (presumably large fill values -- confirm)
lanina = np.array(ensonc.variables['lanina_sst'][:])
lanina[lanina>100] = 0
lats = ensonc.variables['lat'][:]
lons = ensonc.variables['lon'][:]
# Tropical Pacific mask: 1 inside the box, 0 outside, linearly tapered edges.
pac_mask = np.ones(elnino.shape)
# zero anomalies outside pacific +-30N of equator
pac_mask[:,lats>=30,:] = 0
pac_mask[:,lats<=-30,:] = 0
pac_mask[:,:,lons<=120] = 0
pac_mask[:,:,lons>=290] = 0
# Linear damping north south and westward
for i,j in enumerate(np.where((lats<30)&(lats>=20))[0]):
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = (n-i)/n  # 1 at 20N, tapering to ~0 at 30N
    pac_mask[:,j,(lons>120)&(lons<290)] = pac_mask[:,j,(lons>120)&(lons<290)]*factor
for i,j in enumerate(np.where((lats>-30)&(lats<=-20))[0]):
    # NOTE(review): n counts the *northern* band here; harmless only if the
    # grid is symmetric about the equator -- confirm.
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = 1+(1./n) - (n-i)/n  # ~0 at 30S, rising to 1 at 20S
    pac_mask[:,j,(lons>120)&(lons<290)] = pac_mask[:,j,(lons>120)&(lons<290)]*factor
for ii,i in enumerate(np.where((lons>120)&(lons<=140))[0]):
    n = float(np.sum((lons>120)&(lons<=140)))
    factor = 1+(1./n) - (n-ii)/n  # taper up moving eastward from 120E to 140E
    pac_mask[:,(lats>-30)&(lats<30),i] = pac_mask[:,(lats>-30)&(lats<30),i]*factor
# Grid-index based exclusions -- tied to this particular grid resolution.
pac_mask[:,114:,208:] = 0 # remove caribean
pac_mask[:,105:,221:] = 0 # remove caribean
# Indo-pacific region
indopac_mask = np.ones(elnino.shape)
# zero anomalies outside pacific +-30N of equator
indopac_mask[:,lats>=30,:] = 0
indopac_mask[:,lats<=-30,:] = 0
indopac_mask[:,:,lons<=30] = 0
indopac_mask[:,:,lons>=290] = 0
# Linear damping north south and westward
for i,j in enumerate(np.where((lats<30)&(lats>=20))[0]):
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = (n-i)/n
    indopac_mask[:,j,(lons>30)&(lons<290)] = indopac_mask[:,j,(lons>30)&(lons<290)]*factor
for i,j in enumerate(np.where((lats>-30)&(lats<=-20))[0]):
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = 1+(1./n) - (n-i)/n
    indopac_mask[:,j,(lons>30)&(lons<290)] = indopac_mask[:,j,(lons>30)&(lons<290)]*factor
indopac_mask[:,114:,208:] = 0 # remove caribean
indopac_mask[:,105:,221:] = 0 # remove caribean
indopac_nino = elnino*indopac_mask
indopac_nina = lanina*indopac_mask
elnino = elnino*pac_mask
# There are still positive anomalies in the far western pacific
# that need to be removed.
for t in xrange(0,17):
    for y in xrange(0,150):
        for x in xrange(0,120):
            if elnino[t,y,x]>0.: elnino[t,y,x] = 0
lanina = lanina*pac_mask
# And remove negative anomalies in far western Pacific.
for t in xrange(0,17):
    for y in xrange(0,150):
        for x in xrange(0,120):
            if lanina[t,y,x]<0.: lanina[t,y,x] = 0
# Isolated indian ocean ssts
piod = np.array(ensonc.variables['piod_sst'][:])
piod[piod>100] = 0  # zero out fill/missing values
niod = np.array(ensonc.variables['niod_sst'][:])
niod[niod>100] = 0
# Indian Ocean mask with the same taper treatment as the Pacific masks above.
ind_mask = np.ones(elnino.shape)
# zero anomalies outside indian ocean +-30N of equator
ind_mask[:,lats>=30,:] = 0
ind_mask[:,lats<=-30,:] = 0
ind_mask[:,:,lons<=30] = 0
ind_mask[:,:,lons>=130] = 0
# Linear damping north south and eastward
for i,j in enumerate(np.where((lats<30)&(lats>=20))[0]):
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = (n-i)/n
    ind_mask[:,j,(lons>30)&(lons<130)] = ind_mask[:,j,(lons>30)&(lons<130)]*factor
for i,j in enumerate(np.where((lats>-30)&(lats<=-20))[0]):
    n = float(np.sum((lats<30)&(lats>=20)))
    factor = 1+(1./n) - (n-i)/n
    ind_mask[:,j,(lons>30)&(lons<130)] = ind_mask[:,j,(lons>30)&(lons<130)]*factor
for ii,i in enumerate(np.where((lons>110)&(lons<=130))[0]):
    n = float(np.sum((lons>110)&(lons<=130)))
    factor = (n-ii)/n  # taper down moving eastward from 110E to 130E
    ind_mask[:,(lats>-30)&(lats<30),i] = ind_mask[:,(lats>-30)&(lats<30),i]*factor
# Grid-index based exclusions -- tied to this particular grid resolution.
ind_mask[:,91:,83:] = 0 # Remove south China sea
ind_mask[:,93:,81:] = 0 # South China sea
ind_mask[:,89:,85:] = 0 # South China
piod = piod*ind_mask
niod = niod*ind_mask
# Save to file
# Write all masked anomaly fields to isolated_anomalies.nc (24 monthly steps).
isonc = nc.Dataset('isolated_anomalies.nc','w')
isonc.createDimension('time',24)
isonc.createDimension('lat',lats.shape[0])
isonc.createDimension('lon',lons.shape[0])
tout = isonc.createVariable('time','f8',('time'))
setattr(tout, 'units', 'month')
tout[:] = range(1,25)  # months 1..24
latout = isonc.createVariable('lat','f8',('lat'))
setattr(latout, 'units', 'degrees_north')
latout[:] = lats
lonout = isonc.createVariable('lon','f8',('lon'))
setattr(lonout, 'units', 'degrees_east')
lonout[:] = lons
elninoout = isonc.createVariable('elnino_sst','f8',('time','lat','lon'))
setattr(elninoout, 'units', 'degC')
elninoout[:] = elnino
laninaout = isonc.createVariable('lanina_sst','f8',('time','lat','lon'))
setattr(laninaout, 'units', 'degC')
laninaout[:] = lanina
piodout = isonc.createVariable('piod_sst','f8',('time','lat','lon'))
setattr(piodout, 'units', 'degC')
piodout[:] = piod
niodout = isonc.createVariable('niod_sst','f8',('time','lat','lon'))
setattr(niodout, 'units', 'degC')
niodout[:] = niod
ipelout = isonc.createVariable('indopac_nino','f8',('time','lat','lon'))
setattr(ipelout, 'units', 'degC')
ipelout[:] = indopac_nino
iplaout = isonc.createVariable('indopac_nina','f8',('time','lat','lon'))
setattr(iplaout, 'units', 'degC')
iplaout[:] = indopac_nina
isonc.close()
| [
"t.loughran@student.unsw.edu.au"
] | t.loughran@student.unsw.edu.au |
c0f54e11c4033213b344084214d8c45b6b3098ec | 3e2671c4881b8e010aae81011560fe3c317f83f0 | /Lane Detection - IGVC/GRayLane.py | 68eca2f26beade3ec48b9afaaf716919c5a42aea | [] | no_license | Lajith-Puthuchery/Open-CV | 7dc1c92f8a92d3cb7305cb4b48084c92d29d46b1 | 9edc9aa6c088306bf760cc9959bb3591e28c2ed7 | refs/heads/main | 2023-06-07T04:08:20.749240 | 2021-07-07T13:18:13 | 2021-07-07T13:18:13 | 382,853,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | import numpy as np
import cv2
cap = cv2.VideoCapture('/home/lajith/Downloads/lane_vgt.mp4')
def ROI(frame, vertices):
    """Keep only the region of *frame* inside the polygon *vertices*.

    Builds a single-channel mask, fills the polygon with white (255) and
    ANDs it with the input so everything outside the polygon becomes black.
    """
    region_mask = np.zeros_like(frame)
    fill_color = (255,)
    cv2.fillPoly(region_mask, vertices, fill_color)
    return cv2.bitwise_and(frame, region_mask)
# Main loop: threshold -> blur -> morphology -> Canny -> ROI -> Hough lines.
while cap.isOpened():
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame (stream end?). Exiting ...")
        break
    height=frame.shape[0]
    width=frame.shape[1]
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cv2.imshow('frame', gray)
    # HSV bounds for the disabled inRange path below (unused while commented out).
    lower_hsv = np.array([0, 44, 126])
    higher_hsv = np.array([179, 96, 248])
    #ask = cv2.inRange(hsv, lower_hsv, higher_hsv)
    #res = cv2.bitwise_and(frame,frame, mask= mask)
    # NOTE(review): dividing these uint8 kernels yields float arrays; for the
    # morphology calls only the nonzero footprint matters, so the division is
    # probably unintended. kernel3/kernel4/kernel5 are never used.
    kernel5 = np.ones((3,3),np.uint8)/9
    kernel = np.ones((5,5),np.uint8)/25
    kernel2 = np.ones((7,7),np.uint8)/49
    kernel3 = np.ones((5,5),np.float32)/25
    kernel4 = np.ones((11,11),np.uint8)/121
    # NOTE(review): this reuses `ret`, clobbering the frame-read flag.
    ret, thresh = cv2.threshold(gray, 185, 255, cv2.THRESH_BINARY)
    cv2.imshow("thresh",thresh)
    g_blur = cv2.GaussianBlur(thresh,(7,7),0)
    cv2.imshow('g_blur',g_blur)
    closing = cv2.morphologyEx(g_blur, cv2.MORPH_CLOSE, kernel2)
    #cv2.imshow('closing',closing)
    dilation = cv2.dilate(closing,kernel,iterations = 1)
    #cv2.imshow('dilation',dilation)
    #erosion = cv2.erode(dilation, kernel, iterations=1)
    canny = cv2.Canny(dilation,100,150)
    cv2.imshow('canny',canny)
    # Restrict edge image to the bottom 300-pixel band of the frame.
    ROI_vertices=[(0,height),(width,height),(width,height-300),(0,height-300)]
    ROI_image=ROI(canny,np.array([ROI_vertices],np.int32))
    rho = 1 # distance resolution in pixels of the Hough grid
    theta = np.pi / 180 # angular resolution in radians of the Hough grid
    threshold = 15 # minimum number of votes (intersections in Hough grid cell)
    min_line_length = 20 # minimum number of pixels making up a line
    max_line_gap = 20 # maximum gap in pixels between connectable line segments
    line_image = np.copy(frame) * 0 # creating a blank to draw lines on
    # Run Hough on edge detected image
    # Output "lines" is an array containing endpoints of detected line segments
    lines = cv2.HoughLinesP(ROI_image, rho, theta, threshold, np.array([]),
                            min_line_length, max_line_gap)
    # NOTE(review): HoughLinesP may return None when no lines are found,
    # which would make this loop raise TypeError.
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(line_image,(x1,y1),(x2,y2),(255,0,0),5)
    # Overlay detected lines (blue, full weight) on the original frame.
    lines_edges = cv2.addWeighted(frame, 0.8, line_image, 1, 0)
    cv2.imshow("Lane",lines_edges)
    if cv2.waitKey(1) == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
"noreply@github.com"
] | Lajith-Puthuchery.noreply@github.com |
2f1752c2bb239fc015ffd2b4a91ad7011d473660 | 177c2393fb86c6fbcc1e995a60805e4a6b901aae | /Sesion-02/Banco/tarjeta/views.py | d3de642d321bb86fe5f489875cbfc164f93fcf53 | [] | no_license | rctorr/TECP0008FUPYCMX2001 | bc6a11e33486be2ccb5d2b79a9a64ba5e48a24f4 | 419635f02de189b91a06a9b366950320dfb0e00e | refs/heads/master | 2023-05-20T22:37:25.194835 | 2021-06-08T03:12:37 | 2021-06-08T03:12:37 | 370,964,281 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | from django.shortcuts import render
# Create your views here.
def index(request):
    """Serve GET / by rendering the tarjeta app's landing page."""
    return render(request, "tarjeta/index.html")
"rictor@cuhrt.com"
] | rictor@cuhrt.com |
d48bd8e1a10fe5636e78061e2a79feeab8d3e02d | 24d8bc5f09f8b8e484ab76aa317c5436a7ddf43c | /Code-it/Divide and Conquer - 합병정렬.py | 0ac05550ca18b70aa1d20ad9d0ac5ca909142ac9 | [] | no_license | GroundP/Algorithm | c00de37256a193c42051eb58aa8157d0d1882f67 | 1dd4d22141a43be99a9aaff6ba00e04b66e4cccb | refs/heads/master | 2020-07-27T09:21:28.814675 | 2019-10-04T16:07:13 | 2019-10-04T16:07:13 | 209,044,115 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,209 | py | def merge(list1, list2):
if len(list1) == 0:
return list2
if len(list2) == 0:
return list1
sortlist = []
idx1 = 0
idx2 = 0
while idx1 < len(list1) and idx2 < len(list2):
if list1[idx1] < list2[idx2]:
sortlist.append(list1[idx1])
idx1 += 1
elif list1[idx1] > list2[idx2]:
sortlist.append(list2[idx2])
idx2 += 1
else:
sortlist.append(list1[idx1])
sortlist.append(list2[idx2])
idx1 += 1
idx2 += 1
if idx2 < len(list2):
sortlist += list2[idx2:]
if idx1 < len(list1):
sortlist += list1[idx1:]
return sortlist
# 합병 정렬
def merge_sort(my_list):
    """Sort *my_list* with recursive merge sort and return the sorted list."""
    if len(my_list) < 2:
        return my_list
    middle = len(my_list) // 2
    sorted_left = merge_sort(my_list[:middle])
    sorted_right = merge_sort(my_list[middle:])
    # Merge the two sorted halves (inlined merge step).
    combined = []
    i = j = 0
    while i < len(sorted_left) and j < len(sorted_right):
        if sorted_left[i] <= sorted_right[j]:
            combined.append(sorted_left[i])
            i += 1
        else:
            combined.append(sorted_right[j])
            j += 1
    combined.extend(sorted_left[i:])
    combined.extend(sorted_right[j:])
    return combined
# Tests: exercise merge_sort on lists with duplicates and mixed ordering.
print(merge_sort([1, 3, 5, 7, 9, 11, 13, 11]))
print(merge_sort([28, 13, 9, 30, 1, 48, 5, 7, 15]))
print(merge_sort([2, 5, 6, 7, 1, 2, 4, 7, 10, 11, 4, 15, 13, 1, 6, 4]))
| [
"bloom0819@naver.com"
] | bloom0819@naver.com |
2997f975affa3c8c14af0c23c4a28115583d55a3 | 137afabda1a53b4de09bed7cdf3c14e6c98111c8 | /reciprocity/recipe/migrations/0011_auto_20160504_1754.py | b5c4c50f30dcf396373d5c18f7387b93d90179c5 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | TeamReciprocity/reciprocity | b0c7a205a3c843fd776bce8ff7edb0b051019641 | 28a32e5b1f8a4088decc7b32cffae860e16c9c2b | refs/heads/master | 2021-01-01T04:44:28.620391 | 2016-05-26T23:13:05 | 2016-05-26T23:13:05 | 56,874,379 | 1 | 1 | null | 2016-05-26T23:13:05 | 2016-04-22T17:43:48 | Python | UTF-8 | Python | false | false | 511 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-05 00:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes Recipe.ancestors optional (blank=True)
    # on the self-referential many-to-many field.

    dependencies = [
        ('recipe', '0010_auto_20160504_1730'),
    ]

    operations = [
        migrations.AlterField(
            model_name='recipe',
            name='ancestors',
            field=models.ManyToManyField(blank=True, related_name='_recipe_ancestors_+', to='recipe.Recipe'),
        ),
    ]
| [
"wohlfea@gmail.com"
] | wohlfea@gmail.com |
eda95050e4aab8b87d5ef0e01e7b3dd35478760f | d0bd9c3c5539141c74e0eeae2fa6b7b38af84ce2 | /tests/test_data/test_molecular_weight.py | 6743fdd25cc4c253454be447b8c4c52f01c76f3a | [
"BSD-3-Clause"
] | permissive | KaneWh1te/cogent3 | 150c72e2f80a6439de0413b39c4c37c09c9966e3 | 115e9eb5700627fdb24be61441a7e3e155c02c61 | refs/heads/master | 2023-07-29T00:32:03.742351 | 2021-04-20T04:32:00 | 2021-04-20T04:32:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | #!/usr/bin/env python
"""Tests for molecular weight.
"""
from unittest import TestCase, main
from cogent3.data.molecular_weight import ProteinMW, RnaMW
__author__ = "Rob Knight"
__copyright__ = "Copyright 2007-2021, The Cogent Project"
__credits__ = ["Rob Knight"]
__license__ = "BSD-3"
__version__ = "2021.04.20a"
__maintainer__ = "Gavin Huttley"
__email__ = "Gavin.Huttley@anu.edu.au"
__status__ = "Production"
from numpy.testing import assert_allclose
class WeightCalculatorTests(TestCase):
    """Tests for WeightCalculator, which should calculate molecular weights."""

    def test_call(self):
        """WeightCalculator should return correct molecular weight"""
        r = RnaMW
        p = ProteinMW
        # Empty sequences weigh nothing.
        self.assertEqual(p(""), 0)
        self.assertEqual(r(""), 0)
        # Single residues and short homopolymers.
        assert_allclose(p("A"), 89.09)
        assert_allclose(r("A"), 375.17)
        assert_allclose(p("AAA"), 231.27)
        assert_allclose(r("AAA"), 1001.59)
        assert_allclose(r("AAACCCA"), 2182.37)
        # Long protein sequence (backslash continuations keep it one literal).
        assert_allclose(
            p(
                "MVQQAESLEAESNLPREALDTEEGEFMACSPVALDESDPDWCKTASGHIKRPMNAFMVWSKIERRKIMEQSPDMHNAEISKRLGKR\
WKMLKDSEKIPFIREAERLRLKHMADYPDYKYRPRKKPKMDPSAKPSASQSPEKSAAGGGGGSAGGGAGGAKTSKGSSKKCGKLKA\
PAAAGAKAGAGKAAQSGDYGGAGDDYVLGSLRVSGSGGGGAGKTVKCVFLDEDDDDDDDDDELQLQIKQEPDEEDEEPPHQQLLQP\
PGQQPSQLLRRYNVAKVPASPTLSSSAESPEGASLYDEVRAGATSGAGGGSRLYYSFKNITKQHPPPLAQPALSPASSRSVSTSSS\
SSSGSSSGSSGEDADDLMFDLSLNFSQSAHSASEQQLGGGAAAGNLSLSLVDKDLDSFSEGSLGSHFEFPDYCTPELSEMIAGDWL\
EANFSDLVFTY"
            ),
            46685.97,
        )
# run if called from command-line
if __name__ == "__main__":
main()
| [
"Gavin.Huttley@anu.edu.au"
] | Gavin.Huttley@anu.edu.au |
687ceb831757b55f605ca7582082dad5862e647f | aab50ee31f6107fd08d87bd4a93ded216eebb6be | /com/baizhi/杜亚博作业/杜亚博_9.5/杜亚博_9.24.py | e22353a598b8ddc8afb188119acb08d6855be8a8 | [] | no_license | dyb-py/pk | deaf50988694475bdfcda6f2535ba0e728d79931 | b571b67a98fa0be6d73cccb48b66386bc4dfd191 | refs/heads/master | 2020-12-29T15:06:20.294353 | 2020-02-06T09:05:08 | 2020-02-06T09:05:08 | 238,646,794 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,139 | py | #1
# SyntaxError 语法错误
#2
#下标越界 IndexError
#3
#属性错误 AttributeError
#4
#KeyError
#5
# SyntaxError
#6
# NameError
#7
#
# UnboundLocalError
#8
# try-except 之类的语句来处理异常
#9
# 可以, 因为异常有多种 多个except可以捕捉多个异常
#10
#自定义异常类 直接或间接继承BaseException
#except捕捉一切异常
#except(异常1,异常2.。。)
#11
#不建议 可读性极差 出现异常后 不清楚是那种异常 不好维护
#12
#用finally关闭文件
#13
# try:
# for i in range(3):
# for j in range(3):
# if i==2:
# raise KeyboardInterrupt
# print(i,j)
# except KeyboardInterrupt:
# print('退出了')
#14
# def in_input():
# a=input('输入整数')
# try:
# a=int(a)
# print(a)
# except:
# print('输入的不是整数')
# in_input()
#15
#出现NameError
# try:
# f=open('my.txt')
# print(f.read())
# except OSError as reason:
# print('出错了'+str(reason))
# finally:
# try:
# f.close()
# except NameError as e :
# print(e) | [
"m15935117233@163.com"
] | m15935117233@163.com |
4a109b25566b48449fcf3d5d6534a0e68f5e82a2 | 4341e83359d5dddbfb67998e456d4d053fb73b71 | /web_deployment/admin/catlog/apps.py | 662d3e5de0afd4a124f9e2f58e8bf8838c155df9 | [] | no_license | ArulFranklin08/django-deployments | 1c1ef80356f37b33d9ee86bcd65f032b4d1d31ee | 74938f5d946af646d7a54be0766b6b1dde59ff3d | refs/heads/main | 2023-02-01T17:40:39.045299 | 2020-12-15T07:46:20 | 2020-12-15T07:46:20 | 321,590,952 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 109 | py | #!/usr/bin/env python
from django.apps import AppConfig
class CatlogConfig(AppConfig):
name = 'catlog'
| [
"arulfranklin1996@gmail.com"
] | arulfranklin1996@gmail.com |
c0fed2ef8924d494a808d754371b7138465de518 | 3e0158911b600897affc6e75a11d86f8bfaa1e74 | /LiPlate-master/modules/Plate.py | f9301f2e30b34f390de81c19e3f90571531df5d1 | [
"MIT"
] | permissive | pchaow/HornbillCourse | de878b3dad8a548c76feb2b7a0c103dd18916b21 | 0a05e8cb6dc43fa4643671697721d672d181f6e7 | refs/heads/master | 2020-03-21T07:05:42.654497 | 2018-06-30T04:17:20 | 2018-06-30T04:17:20 | 138,260,185 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,432 | py | import cv2;
import numpy as np;
import logging;
import pytesseract as tes;
from PIL import Image;
from modules.TrainingCharacter import *;
from matplotlib import pyplot as plt;
from copy import deepcopy, copy;
from logging.config import fileConfig;
# logger setup
fileConfig("logging_config.ini");
logger = logging.getLogger();
class Plate:
    """ Class for the license plates """

    def __init__(self, image): ### Plate Class Vars ###
        self.original_image = image; # original image of analysis
        self.plate_located_image = deepcopy(image); # original image with plate hilighted
        self.plate_image = None; # license plate cropped
        self.plate_image_char = None; # license plate cropped, chars outlined
        self.gray_image = None; # original image - grayscale for analysis
        self.plate_number = ""; # plate number
        self.roi = []; # regions of interest for plates
        self.plate_characters = []; # cropped images of characters on plate
        logger.info("New plate created.");

    """ Converts original image to grayscale for analysis """
    def grayImage(self, image):
        logger.info("Image converted to grayscale");
        return cv2.cvtColor(image, cv2.COLOR_BGR2GRAY);

    """ Algorithm to find plate and read characters """
    def plateSearch(self, characters_array):
        # Pipeline: locate plate contour -> crop -> OCR -> display.
        self.findContour();
        self.cropPlate();
        if self.plate_image is not None:
            self.readPlateNumber(characters_array);
        self.showResults();
        return True;

    """ Searches for a contour that looks like a license plate
    in the image of a car """
    def findContour(self):
        self.gray_image = self.grayImage(deepcopy(self.original_image));
        self.gray_image = cv2.medianBlur(self.gray_image, 5);
        self.gray_image = cv2.adaptiveThreshold(self.gray_image, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 43,2);
        _,contours,_ = cv2.findContours(self.gray_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
        w,h,x,y = 0,0,0,0;
        for contour in contours:
            area = cv2.contourArea(contour);
            # rough range of areas of a license plate
            if area > 6000 and area < 40000:
                [x,y,w,h] = cv2.boundingRect(contour);
                # rough dimensions of a license plate
                if w > 100 and w < 200 and h > 60 and h < 100:
                    self.roi.append([x,y,w,h]);
                    cv2.rectangle(self.plate_located_image, (x,y), (x+w, y+h), (0,255,0), 10);
        logger.info("%s potential plates found.", str(len(self.roi)));
        return True;

    """ If a license plate contour has been found, crop
    out the contour and create a new image """
    def cropPlate(self):
        # NOTE(review): `> 1` means a single candidate plate is never cropped;
        # this looks like it should be `>= 1` -- confirm intent.
        if len(self.roi) > 1:
            [x,y,w,h] = self.roi[0];
            self.plate_image = self.original_image[y:y+h,x:x+w];
            self.plate_image_char = deepcopy(self.plate_image);
        return True;

    """ Subalgorithm to read the license plate number using the
    cropped image of a license plate """
    def readPlateNumber(self, characters_array):
        # NOTE(review): characters_array is accepted but never used here.
        self.findCharacterContour();
        self.tesseractCharacter();
        return True;

    """ Crops individual characters out of a plate image
    and converts it to grayscale for comparison """
    def cropCharacter(self, dimensions):
        # NOTE(review): despite the docstring, no grayscale conversion happens
        # here -- only a deep-copied crop of the color plate image is returned.
        [x,y,w,h] = dimensions;
        character = deepcopy(self.plate_image);
        character = deepcopy(character[y:y+h,x:x+w]);
        return character;

    """ Finds contours in the cropped image of a license plate
    that fit the dimension range of a letter or number """
    def findCharacterContour(self):
        gray_plate = self.grayImage(deepcopy(self.plate_image));
        gray_plate = cv2.GaussianBlur(gray_plate, (3,3), 0);
        _,threshold = cv2.threshold(gray_plate, 140, 255, 0);
        _,contours,_ = cv2.findContours(threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE);
        w,h,x,y = 0,0,0,0;
        logger.info("%s contours found.", str(len(contours)));
        for contour in contours:
            area = cv2.contourArea(contour);
            # rough range of areas of a plate number
            if area > 120 and area < 2000:
                [x,y,w,h] = cv2.boundingRect(contour);
                # rough dimensions of a character
                if h > 20 and h < 90 and w > 10 and w < 50:
                    character = self.cropCharacter([x,y,w,h]);
                    self.plate_characters.append([x, character]);
                    cv2.rectangle(self.plate_image_char, (x,y), (x+w, y+h), (0,0,255), 1);
        logger.info("%s plate characters found", str(len(self.plate_characters)));
        return True;

    """ Tesseract: reads the character using the Tesseract libary """
    def tesseractCharacter(self):
        self.plate_characters = sorted(self.plate_characters, key=lambda x: x[0]); # sort contours left to right
        for character in self.plate_characters[:8]: # only first 8 contours
            char_image = Image.fromarray(character[1]);
            char = tes.image_to_string(char_image, config='-psm 10'); # psm 10: single character mode
            self.plate_number += char.upper();
        return True;

    """ Subplot generator for images """
    def plot(self, figure, subplot, image, title):
        figure.subplot(subplot);
        figure.imshow(image);
        figure.xlabel(title);
        figure.xticks([]);
        figure.yticks([]);
        return True;

    """ Show our results """
    def showResults(self):
        plt.figure(self.plate_number);
        self.plot(plt, 321, self.original_image, "Original image");
        self.plot(plt, 322, self.gray_image, "Threshold image");
        self.plot(plt, 323, self.plate_located_image, "Plate located");
        if self.plate_image is not None:
            self.plot(plt, 324, self.plate_image, "License plate");
            self.plot(plt, 325, self.plate_image_char, "Characters outlined");
            plt.subplot(326);plt.text(0,0,self.plate_number, fontsize=30);plt.xticks([]);plt.yticks([]);
        plt.tight_layout();
        plt.show();
        return True;
| [
"chaow.po@up.ac.th"
] | chaow.po@up.ac.th |
3885f3d0e07e27d464ae419a9daf01151fcba185 | a16edf6c57fde65ef844a32f03f1b4ebdf0e49e8 | /AbsFactory/new/SvgDiagramFactory.py | bf5190fee8047649836aa354844b3452552aa7e2 | [] | no_license | BlackKnight7/PyInPractice | d50a1b088926f38f2a57c68677f74fde52b6efe7 | dfb40e9cec132bb5bd17344f69965d56288d3bef | refs/heads/master | 2021-01-13T00:59:21.113981 | 2016-01-15T08:39:08 | 2016-01-15T08:39:08 | 47,543,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,289 | py | import svgwrite
from new.DiagramFactory import DiagramFactory
class SvgDiagramFactory(DiagramFactory):
    # Concrete factory that builds diagram components backed by svgwrite.

    class Diagram:
        def __init__(self, width, height):
            self.height = height
            self.width = width
            # NOTE(review): output filename is hardcoded to 'test.svg'.
            self.dwg = svgwrite.Drawing('test.svg', profile='tiny')
        def add(self, component):
            # Components expose their svgwrite element via `.rows`.
            self.dwg.add(component.rows)
        def save(self):
            self.dwg.save()

    class Rectangle:
        def __init__(self, x, y, width, height, fill, stroke):
            # NOTE(review): fill and stroke are accepted but never passed to
            # the svgwrite Rect -- confirm whether styling was intended.
            self.x = x
            self.y = y
            self.width = width
            self.height = height
            self.rows = svgwrite.shapes.Rect(insert=(x, y), size=(width, height))

    class Text:
        def __init__(self, x, y, text, fontsize):
            # NOTE(review): fontsize is accepted but unused; fill is fixed blue.
            self.x = x
            self.y = y
            self.rows = svgwrite.text.Text(text=text, insert=(x, y), fill='blue')

    @classmethod
    def make_diagram(Class, width, height):
        return Class.Diagram(width, height)

    @classmethod
    def make_rectangle(Class, x, y, width, height, fill="white", stroke="black"):
        return Class.Rectangle(x, y, width, height, fill, stroke)

    @classmethod
    def make_text(Class, x, y, text, fontsize=12):
        return Class.Text(x, y, text, fontsize)
| [
"blackknight7.h@gmail.com"
] | blackknight7.h@gmail.com |
804f299a000d0ba44bb63c77ff71cd58cf6d1531 | ac9d68d3b0e1c820ef12d03750d1ef2416fc8077 | /WhileLoop.py | e8ad0f75c101e62f2f6943b92ceb7faf8b963215 | [] | no_license | ritvik2/python | 305e32d123dd1c72ac80a597813b30e5d587763e | 1c76a3e0255f557615f3f54ce9d20d25f877e677 | refs/heads/master | 2020-05-23T18:20:35.337667 | 2019-05-15T20:35:38 | 2019-05-15T20:35:38 | 186,885,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | i = -1
while i > -11 :
print (i)
i += -1 | [
"ritviksharma1998@gmail.com"
] | ritviksharma1998@gmail.com |
6ebc8f5808fc7d535a02fe22eab30f0d67306ae7 | fcee889ed93812338cdce636cce2a326fc93e276 | /course/great/great/urls.py | 5a4f017bb86a45eddee96f7b0207d370b8cf0df5 | [] | no_license | jabir-dev/Django-web | d026a804df8e98a45212e7d9784b66de20c513c5 | 03961abb2d58f4bd3319af1c29173a209b66611a | refs/heads/master | 2022-12-14T22:22:46.732311 | 2020-09-11T19:26:04 | 2020-09-11T19:26:04 | 294,793,629 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | """great URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from article import views
# URL routing: Django admin plus the article app's landing page at the root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', views.index)
]
| [
"cas@localhost.localdomain"
] | cas@localhost.localdomain |
32884dc13e56f62e86945d9b1a8ebb74d8a146e5 | e7cec275884cc5c8f232c98a54604d01533a70bc | /inquiries/inquiry5/compare_gmm_msd.py | f47daa0b4413723d746d2d8405740d8dfa968df7 | [] | no_license | Blightful/CS3535 | 2d5f11eb44714abd6c73fa4985657bc06402796d | b182f5d2c174d3fb4e6fa7c40449adf2b8af9e75 | refs/heads/master | 2020-05-17T22:45:56.918134 | 2015-04-28T17:58:59 | 2015-04-28T17:58:59 | 30,601,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,529 | py | """
Recursively pulls timbre from hdf5 files in the MSD directory structure and
compares them to the GMM of the other hdf5.
Prints the log
For example:
$ python all_pitches_msd.py /u/classes/3535/modules/MillionSongSubset/data/
Based on all_pitches_msd.py code by Mitch Parry 2015-02-12
code for finding timbre and saving to pickle by Walt Scarboro
"""
try:
import cPickle as pickle
except ImportError as e:
import pickle
from pyechonest import config
config.ECHO_NEST_API_KEY='UV6BRCEQPDZ29OJJ9'
import pyechonest
import echonest.remix.audio as audio
import sys
import os
import hdf5_getters
import numpy as np
from sklearn import mixture
from pprint import pprint
import math
import numpy
# import build_gmm_msd
# queryHDF5 = ""
# queryTimbre = []
def _is_hdf5(f):
_, ext = os.path.splitext(f)
ext = ext[1:] # drop leading '.'
return ext == 'h5'
def _get_one_timbre(f):
    # Read the per-segment timbre matrix from one MSD HDF5 file.
    with hdf5_getters.open_h5_file_read(f) as h5:
        timbre = hdf5_getters.get_segments_timbre(h5)
    return timbre
def _get_one_title(f):
    # Read the song title from one MSD HDF5 file.
    with hdf5_getters.open_h5_file_read(f) as h5:
        title = hdf5_getters.get_title(h5)
    return title
def _get_one_artist(f):
    # Read the artist name from one MSD HDF5 file.
    with hdf5_getters.open_h5_file_read(f) as h5:
        artist = hdf5_getters.get_artist_name(h5)
    return artist
def compare_gmm(g, t, artist, title):
    """Score how similar a song's GMM is to the global query song's GMM.

    Computes a normalised symmetric log-likelihood ratio between the query
    model/samples (module globals queryGMM/queryTimbre) and the candidate
    model *g* with samples *t*, then appends
    [artist, title, exp(score)] to the global viewer_arr (larger = more
    similar).

    Args:
        g: fitted sklearn mixture.GMM for the candidate song.
        t: timbre samples drawn from (or belonging to) that model.
        artist: candidate artist name, recorded in viewer_arr.
        title: candidate song title, recorded in viewer_arr.
    """
    len_query = len(queryTimbre)
    len_other = len(t)
    # Total log-likelihood of each sample set under each model.
    # (sum() replaces the original manual index loops -- same left-to-right
    # accumulation, same result.)
    ll_query_under_query = sum(queryGMM.score(queryTimbre))
    ll_query_under_other = sum(g.score(queryTimbre))
    ll_other_under_query = sum(queryGMM.score(t))
    ll_other_under_other = sum(g.score(t))
    # Length-normalised symmetric ratio; exp() maps it to a positive score.
    probadj = ((ll_query_under_other - ll_query_under_query) / (2.00 * len_query)
               + (ll_other_under_query - ll_other_under_other) / (2.00 * len_other))
    viewer_arr.append([artist, title, math.exp(probadj)])
def get_all_timbre(directory):
    "get the timbre for each hdf5 file in this or any subdirectories"
    # NOTE(review): despite the docstring, this function returns None; it works
    # entirely through compare_gmm's side effect of appending to viewer_arr.
    timbre = []
    for f in os.listdir(directory):
        path = os.path.join(directory, f)
        # print path
        if _is_hdf5(f):
            """
            This unpickles the model from above hdf5 file name concatenated
            with GMM. Only works after you have built your GMM
            """
            gmm = pickle.load(open(str(f[:-3].upper()) + 'GMM' + ".p", 'rb'))
            artist = _get_one_artist(path)
            title = _get_one_title(path)
            timbre = gmm.sample(1000)  # draw samples instead of reading raw timbre
            compare_gmm(gmm, timbre, artist, title)
        elif os.path.isdir(path):
            # NOTE(review): the recursive result is discarded (always None);
            # recursion only matters for its side effects.
            p = get_all_timbre(path)
        else:
            continue
# Python 2 entry point: build a query GMM from a local MP3, then walk the MSD
# directory comparing every song's pre-built GMM against it.
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'usage: python compare_gmm_msd.py path_to_MSD'
    else:
        # NOTE(review): `global` at module level is a no-op; these names are
        # module globals regardless.
        global queryHDF5
        global queryGMM
        global queryTimbre
        global viewer_arr
        # Seed with one placeholder row of 3 Nones (shape [artist, title, score]).
        viewer_arr = [[None for i in range(3)] for j in range(1)]
        # viewer_arr = [[3]]
        # queryHDF5 = sys.argv[2]
        """
        until querying works for values more simple than HDF5 names just modify the value of below file name.
        """
        ###########################################
        # Audio File Local Search
        audio_file = audio.LocalAudioFile('tom petty.mp3')
        timbreA = audio_file.analysis.segments.timbre
        a = numpy.array(timbreA)
        # Fit a 3-component diagonal-covariance GMM to the query song's timbre.
        modelA = mixture.GMM(n_components=3, covariance_type='diag', random_state=None,
                             thresh=0.01, min_covar=0.001, n_iter=1000, n_init=1, params='wmc',
                             init_params='wmc')
        modelA.fit(a)
        queryGMM = modelA
        queryTimbre = modelA.sample(1000)
        ##############################################
        # By HDF5 File Name
        # gmm = pickle.load(open('TRAAAAW128F429D538GMM.p', 'rb'))
        # queryGMM = gmm
        # queryTimbre = gmm.sample(1000)
        get_all_timbre(sys.argv[1])
        # Sort by similarity score, most similar first, and print.
        arr = sorted(viewer_arr, key=lambda x: x[2], reverse=True)
        pprint(arr)
| [
"scarborowj@CS333-5.appd.appstate.edu"
] | scarborowj@CS333-5.appd.appstate.edu |
1109e88a856d43d73e4831b20c9d110d965a7b00 | 98eb6cc1d06ae676be42bd3092fdfd74cfcd685e | /settings.py | 89a6b4566c3b9aa8d402d6098dd13fb890597647 | [] | no_license | wickywinner/Travello | a9afa275fbb3cc5b6423b131812a39ca5d643433 | a23f8d6944163314ec13b57664f36a4b3cf9a524 | refs/heads/master | 2022-11-01T11:37:17.614869 | 2020-06-20T07:39:33 | 2020-06-20T07:39:33 | 273,657,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,202 | py | """
Django settings for travello project.
Generated by 'django-admin startproject' using Django 3.0.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): hardcoded secret key checked into source -- move to an
# environment variable before deploying.
SECRET_KEY = 'w!33#*#oh^(s^*7zrvgmuzirokbugc2wi(b*=h%t&g3xzhyi78'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'travello.apps.TravelloConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'travello.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR,'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'travello.wsgi.application'


# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
# NOTE(review): plaintext DB credentials in source -- load from environment
# variables instead. Also confirm USER 'postgre' (default role is 'postgres').

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'wicky',
        'USER': 'postgre',
        'PASSWORD': '3456',
        'HOST': 'localhost'
    }
}


# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
"winner312.com@gmail.com"
] | winner312.com@gmail.com |
e2c612b4b56d248673734a60e6277546ca92928b | 18802beeb0cdc287720994cf3e83f12c6cd344e0 | /Div and Mod Exercise.py | a34666b85ce8b19de265f8e57c9a78e62d203f92 | [] | no_license | KavilanNaidoo/Assignment | 18811edaf494baa6589fd801a074e550835fff71 | b920896120bb5d96af12232ec3add3860cb0f90d | refs/heads/master | 2016-09-06T11:24:40.725559 | 2014-09-25T20:15:33 | 2014-09-25T20:15:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | #Kavilan Naidoo
#12-09-2014
#X_DIV_Y_X_MOD_Y
number_1= int(input("Please enter a number: "))
number_2= int(input("Please enter another number: "))
div= number_1//number_2
mod= number_1%number_2
print("The whole number is {0} and the remainder is {1}.".format(div,mod))
| [
"40904@PC004152.coll.lr.local"
] | 40904@PC004152.coll.lr.local |
78bbb210835cbd018d7ecaa35e73e284314cb1d1 | 4cf85b739fc7a1238f1c4447881178398c477844 | /backend/users/models.py | ac901bd3fd656b06c7790afaa510ca8a185fab11 | [] | no_license | juanrios15/jwt-users | 0a8de5f64e7a43fcfc47670e2fa7872db3a2b67a | 4eb4ec91c13fab4d8fc491eb24557364f10ec6a3 | refs/heads/main | 2023-06-20T15:03:19.806701 | 2021-07-11T04:35:42 | 2021-07-11T04:35:42 | 382,943,449 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,632 | py | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
# Create your models here.
class UserManager(BaseUserManager, models.Manager):
def _create_user(self, email, password, is_staff, is_superuser, is_active, **extra_fields):
user = self.model(
email=email,
is_staff=is_staff,
is_superuser=is_superuser,
is_active=is_active,
**extra_fields
)
user.set_password(password)
user.save(using=self.db)
return user
def create_user(self, email, password=None, **extra_fields):
return self._create_user(email, password, False, False, True, **extra_fields)
def create_superuser(self, email, password=None, **extra_fields):
return self._create_user(email, password, True, True, True, **extra_fields)
class NewUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(_('email_address'), unique=True)
user_name = models.CharField(max_length=150, unique=True)
first_name = models.CharField(max_length=150, blank=True)
start_date = models.DateTimeField(default=timezone.now)
about = models.TextField(_('about'), max_length=500, blank=True)
is_staff = models.BooleanField(default=False)
is_active = models.BooleanField(default=False)
objects = UserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['user_name', 'first_name']
def __str__(self):
return self.user_name | [
"juankrios15@gmail.com"
] | juankrios15@gmail.com |
0d13290dc44ec8593cb073a1ca94340e77ff9ea9 | 146db8cd0f775e0c4d051ea675bd147f1cb08503 | /old/md-service/SMDS/timestamp.py | e0bd0bdf1daac67b56a62833456e03a8e2b03702 | [
"Apache-2.0"
] | permissive | syndicate-storage/syndicate | 3cb4a22a4ae0d92859f57ed4634b03f1665791e4 | 4837265be3e0aa18cdf4ee50316dbfc2d1f06e5b | refs/heads/master | 2020-04-14T19:59:42.849428 | 2016-03-24T15:47:49 | 2016-03-24T15:47:49 | 10,639,326 | 16 | 6 | null | null | null | null | UTF-8 | Python | false | false | 5,873 | py | #
# Utilities to handle timestamps / durations from/to integers and strings
#
# $Id: Timestamp.py 18344 2010-06-22 18:56:38Z caglar $
# $URL: http://svn.planet-lab.org/svn/PLCAPI/trunk/PLC/Timestamp.py $
#
#
# datetime.{datetime,timedelta} are powerful tools, but these objects are not
# natively marshalled over xmlrpc
#
from types import StringTypes
import time, calendar
import datetime
from SMDS.faults import *
from SMDS.parameter import Parameter, Mixed
# a dummy class mostly used as a namespace
class Timestamp:
debug=False
# debug=True
# this is how we expose times to SQL
sql_format = "%Y-%m-%d %H:%M:%S"
sql_format_utc = "%Y-%m-%d %H:%M:%S UTC"
# this one (datetime.isoformat) would work too but that's less readable - we support this input though
iso_format = "%Y-%m-%dT%H:%M:%S"
# sometimes it's convenient to understand more formats
input_formats = [ sql_format,
sql_format_utc,
iso_format,
"%Y-%m-%d %H:%M",
"%Y-%m-%d %H:%M UTC",
]
# for timestamps we usually accept either an int, or an ISO string,
# the datetime.datetime stuff can in general be used locally,
# but not sure it can be marshalled over xmlrpc though
@staticmethod
def Parameter (doc):
return Mixed (Parameter (int, doc + " (unix timestamp)"),
Parameter (str, doc + " (formatted as %s)"%Timestamp.sql_format),
)
@staticmethod
def sql_validate (input, timezone=False, check_future = False):
"""
Validates the specified GMT timestamp, returns a
standardized string suitable for SQL input.
Input may be a number (seconds since UNIX epoch back in 1970,
or a string (in one of the supported input formats).
If timezone is True, the resulting string contains
timezone information, which is hard-wired as 'UTC'
If check_future is True, raises an exception if timestamp is in
the past.
Returns a GMT timestamp string suitable to feed SQL.
"""
if not timezone: output_format = Timestamp.sql_format
else: output_format = Timestamp.sql_format_utc
if Timestamp.debug: print 'sql_validate, in:',input,
if isinstance(input, StringTypes):
sql=''
# calendar.timegm() is the inverse of time.gmtime()
for time_format in Timestamp.input_formats:
try:
timestamp = calendar.timegm(time.strptime(input, time_format))
sql = time.strftime(output_format, time.gmtime(timestamp))
break
# wrong format: ignore
except ValueError: pass
# could not parse it
if not sql:
raise MDInvalidArgument, "Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats)
elif isinstance (input,(int,long,float)):
try:
timestamp = long(input)
sql = time.strftime(output_format, time.gmtime(timestamp))
except Exception,e:
raise MDInvalidArgument, "Timestamp %r not recognized -- %r"%(input,e)
else:
raise MDInvalidArgument, "Timestamp %r - unsupported type %r"%(input,type(input))
if check_future and input < time.time():
raise MDInvalidArgument, "'%s' not in the future" % sql
if Timestamp.debug: print 'sql_validate, out:',sql
return sql
@staticmethod
def sql_validate_utc (timestamp):
"For convenience, return sql_validate(intput, timezone=True, check_future=False)"
return Timestamp.sql_validate (timestamp, timezone=True, check_future=False)
@staticmethod
def cast_long (input):
"""
Translates input timestamp as a unix timestamp.
Input may be a number (seconds since UNIX epoch, i.e., 1970-01-01
00:00:00 GMT), a string (in one of the supported input formats above).
"""
if Timestamp.debug: print 'cast_long, in:',input,
if isinstance(input, StringTypes):
timestamp=0
for time_format in Timestamp.input_formats:
try:
result=calendar.timegm(time.strptime(input, time_format))
if Timestamp.debug: print 'out:',result
return result
# wrong format: ignore
except ValueError: pass
raise MDInvalidArgument, "Cannot parse timestamp %r - not in any of %r formats"%(input,Timestamp.input_formats)
elif isinstance (input,(int,long,float)):
result=long(input)
if Timestamp.debug: print 'out:',result
return result
else:
raise MDInvalidArgument, "Timestamp %r - unsupported type %r"%(input,type(input))
# utility for displaying durations
# be consistent in avoiding the datetime stuff
class Duration:
MINUTE = 60
HOUR = 3600
DAY = 3600*24
@staticmethod
def to_string(duration):
result=[]
left=duration
(days,left) = divmod(left,Duration.DAY)
if days: result.append("%d d)"%td.days)
(hours,left) = divmod (left,Duration.HOUR)
if hours: result.append("%d h"%hours)
(minutes, seconds) = divmod (left, Duration.MINUTE)
if minutes: result.append("%d m"%minutes)
if seconds: result.append("%d s"%seconds)
if not result: result = ['void']
return "-".join(result)
@staticmethod
def validate (duration):
# support seconds only for now, works for int/long/str
try:
return long (duration)
except:
raise MDInvalidArgument, "Could not parse duration %r"%duration
| [
"jcnelson@cs.princeton.edu"
] | jcnelson@cs.princeton.edu |
1baa6dd0777f211ad099d2645e3760939937ecf9 | bad36d483848a45428683dbd69e255240f216b1f | /Random/ex06.py | 3f7052cec4ed7b10aac1f387f18827ec3e315652 | [] | no_license | ualiusman/PyEx | 6fb736b537ce8a5a353ab8c109f92775fd74e56e | e60390be26efa7ac715c1e9977d3d094bf673825 | refs/heads/master | 2022-03-12T20:07:11.693296 | 2022-02-23T20:38:58 | 2022-02-23T20:38:58 | 223,542,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | #Exercise 06:
import random
listString = input("enter a sentance:")
listString = str(listString)
a = ""
for n in range( len(listString),0,-1):
a += listString[n-1]
if listString == a:
print("list is plaindrom")
else:
print("list is not plaindrom")
| [
"usmanali.uali@live.com"
] | usmanali.uali@live.com |
93f760d29e6465c846af2a2e81bf5e01ba327359 | 4c67533d6d5183ed985288a55631fe1c99b5ae21 | /448.py | 9f64bb067dba432831726c47da96cfac37b25b7d | [] | no_license | Jiongxiao/Leetcode | 546f789a0d892fe56d7f53a41aa97ccb2a8e1813 | 641775f750a1197f9aaa23e5122b0add2ae064ee | refs/heads/master | 2021-01-17T01:11:35.970423 | 2017-09-21T07:04:52 | 2017-09-21T07:04:52 | 56,422,836 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | class Solution(object):
def findDisappearedNumbers(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
for i in range(len(nums)):
nums[abs(nums[i])-1]=-abs(nums[abs(nums[i])-1])
return [j+1 for j in range(len(nums)) if nums[j]>0] | [
"tctjx@hotmail.com"
] | tctjx@hotmail.com |
5107394e62284585daa97783cbc58916832410ed | c51f74578d78702461d436cc471b97236482a78b | /Prometheus/Python/4.3.py | 428e6dae7ba914b6b0db41ca397e1996e351ac5f | [] | no_license | taras18taras/Python | fa09481941adab6500854c644d5c1b7baeeb5d62 | a033bfa76dd4031fb04c5939dcce854d42831523 | refs/heads/master | 2022-11-25T20:58:06.711764 | 2018-03-14T07:51:45 | 2018-03-14T07:51:45 | 121,369,243 | 0 | 1 | null | 2022-11-20T03:43:44 | 2018-02-13T10:29:41 | Python | UTF-8 | Python | false | false | 304 | py | import sys
arg="())()(()())(()"
#p={parentheses1: "(", parentheses2: ")"}
s1=''
s2=''
if len(arg)%2 != 0:
print('NO')
exit(0)
elif arg[0] == ")" or arg[-1] == "(":
print("NO")
exit(0)
for i in arg:
if i == "(":
s1+=i
elif i == ")":
s2+=i
if len(s2) == len(s1):
print("YES")
else:
print("NO") | [
"taras18taras@gmail.com"
] | taras18taras@gmail.com |
e14680e33a16ef09bc1af36a1c10975723468088 | 1ad5297557b067a74d4afd33a4938d3c6614934c | /tuxhe_blog/admin.py | f7461b4c01f536c42c22f8eeaa99aa9ec9fd3dd6 | [] | no_license | tuxhe/tuxhe | d084035bcb10ed874d6573b976efaf15ed0f21a3 | 37ac70e2c44ee19151898779da3cff797c45023b | refs/heads/master | 2016-09-03T07:42:01.677909 | 2013-06-10T01:49:55 | 2013-06-10T01:49:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 802 | py | from django.contrib import admin
from tuxhe_blog.models import Post
class PostAdmin(admin.ModelAdmin):
list_display = ('__unicode__', 'author_id', 'create_at', 'update_at')
list_filter = ('create_at', 'update_at')
search_fields = ('title', 'author_id__username', 'author_id__first_name')
fieldsets = [
('Post', {
'fields': ('title', 'url', 'thumbnail', 'short_content', 'content',
'tags', 'status')
}),
('Author', {
'classes': ('collapse',),
'fields': ('author_id',)
}),
('Change History', {
'classes': ('collapse',),
'fields': ('create_at', 'update_at')
})
]
readonly_fields = ('create_at', 'update_at')
admin.site.register(Post, PostAdmin)
| [
"houseedition@gmail.com"
] | houseedition@gmail.com |
9482590b67c4244af53683f471759b596e95df42 | 7e742840fe5b151b50ec9834634a9f75a886e3df | /__manifest__.py | 4a61c2011a200deacc320b93ee13aeb1ae92abb2 | [] | no_license | janeshl/new | cbc55eb26854111485973218406eb7d067fb15dd | ea5836dcec10c21e2e1010d7e887bb9cdab6b287 | refs/heads/main | 2023-04-18T23:37:28.424184 | 2021-04-27T10:16:51 | 2021-04-27T10:16:51 | 358,529,044 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | # -*- coding: utf-8 -*-
{
'name': "new",
'summary': """
Short (1 phrase/line) summary of the module's purpose, used as
subtitle on modules listing or apps.openerp.com""",
'description': """
Long description of module's purpose
""",
'author': "My Company",
'website': "http://www.yourcompany.com",
# Categories can be used to filter modules in modules listing
# Check https://github.com/odoo/odoo/blob/13.0/odoo/addons/base/data/ir_module_category_data.xml
# for the full list
'category': 'Uncategorized',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base'],
# always loaded
'data': [
'security/ir.model.access.csv',
'views/views.xml',
'views/templates.xml',
],
# only loaded in demonstration mode
# 'demo': [
#'demo/demo.xml',
#],
}
| [
"janeshlakshman7@gmail.com"
] | janeshlakshman7@gmail.com |
47902eb46edaee7209310ec530f2a044c245e295 | 031f18c257143aa15ba3a72aae6d82843e5a083f | /env/bin/pasteurize | ddbd87a348c4ffe072dc37e0453b4c52738a7cab | [] | no_license | PabloChamorroTebar/WEB-TFG | c9cbec43fdb88a5462e6e24083667c952ae295c6 | 878e8a819452b38d420b64cea24bd13a3cce09a7 | refs/heads/master | 2021-02-08T09:49:26.972247 | 2020-03-01T11:50:11 | 2020-03-01T11:50:11 | 244,138,029 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 427 | #!/Users/pablochamorro/PycharmProjects/Tfg/env/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'future==0.18.2','console_scripts','pasteurize'
__requires__ = 'future==0.18.2'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('future==0.18.2', 'console_scripts', 'pasteurize')()
)
| [
"pablochamorro@MacBook-Pro-de-Pablo.local"
] | pablochamorro@MacBook-Pro-de-Pablo.local | |
2c478fb713f273ebcc755c97aa70348d66391179 | fea44d5ca4e6c9b2c7950234718a4531d453849e | /sktime/forecasting/tests/test_sarimax.py | 17fdd9984ebd6c8806270391fc1ea39884dc931e | [
"BSD-3-Clause"
] | permissive | mlgig/sktime | 288069ab8c9b0743113877032dfca8cf1c2db3fb | 19618df351a27b77e3979efc191e53987dbd99ae | refs/heads/master | 2023-03-07T20:22:48.553615 | 2023-02-19T18:09:12 | 2023-02-19T18:09:12 | 234,604,691 | 1 | 0 | BSD-3-Clause | 2020-01-17T17:50:12 | 2020-01-17T17:50:11 | null | UTF-8 | Python | false | false | 1,049 | py | # -*- coding: utf-8 -*-
"""Tests the SARIMAX model."""
__author__ = ["TNTran92"]
import pytest
from numpy.testing import assert_allclose
from sktime.forecasting.sarimax import SARIMAX
from sktime.utils._testing.forecasting import make_forecasting_problem
from sktime.utils.validation._dependencies import _check_soft_dependencies
df = make_forecasting_problem()
@pytest.mark.skipif(
not _check_soft_dependencies("statsmodels", severity="none"),
reason="skip test if required soft dependency not available",
)
def test_SARIMAX_against_statsmodels():
"""Compares Sktime's and Statsmodel's SARIMAX."""
from statsmodels.tsa.api import SARIMAX as _SARIMAX
sktime_model = SARIMAX(order=(1, 0, 0), trend="t", seasonal_order=(1, 0, 0, 6))
sktime_model.fit(df)
y_pred = sktime_model.predict(df.index)
stats = _SARIMAX(endog=df, order=(1, 0, 0), trend="t", seasonal_order=(1, 0, 0, 6))
stats_fit = stats.fit()
stats_pred = stats_fit.predict(df.index[0])
assert_allclose(y_pred.tolist(), stats_pred.tolist())
| [
"noreply@github.com"
] | mlgig.noreply@github.com |
8bbeda72f37a50488eb26bdbe34f50cb14917af4 | fcc88521f63a3c22c81a9242ae3b203f2ea888fd | /Python3/0459-Repeated-Substring-Pattern/soln.py | 4ebac002c8929e3bbc180c53514482b5e1dc4e61 | [
"MIT"
] | permissive | wyaadarsh/LeetCode-Solutions | b5963e3427aa547d485d3a2cb24e6cedc72804fd | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | refs/heads/master | 2022-12-06T15:50:37.930987 | 2020-08-30T15:49:27 | 2020-08-30T15:49:27 | 291,811,790 | 0 | 1 | MIT | 2020-08-31T19:57:35 | 2020-08-31T19:57:34 | null | UTF-8 | Python | false | false | 158 | py | class Solution:
def repeatedSubstringPattern(self, s):
"""
:type s: str
:rtype: bool
"""
return s in (s + s)[1:-1] | [
"zhang623@wisc.edu"
] | zhang623@wisc.edu |
947b2fcbdca5e1cac34017bc1c75d05e106a4700 | 9f79c4f9a8a9154fc3dc9202ab8ed2547a722b5f | /Dictionaries/char_count.py | 6e73cf6fa167e94d9894e7ca32f38daa5956f09f | [] | no_license | grigor-stoyanov/PythonFundamentals | 31b6da00bd8294e8e802174dca4e62b231134090 | 5ae5f1f1b9ca9500d10e95318a731d3b29950a30 | refs/heads/main | 2023-02-11T12:17:19.010596 | 2021-01-14T22:14:54 | 2021-01-14T22:14:54 | 321,658,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | line = input().split()
char_dic = {}
for word in line:
for char in range(0, len(word)):
if word[char] not in char_dic:
char_dic[word[char]] = 1
else:
char_dic[word[char]] += 1
for key, value in char_dic.items():
print(f'{key} -> {value}')
| [
"76039296+codelocks7@users.noreply.github.com"
] | 76039296+codelocks7@users.noreply.github.com |
76e9188399310363fa0363e2f54dc699dab67a33 | a764e03aeea2d71ad0ed40d5495dd35cb6a2f2d2 | /setup.py | 66505858ecb7309ac41a1f924e3dfba775dd9a1e | [
"MIT"
] | permissive | ajoaoff/pathfinder | 829860ce2f759e6fca8dce7eae47f0ff9891a827 | f4261c30ba400597085afdc8f55472b355e0f320 | refs/heads/master | 2022-02-07T12:17:58.950555 | 2018-12-14T12:45:31 | 2018-12-14T12:45:31 | 161,476,885 | 0 | 0 | null | 2018-12-12T11:13:32 | 2018-12-12T11:13:32 | null | UTF-8 | Python | false | false | 3,074 | py | """Setup script.
Run "python3 setup --help-commands" to list all available commands and their
descriptions.
"""
import json
from abc import abstractmethod
from pathlib import Path
from subprocess import call, check_call
from setuptools import Command, setup
def read_version_from_json():
"""Read the NApp version from NApp kytos.json file."""
file = Path('kytos.json')
metadata = json.loads(file.read_text())
return metadata['version']
class SimpleCommand(Command):
"""Make Command implementation simpler."""
user_options = []
@abstractmethod
def run(self):
"""Run when command is invoked.
Use *call* instead of *check_call* to ignore failures.
"""
pass
def initialize_options(self):
"""Set default values for options."""
pass
def finalize_options(self):
"""Post-process options."""
pass
class Cleaner(SimpleCommand):
"""Custom clean command to tidy up the project root."""
description = 'clean build, dist, pyc and egg from package and docs'
def run(self):
"""Clean build, dist, pyc and egg from package and docs."""
call('rm -vrf ./build ./dist ./*.egg-info', shell=True)
call('find . -name __pycache__ -type d | xargs rm -rf', shell=True)
call('make -C docs/ clean', shell=True)
class TestCoverage(SimpleCommand):
"""Display test coverage."""
description = 'run unit tests and display code coverage'
def run(self):
"""Run unittest quietly and display coverage report."""
cmd = 'coverage3 run -m unittest discover -qs src' \
' && coverage3 report'
call(cmd, shell=True)
class Linter(SimpleCommand):
"""Code linters."""
description = 'lint Python source code'
def run(self):
"""Run pylama."""
print('Pylama is running. It may take several seconds...')
check_call('pylama setup.py tests main.py', shell=True)
class CITest(SimpleCommand):
"""Run all CI tests."""
description = 'run all CI tests: unit and doc tests, linter'
def run(self):
"""Run unit tests with coverage, doc tests and linter."""
cmds = ['python setup.py ' + cmd
for cmd in ('coverage', 'lint')]
cmd = ' && '.join(cmds)
check_call(cmd, shell=True)
setup(name='kytos/pathfinder',
version=read_version_from_json(),
description='Core Napps developed by Kytos Team',
url='http://github.com/kytos/pathfinder',
author='Kytos Team',
author_email='of-ng-dev@ncc.unesp.br',
license='MIT',
install_requires=[
'kytos>=2017.2b1',
'networkx'
],
cmdclass={
'clean': Cleaner,
'ci': CITest,
'coverage': TestCoverage,
'lint': Linter,
},
zip_safe=False,
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Topic :: System :: Networking',
])
| [
"macartur.sc@gmail.com"
] | macartur.sc@gmail.com |
8c13f655d8ae98bb3e994fd2181dfbed06df9aa9 | f82e67dd5f496d9e6d42b4fad4fb92b6bfb7bf3e | /scripts/client/gui/scaleform/locale/quests.py | d092b4f53be4bc89915ed7d17202ae9982bd837c | [] | no_license | webiumsk/WOT0.10.0 | 4e4413ed4e7b00e22fb85d25fdae9400cbb4e76b | a84f536c73f86d9e8fab559e97f88f99f2ad7e95 | refs/heads/master | 2021-01-09T21:55:00.662437 | 2015-10-23T20:46:45 | 2015-10-23T20:46:45 | 44,835,654 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66,835 | py | # Embedded file name: scripts/client/gui/Scaleform/locale/QUESTS.py
class QUESTS(object):
IGR_TOOLTIP_BATTLESLABEL = '#quests:igr/tooltip/battlesLabel'
IGR_TOOLTIP_WINSLABEL = '#quests:igr/tooltip/winsLabel'
POSTBATTLE_PROGRESSRESET = '#quests:postBattle/progressReset'
TOOLTIP_PROGRESS_GROUPBY_HEADER = '#quests:tooltip/progress/groupBy/header'
TOOLTIP_PROGRESS_GROUPBY_BODY = '#quests:tooltip/progress/groupBy/body'
TOOLTIP_PROGRESS_GROUPBY_NOTE = '#quests:tooltip/progress/groupBy/note'
TOOLTIP_PROGRESS_GROUPBY_NOTE_LEVEL = '#quests:tooltip/progress/groupBy/note/level'
TOOLTIP_VEHTABLE_NATION_HEADER = '#quests:tooltip/vehTable/nation/header'
TOOLTIP_VEHTABLE_NATION_BODY = '#quests:tooltip/vehTable/nation/body'
TOOLTIP_VEHTABLE_CLASS_HEADER = '#quests:tooltip/vehTable/class/header'
TOOLTIP_VEHTABLE_CLASS_BODY = '#quests:tooltip/vehTable/class/body'
TOOLTIP_VEHTABLE_LEVEL_HEADER = '#quests:tooltip/vehTable/level/header'
TOOLTIP_VEHTABLE_LEVEL_BODY = '#quests:tooltip/vehTable/level/body'
TOOLTIP_VEHTABLE_NAME_HEADER = '#quests:tooltip/vehTable/name/header'
TOOLTIP_VEHTABLE_NAME_BODY = '#quests:tooltip/vehTable/name/body'
TOOLTIP_VEHTABLE_AVAILABILITY_HEADER = '#quests:tooltip/vehTable/availability/header'
TOOLTIP_VEHTABLE_AVAILABILITY_BODY = '#quests:tooltip/vehTable/availability/body'
TOOLTIP_VEHTABLE_DISCOUNT_HEADER = '#quests:tooltip/vehTable/discount/header'
TOOLTIP_VEHTABLE_DISCOUNT_BODY = '#quests:tooltip/vehTable/discount/body'
BONUSES_ITEMS_NAME = '#quests:bonuses/items/name'
BONUSES_BOOSTERS_NAME = '#quests:bonuses/boosters/name'
BONUSES_CUSTOMIZATION_VALUE = '#quests:bonuses/customization/value'
BONUSES_VEHICLES_NAME = '#quests:bonuses/vehicles/name'
BONUSES_VEHICLES_CREWLVL = '#quests:bonuses/vehicles/crewLvl'
BONUSES_VEHICLES_RENTDAYS = '#quests:bonuses/vehicles/rentDays'
BONUSES_VEHICLES_DESCRIPTION = '#quests:bonuses/vehicles/description'
BONUSES_ITEM_TANKMENXP = '#quests:bonuses/item/tankmenXP'
BONUSES_ITEM_XPFACTOR = '#quests:bonuses/item/xpFactor'
BONUSES_ITEM_CREDITSFACTOR = '#quests:bonuses/item/creditsFactor'
BONUSES_ITEM_FREEXPFACTOR = '#quests:bonuses/item/freeXPFactor'
BONUSES_ITEM_TANKMENXPFACTOR = '#quests:bonuses/item/tankmenXPFactor'
BONUSES_ITEM_SLOTS = '#quests:bonuses/item/slots'
BONUSES_ITEM_BERTHS = '#quests:bonuses/item/berths'
BONUSES_ITEM_PREMIUM = '#quests:bonuses/item/premium'
BONUSES_ITEM_TASK = '#quests:bonuses/item/task'
BONUSES_CREDITS_DESCRIPTION = '#quests:bonuses/credits/description'
BONUSES_GOLD_DESCRIPTION = '#quests:bonuses/gold/description'
BONUSES_TANKMEN_DESCRIPTION = '#quests:bonuses/tankmen/description'
BONUSES_ITEM_TANKWOMAN = '#quests:bonuses/item/tankwoman'
BONUSES_ITEM_ADDITIONBONUS = '#quests:bonuses/item/additionBonus'
BONUSES_ITEM_TANKMEN_NO_SKILLS = '#quests:bonuses/item/tankmen/no_skills'
BONUSES_ITEM_TANKMEN_WITH_SKILLS = '#quests:bonuses/item/tankmen/with_skills'
QUESTS_TITLE = '#quests:quests/title'
QUESTS_TITLE_MANEUVERSQUESTS = '#quests:quests/title/maneuversQuests'
QUESTS_TITLE_UNGOUPEDQUESTS = '#quests:quests/title/ungoupedQuests'
QUESTS_TITLE_LADDERQUESTS = '#quests:quests/title/ladderQuests'
QUESTS_TITLE_UNGOUPEDACTIONS = '#quests:quests/title/ungoupedActions'
QUESTS_CONDITIONS = '#quests:quests/conditions'
QUESTS_REQUIREMENTS = '#quests:quests/requirements'
QUESTS_CURRENT_NODATA = '#quests:quests/current/nodata'
QUESTS_FUTURE_NODATA = '#quests:quests/future/nodata'
QUESTS_CONTENT_NOQUESTSINROAMING = '#quests:quests/content/noQuestsInRoaming'
QUESTS_TABS_PERSONAL = '#quests:quests/tabs/personal'
QUESTS_TABS_CURRENT = '#quests:quests/tabs/current'
QUESTS_TABS_LADDER = '#quests:quests/tabs/ladder'
QUESTS_TABS_BEGINNER = '#quests:quests/tabs/beginner'
QUESTS_TABS_FUTURE = '#quests:quests/tabs/future'
QUESTSCONTROL_TITLE = '#quests:questsControl/title'
QUESTSCONTROL_ADDITIONALTITLE_NEEDRECEIVEDAWARD = '#quests:questsControl/additionalTitle/needReceivedAward'
QUESTSCONTROL_ADDITIONALTITLE_FREESLOTSANDFREEQUESTS = '#quests:questsControl/additionalTitle/freeSlotsAndFreeQuests'
QUESTSCONTROL_ADDITIONALTITLE_FIRSTRUN = '#quests:questsControl/additionalTitle/firstRun'
QUESTSCONTROL_ADDITIONALTITLE_EMPTY = '#quests:questsControl/additionalTitle/empty'
QUESTS_STATUS_DONE = '#quests:quests/status/done'
QUESTS_STATUS_NOTAVAILABLE = '#quests:quests/status/notAvailable'
QUESTS_STATUS_NOTDONE = '#quests:quests/status/notDone'
QUESTS_TABLE_NOVEHICLES = '#quests:quests/table/noVehicles'
QUESTS_TABLE_AMOUNT = '#quests:quests/table/amount'
QUESTS_TABLE_BATTLESLEFT = '#quests:quests/table/battlesLeft'
QUESTS_TABLE_INHANGAR = '#quests:quests/table/inHangar'
QUESTS_TABLE_NOTINHANGAR = '#quests:quests/table/notInHangar'
QUESTS_CURRENTTAB_HEADER_SORT = '#quests:quests/currentTab/header/sort'
QUESTS_CURRENTTAB_HEADER_CHECKBOX_TEXT = '#quests:quests/currentTab/header/checkBox/text'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_DATE = '#quests:quests/currentTab/header/dropdown/date'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_TIME = '#quests:quests/currentTab/header/dropdown/time'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_ALL = '#quests:quests/currentTab/header/dropdown/all'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_ACTION = '#quests:quests/currentTab/header/dropdown/action'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_QUESTS = '#quests:quests/currentTab/header/dropdown/quests'
QUESTS_CURRENTTAB_HEADER_DROPDOWN_SPECIALMISSION = '#quests:quests/currentTab/header/dropdown/specialMission'
QUESTS_LIST_COMPLETE = '#quests:quests/list/complete'
QUESTS_LIST_CURRENT_NOALL = '#quests:quests/list/current/noAll'
QUESTS_LIST_CURRENT_NOQUESTS = '#quests:quests/list/current/noQuests'
QUESTS_LIST_CURRENT_NOACTIONS = '#quests:quests/list/current/noActions'
QUESTS_LIST_FUTURE_NOALL = '#quests:quests/list/future/noAll'
QUESTS_LIST_FUTURE_NOQUESTS = '#quests:quests/list/future/noQuests'
QUESTS_LIST_FUTURE_NOACTIONS = '#quests:quests/list/future/noActions'
QUESTS_LIST_CLICKCHECKBOX = '#quests:quests/list/clickCheckbox'
QUESTS_TABS_NOSELECTED_TEXT = '#quests:quests/tabs/noselected/text'
QUESTS_TABS_AWARD_TEXT = '#quests:quests/tabs/award/text'
QUESTS_TABS_ADDAWARD_TEXT = '#quests:quests/tabs/addAward/text'
ITEM_TYPE_ACTION = '#quests:item/type/action'
ITEM_TYPE_QUEST = '#quests:item/type/quest'
ITEM_TYPE_QUESTDAILY = '#quests:item/type/questDaily'
ITEM_TYPE_SPECIALMISSION = '#quests:item/type/specialMission'
ITEM_TYPE_QUESTSTRATEGIC = '#quests:item/type/questStrategic'
ITEM_TYPE_POTAPOV = '#quests:item/type/potapov'
ITEM_TYPE_LADDER = '#quests:item/type/ladder'
ITEM_TIMER_TILLSTART = '#quests:item/timer/tillStart'
ITEM_TIMER_TILLSTART_DAYS = '#quests:item/timer/tillStart/days'
ITEM_TIMER_TILLSTART_HOURS = '#quests:item/timer/tillStart/hours'
ITEM_TIMER_TILLSTART_MIN = '#quests:item/timer/tillStart/min'
ITEM_TIMER_TILLFINISH = '#quests:item/timer/tillFinish'
ITEM_TIMER_TILLFINISH_DAYS = '#quests:item/timer/tillFinish/days'
ITEM_TIMER_TILLFINISH_HOURS = '#quests:item/timer/tillFinish/hours'
ITEM_TIMER_TILLFINISH_LONGFORMAT = '#quests:item/timer/tillFinish/longFormat'
ITEM_TIMER_TILLFINISH_ONLYHOURS = '#quests:item/timer/tillFinish/onlyHours'
ITEM_TIMER_TILLFINISH_LESSTHANHOUR = '#quests:item/timer/tillFinish/lessThanHour'
ITEM_TIMER_TILLFINISH_SHORTFORMAT = '#quests:item/timer/tillFinish/shortFormat'
ITEM_TIMER_TILLFINISH_SHORTFULLFORMAT = '#quests:item/timer/tillFinish/shortFullFormat'
ITEM_TIMER_TILLFINISH_LONGFULLFORMAT = '#quests:item/timer/tillFinish/longFullFormat'
PERSONAL_SEASONS_AWARDSBUTTON = '#quests:personal/seasons/awardsButton'
PERSONAL_SEASONS_TAB_RANDOM = '#quests:personal/seasons/tab/random'
PERSONAL_SEASONS_TAB_FALLOUT = '#quests:personal/seasons/tab/fallout'
PERSONAL_SEASONS_ITEMTITLE = '#quests:personal/seasons/itemTitle'
PERSONAL_SEASONS_SHORTSEASONNAME = '#quests:personal/seasons/shortSeasonName'
PERSONAL_SEASONS_TILELABEL = '#quests:personal/seasons/tileLabel'
PERSONAL_SEASONS_TILEPROGRESS = '#quests:personal/seasons/tileProgress'
PERSONAL_SEASONS_SLOTS_NODATA = '#quests:personal/seasons/slots/noData'
PERSONAL_SEASONS_SLOTS_GETAWARD = '#quests:personal/seasons/slots/getAward'
PERSONAL_SEASONS_SLOTS_TITLE = '#quests:personal/seasons/slots/title'
PERSONAL_SEASONS_SLOTS_NOACTIVESLOTS_HEADER = '#quests:personal/seasons/slots/noActiveSlots/header'
PERSONAL_SEASONS_SLOTS_NOACTIVESLOTS_BODY = '#quests:personal/seasons/slots/noActiveSlots/body'
DETAILS_HEADER_INFO_TITLE = '#quests:details/header/info/title'
DETAILS_HEADER_INFO_DESCR_PARALLEL = '#quests:details/header/info/descr_parallel'
DETAILS_HEADER_INFO_DESCR_SERIAL = '#quests:details/header/info/descr_serial'
DETAILS_HEADER_TILLDATE = '#quests:details/header/tillDate'
DETAILS_HEADER_TILLDATETIMES = '#quests:details/header/tillDateTimes'
DETAILS_HEADER_TILLDATEDAYS = '#quests:details/header/tillDateDays'
DETAILS_HEADER_TILLDATEDAYSTIMES = '#quests:details/header/tillDateDaysTimes'
DETAILS_HEADER_ACTIVEDURATION = '#quests:details/header/activeDuration'
DETAILS_HEADER_ACTIVEDURATIONTIMES = '#quests:details/header/activeDurationTimes'
DETAILS_HEADER_ACTIVEDURATIONDAYS = '#quests:details/header/activeDurationDays'
DETAILS_HEADER_ACTIVEDURATIONDAYSTIMES = '#quests:details/header/activeDurationDaysTimes'
DETAILS_HEADER_SCHEDULEDAYS = '#quests:details/header/scheduleDays'
DETAILS_HEADER_SCHEDULETIMES = '#quests:details/header/scheduleTimes'
DETAILS_HEADER_SCHEDULEDAYSTIMES = '#quests:details/header/scheduleDaysTimes'
DETAILS_HEADER_HASNOVEHICLES = '#quests:details/header/hasNoVehicles'
DETAILS_HEADER_COMPLETION_DAILY = '#quests:details/header/completion/daily'
DETAILS_HEADER_COMPLETION_DAILY_GROUPBYVEHICLE = '#quests:details/header/completion/daily/groupByVehicle'
DETAILS_HEADER_COMPLETION_DAILY_GROUPBYNATION = '#quests:details/header/completion/daily/groupByNation'
DETAILS_HEADER_COMPLETION_DAILY_GROUPBYLEVEL = '#quests:details/header/completion/daily/groupByLevel'
DETAILS_HEADER_COMPLETION_DAILY_GROUPBYCLASS = '#quests:details/header/completion/daily/groupByClass'
DETAILS_HEADER_COMPLETION_UNLIMITED = '#quests:details/header/completion/unlimited'
DETAILS_HEADER_COMPLETION_SINGLE = '#quests:details/header/completion/single'
# --- Auto-generated localization-key constants (quest details UI). ---
# Each constant maps a symbolic name to a '#quests:...' i18n lookup key;
# the values are consumed by the localization subsystem at runtime and
# must not be edited by hand.  NOTE(review): this section looks like
# generator output — confirm regeneration workflow before modifying.

# Quest-details header: completion counters and group-by labels.
DETAILS_HEADER_COMPLETION_SINGLE_GROUPBYVEHICLE = '#quests:details/header/completion/single/groupByVehicle'
DETAILS_HEADER_COMPLETION_SINGLE_GROUPBYNATION = '#quests:details/header/completion/single/groupByNation'
DETAILS_HEADER_COMPLETION_SINGLE_GROUPBYLEVEL = '#quests:details/header/completion/single/groupByLevel'
DETAILS_HEADER_COMPLETION_SINGLE_GROUPBYCLASS = '#quests:details/header/completion/single/groupByClass'
DETAILS_HEADER_COMETOEND = '#quests:details/header/comeToEnd'
DETAILS_HEADER_COMETOENDINMINUTES = '#quests:details/header/comeToEndInMinutes'

# Task structure labels (sub-tasks, follow-up tasks, strategic tasks).
DETAILS_TASKS_SUBTASK = '#quests:details/tasks/subTask'
DETAILS_TASKS_NEXTTASK = '#quests:details/tasks/nextTask'
DETAILS_TASKS_STRATEGIC = '#quests:details/tasks/strategic'

# Requirement-section labels (account- and vehicle-level requirements).
DETAILS_TASKS_REQUIREMENTS_ACCOUNTLABEL = '#quests:details/tasks/requirements/accountLabel'
DETAILS_TASKS_REQUIREMENTS_VEHICLELABEL = '#quests:details/tasks/requirements/vehicleLabel'
DETAILS_TASKS_REQUIREMENTS_VEHICLELABEL_SUITABLE = '#quests:details/tasks/requirements/vehicleLabel/suitable'
DETAILS_TASKS_REQUIREMENTS_VEHICLELABEL_FROM = '#quests:details/tasks/requirements/vehicleLabel/from'
DETAILS_REQUIREMENTS_VEHICLESTABLE_NAME = '#quests:details/requirements/vehiclesTable/name'
DETAILS_REQUIREMENTS_VEHICLESTABLE_DISCOUNT = '#quests:details/requirements/vehiclesTable/discount'
DETAILS_REQUIREMENTS_VEHICLESTABLE_COUNT = '#quests:details/requirements/vehiclesTable/count'
DETAILS_CONDITIONS_LABEL = '#quests:details/conditions/label'

# Quest status messages (completed / not-available reasons).
DETAILS_STATUS_COMPLETED = '#quests:details/status/completed'
DETAILS_STATUS_COMPLETED_DAILY = '#quests:details/status/completed/daily'
DETAILS_STATUS_NOTAVAILABLE_IN_FUTURE = '#quests:details/status/notAvailable/in_future'
DETAILS_STATUS_NOTAVAILABLE_INVALID_WEEKDAY = '#quests:details/status/notAvailable/invalid_weekday'
DETAILS_STATUS_NOTAVAILABLE_INVALID_TIME_INTERVAL = '#quests:details/status/notAvailable/invalid_time_interval'
DETAILS_STATUS_NOTAVAILABLE_OUT_OF_DATE = '#quests:details/status/notAvailable/out_of_date'
DETAILS_STATUS_NOTAVAILABLE_REQUIREMENTS = '#quests:details/status/notAvailable/requirements'

# Comparison-operator labels, two phrasing variants (relations1 / relations2).
DETAILS_RELATIONS1_GREATER = '#quests:details/relations1/greater'
DETAILS_RELATIONS1_LESS = '#quests:details/relations1/less'
DETAILS_RELATIONS1_EQUAL = '#quests:details/relations1/equal'
DETAILS_RELATIONS1_NOTEQUAL = '#quests:details/relations1/notEqual'
DETAILS_RELATIONS1_LESSOREQUAL = '#quests:details/relations1/lessOrEqual'
DETAILS_RELATIONS1_GREATEROREQUAL = '#quests:details/relations1/greaterOrEqual'
DETAILS_RELATIONS2_GREATER = '#quests:details/relations2/greater'
DETAILS_RELATIONS2_LESS = '#quests:details/relations2/less'
DETAILS_RELATIONS2_EQUAL = '#quests:details/relations2/equal'
DETAILS_RELATIONS2_NOTEQUAL = '#quests:details/relations2/notEqual'
DETAILS_RELATIONS2_LESSOREQUAL = '#quests:details/relations2/lessOrEqual'
DETAILS_RELATIONS2_GREATEROREQUAL = '#quests:details/relations2/greaterOrEqual'
DETAILS_GROUPS_OR = '#quests:details/groups/or'

# Account-state requirements (IGR, tokens, premium, clan membership, rating).
DETAILS_REQUIREMENTS_IGR = '#quests:details/requirements/igr'
DETAILS_REQUIREMENTS_IGRBASIC = '#quests:details/requirements/igrBasic'
DETAILS_REQUIREMENTS_IGRPREMIUM = '#quests:details/requirements/igrPremium'
DETAILS_REQUIREMENTS_TOKEN = '#quests:details/requirements/token'
DETAILS_REQUIREMENTS_TOKEN_N = '#quests:details/requirements/token/N'
DETAILS_REQUIREMENTS_GROUP_TOKEN_N = '#quests:details/requirements/group/token/N'
DETAILS_REQUIREMENTS_PREMIUMACCOUNT = '#quests:details/requirements/premiumAccount'
DETAILS_REQUIREMENTS_NOTPREMIUMACCOUNT = '#quests:details/requirements/notPremiumAccount'
DETAILS_REQUIREMENTS_INCLAN = '#quests:details/requirements/inClan'
DETAILS_REQUIREMENTS_NOTINCLAN = '#quests:details/requirements/notInClan'
DETAILS_REQUIREMENTS_INANYCLAN = '#quests:details/requirements/inAnyClan'
DETAILS_REQUIREMENTS_NOTINANYCLAN = '#quests:details/requirements/notInAnyClan'
DETAILS_REQUIREMENTS_FORCURRENTCLAN = '#quests:details/requirements/forCurrentClan'
DETAILS_REQUIREMENTS_NOTFORCURRENTCLAN = '#quests:details/requirements/notForCurrentClan'
DETAILS_REQUIREMENTS_GLOBALRATING = '#quests:details/requirements/globalRating'
DETAILS_REQUIREMENTS_DOSSIERVALUE = '#quests:details/requirements/dossierValue'
DETAILS_REQUIREMENTS_DOSSIERAVGVALUE = '#quests:details/requirements/dossierAvgValue'

# "Vehicles unlocked" requirement keys: every combination of the
# nation/type/level filters, each with a negated ("/not") variant.
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED = '#quests:details/requirements/vehiclesUnlocked'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NOT = '#quests:details/requirements/vehiclesUnlocked/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_ALL = '#quests:details/requirements/vehiclesUnlocked/all'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION = '#quests:details/requirements/vehiclesUnlocked/nation'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_NOT = '#quests:details/requirements/vehiclesUnlocked/nation/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_TYPE = '#quests:details/requirements/vehiclesUnlocked/type'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_TYPE_NOT = '#quests:details/requirements/vehiclesUnlocked/type/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_LEVEL = '#quests:details/requirements/vehiclesUnlocked/level'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_LEVEL_NOT = '#quests:details/requirements/vehiclesUnlocked/level/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_TYPE = '#quests:details/requirements/vehiclesUnlocked/nation_type'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_TYPE_NOT = '#quests:details/requirements/vehiclesUnlocked/nation_type/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_LEVEL = '#quests:details/requirements/vehiclesUnlocked/nation_level'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_LEVEL_NOT = '#quests:details/requirements/vehiclesUnlocked/nation_level/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_TYPE_LEVEL = '#quests:details/requirements/vehiclesUnlocked/type_level'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_TYPE_LEVEL_NOT = '#quests:details/requirements/vehiclesUnlocked/type_level/not'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_TYPE_LEVEL = '#quests:details/requirements/vehiclesUnlocked/nation_type_level'
DETAILS_REQUIREMENTS_VEHICLESUNLOCKED_NATION_TYPE_LEVEL_NOT = '#quests:details/requirements/vehiclesUnlocked/nation_type_level/not'

# "Vehicles owned" requirement keys: same nation/type/level matrix as above.
DETAILS_REQUIREMENTS_VEHICLESOWNED = '#quests:details/requirements/vehiclesOwned'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NOT = '#quests:details/requirements/vehiclesOwned/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_ALL = '#quests:details/requirements/vehiclesOwned/all'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION = '#quests:details/requirements/vehiclesOwned/nation'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_NOT = '#quests:details/requirements/vehiclesOwned/nation/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_TYPE = '#quests:details/requirements/vehiclesOwned/type'
DETAILS_REQUIREMENTS_VEHICLESOWNED_TYPE_NOT = '#quests:details/requirements/vehiclesOwned/type/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_LEVEL = '#quests:details/requirements/vehiclesOwned/level'
DETAILS_REQUIREMENTS_VEHICLESOWNED_LEVEL_NOT = '#quests:details/requirements/vehiclesOwned/level/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_TYPE = '#quests:details/requirements/vehiclesOwned/nation_type'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_TYPE_NOT = '#quests:details/requirements/vehiclesOwned/nation_type/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_LEVEL = '#quests:details/requirements/vehiclesOwned/nation_level'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_LEVEL_NOT = '#quests:details/requirements/vehiclesOwned/nation_level/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_TYPE_LEVEL = '#quests:details/requirements/vehiclesOwned/type_level'
DETAILS_REQUIREMENTS_VEHICLESOWNED_TYPE_LEVEL_NOT = '#quests:details/requirements/vehiclesOwned/type_level/not'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_TYPE_LEVEL = '#quests:details/requirements/vehiclesOwned/nation_type_level'
DETAILS_REQUIREMENTS_VEHICLESOWNED_NATION_TYPE_LEVEL_NOT = '#quests:details/requirements/vehiclesOwned/nation_type_level/not'
DETAILS_REQUIREMENTS_VEHICLE_RECEIVEDMULTXP = '#quests:details/requirements/vehicle/receivedMultXp'
DETAILS_REQUIREMENTS_VEHICLE_NOTRECEIVEDMULTXP = '#quests:details/requirements/vehicle/notReceivedMultXp'
DETAILS_REQUIREMENTS_VEHICLE_ANY = '#quests:details/requirements/vehicle/any'
DETAILS_REQUIREMENTS_LADDER = '#quests:details/requirements/ladder'

# Battle-condition keys: vehicle, map, formation and clan-membership filters.
DETAILS_CONDITIONS_TITLE = '#quests:details/conditions/title'
DETAILS_CONDITIONS_VEHICLEDESCR = '#quests:details/conditions/vehicleDescr'
DETAILS_CONDITIONS_VEHICLE = '#quests:details/conditions/vehicle'
DETAILS_CONDITIONS_VEHICLEKILLS = '#quests:details/conditions/vehicleKills'
DETAILS_CONDITIONS_VEHICLESUNLOCKED = '#quests:details/conditions/vehiclesUnlocked'
DETAILS_CONDITIONS_VEHICLESOWNED = '#quests:details/conditions/vehiclesOwned'
DETAILS_CONDITIONS_BATTLEBONUSTYPE = '#quests:details/conditions/battleBonusType'
DETAILS_CONDITIONS_FORMATION = '#quests:details/conditions/formation'
DETAILS_CONDITIONS_MAP = '#quests:details/conditions/map'
DETAILS_CONDITIONS_MAP_NOT = '#quests:details/conditions/map/not'
DETAILS_CONDITIONS_MAPS = '#quests:details/conditions/maps'
DETAILS_CONDITIONS_MAPS_NOT = '#quests:details/conditions/maps/not'
DETAILS_CONDITIONS_MAPSTYPE = '#quests:details/conditions/mapsType'
DETAILS_CONDITIONS_MAPSTYPE_SUMMER = '#quests:details/conditions/mapsType/summer'
DETAILS_CONDITIONS_MAPSTYPE_DESERT = '#quests:details/conditions/mapsType/desert'
DETAILS_CONDITIONS_MAPSTYPE_WINTER = '#quests:details/conditions/mapsType/winter'
DETAILS_CONDITIONS_FORMATION_SQUAD = '#quests:details/conditions/formation/squad'
DETAILS_CONDITIONS_NOTSQUAD = '#quests:details/conditions/notSquad'
DETAILS_CONDITIONS_CLANMEMBERSHIP_ANY_FORMATION = '#quests:details/conditions/clanMembership/any/formation'
DETAILS_CONDITIONS_CLANMEMBERSHIP_ANY_SQUAD = '#quests:details/conditions/clanMembership/any/squad'
DETAILS_CONDITIONS_CLANMEMBERSHIP_ANY_TEAM7X7 = '#quests:details/conditions/clanMembership/any/team7x7'
DETAILS_CONDITIONS_CLANMEMBERSHIP_ANY_COMPANY = '#quests:details/conditions/clanMembership/any/company'
DETAILS_CONDITIONS_CLANMEMBERSHIP_SAME_FORMATION = '#quests:details/conditions/clanMembership/same/formation'
DETAILS_CONDITIONS_CLANMEMBERSHIP_SAME_SQUAD = '#quests:details/conditions/clanMembership/same/squad'
DETAILS_CONDITIONS_CLANMEMBERSHIP_SAME_TEAM7X7 = '#quests:details/conditions/clanMembership/same/team7x7'
DETAILS_CONDITIONS_CLANMEMBERSHIP_SAME_COMPANY = '#quests:details/conditions/clanMembership/same/company'
DETAILS_CONDITIONS_FORMATION_CLAN = '#quests:details/conditions/formation/clan'
DETAILS_CONDITIONS_HISTORICALBATTLES = '#quests:details/conditions/historicalBattles'
DETAILS_CONDITIONS_BATTLES = '#quests:details/conditions/battles'
DETAILS_CONDITIONS_BATTLESINROW = '#quests:details/conditions/battlesInRow'
DETAILS_CONDITIONS_ACHIEVEMENTS = '#quests:details/conditions/achievements'
DETAILS_CONDITIONS_ACHIEVEMENTS_NOT = '#quests:details/conditions/achievements/not'
DETAILS_CONDITIONS_CLANKILLS = '#quests:details/conditions/clanKills'
DETAILS_CONDITIONS_CLANKILLS_NOT = '#quests:details/conditions/clanKills/not'
DETAILS_CONDITIONS_CLANKILLS_CAMO_RED = '#quests:details/conditions/clanKills/camo/red'
DETAILS_CONDITIONS_CLANKILLS_CAMO_SILVER = '#quests:details/conditions/clanKills/camo/silver'
DETAILS_CONDITIONS_CLANKILLS_CAMO_GOLD = '#quests:details/conditions/clanKills/camo/gold'
DETAILS_CONDITIONS_CLANKILLS_CAMO_BLACK = '#quests:details/conditions/clanKills/camo/black'
DETAILS_CONDITIONS_ONEACHIEVEMENT = '#quests:details/conditions/oneAchievement'
DETAILS_CONDITIONS_ONEACHIEVEMENT_NOT = '#quests:details/conditions/oneAchievement/not'
DETAILS_CONDITIONS_WIN = '#quests:details/conditions/win'
DETAILS_CONDITIONS_NOTWIN = '#quests:details/conditions/notWin'
DETAILS_CONDITIONS_SURVIVE = '#quests:details/conditions/survive'
DETAILS_CONDITIONS_NOTSURVIVE = '#quests:details/conditions/notSurvive'

# Battle-result condition keys, scoped per unit kind
# (single player / formation / squad / company / team7x7).
DETAILS_CONDITIONS_RESULTS_SINGLE_SIMPLE = '#quests:details/conditions/results/single/simple'
DETAILS_CONDITIONS_RESULTS_SINGLE_AVG = '#quests:details/conditions/results/single/avg'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_TOP = '#quests:details/conditions/results/single/halfTeam/top'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_TOP_NOT = '#quests:details/conditions/results/single/halfTeam/top/not'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_TOP = '#quests:details/conditions/results/single/bothTeams/top'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_TOP_NOT = '#quests:details/conditions/results/single/bothTeams/top/not'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_RANGE = '#quests:details/conditions/results/single/halfTeam/range'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_RANGE_NOT = '#quests:details/conditions/results/single/halfTeam/range/not'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_RANGE = '#quests:details/conditions/results/single/bothTeams/range'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_RANGE_NOT = '#quests:details/conditions/results/single/bothTeams/range/not'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_POSITION = '#quests:details/conditions/results/single/halfTeam/position'
DETAILS_CONDITIONS_RESULTS_SINGLE_HALFTEAM_POSITION_NOT = '#quests:details/conditions/results/single/halfTeam/position/not'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_POSITION = '#quests:details/conditions/results/single/bothTeams/position'
DETAILS_CONDITIONS_RESULTS_SINGLE_BOTHTEAMS_POSITION_NOT = '#quests:details/conditions/results/single/bothTeams/position/not'
DETAILS_CONDITIONS_RESULTS_FORMATION_ALIVE = '#quests:details/conditions/results/formation/alive'
DETAILS_CONDITIONS_RESULTS_FORMATION_ALIVE_NOT = '#quests:details/conditions/results/formation/alive/not'
DETAILS_CONDITIONS_RESULTS_FORMATION_SIMPLE = '#quests:details/conditions/results/formation/simple'
DETAILS_CONDITIONS_RESULTS_FORMATION_AVG = '#quests:details/conditions/results/formation/avg'
DETAILS_CONDITIONS_RESULTS_SQUAD_ALIVE = '#quests:details/conditions/results/squad/alive'
DETAILS_CONDITIONS_RESULTS_SQUAD_ALIVE_NOT = '#quests:details/conditions/results/squad/alive/not'
DETAILS_CONDITIONS_RESULTS_SQUAD_SIMPLE = '#quests:details/conditions/results/squad/simple'
DETAILS_CONDITIONS_RESULTS_SQUAD_AVG = '#quests:details/conditions/results/squad/avg'
DETAILS_CONDITIONS_RESULTS_COMPANY_ALIVE = '#quests:details/conditions/results/company/alive'
DETAILS_CONDITIONS_RESULTS_COMPANY_ALIVE_NOT = '#quests:details/conditions/results/company/alive/not'
DETAILS_CONDITIONS_RESULTS_COMPANY_SIMPLE = '#quests:details/conditions/results/company/simple'
DETAILS_CONDITIONS_RESULTS_COMPANY_AVG = '#quests:details/conditions/results/company/avg'
DETAILS_CONDITIONS_RESULTS_TEAM7X7_ALIVE = '#quests:details/conditions/results/team7x7/alive'
DETAILS_CONDITIONS_RESULTS_TEAM7X7_ALIVE_NOT = '#quests:details/conditions/results/team7x7/alive/not'
DETAILS_CONDITIONS_RESULTS_TEAM7X7_SIMPLE = '#quests:details/conditions/results/team7x7/simple'
DETAILS_CONDITIONS_RESULTS_TEAM7X7_AVG = '#quests:details/conditions/results/team7x7/avg'

# Cumulative-progress condition labels per unit kind.
DETAILS_CONDITIONS_CUMULATIVE_SINGLE = '#quests:details/conditions/cumulative/single'
DETAILS_CONDITIONS_CUMULATIVE_FORMATION = '#quests:details/conditions/cumulative/formation'
DETAILS_CONDITIONS_CUMULATIVE_SQUAD = '#quests:details/conditions/cumulative/squad'
DETAILS_CONDITIONS_CUMULATIVE_COMPANY = '#quests:details/conditions/cumulative/company'
DETAILS_CONDITIONS_CUMULATIVE_TEAM7X7 = '#quests:details/conditions/cumulative/team7x7'

# "Vehicles kills" condition keys: nation/type/level matrix with "/not" variants.
DETAILS_CONDITIONS_VEHICLESKILLS = '#quests:details/conditions/vehiclesKills'
DETAILS_CONDITIONS_VEHICLESKILLS_NOT = '#quests:details/conditions/vehiclesKills/not'
DETAILS_CONDITIONS_VEHICLESKILLS_ALL = '#quests:details/conditions/vehiclesKills/all'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION = '#quests:details/conditions/vehiclesKills/nation'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_NOT = '#quests:details/conditions/vehiclesKills/nation/not'
DETAILS_CONDITIONS_VEHICLESKILLS_TYPE = '#quests:details/conditions/vehiclesKills/type'
DETAILS_CONDITIONS_VEHICLESKILLS_TYPE_NOT = '#quests:details/conditions/vehiclesKills/type/not'
DETAILS_CONDITIONS_VEHICLESKILLS_LEVEL = '#quests:details/conditions/vehiclesKills/level'
DETAILS_CONDITIONS_VEHICLESKILLS_LEVEL_NOT = '#quests:details/conditions/vehiclesKills/level/not'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_TYPE = '#quests:details/conditions/vehiclesKills/nation_type'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_TYPE_NOT = '#quests:details/conditions/vehiclesKills/nation_type/not'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_LEVEL = '#quests:details/conditions/vehiclesKills/nation_level'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_LEVEL_NOT = '#quests:details/conditions/vehiclesKills/nation_level/not'
DETAILS_CONDITIONS_VEHICLESKILLS_TYPE_LEVEL = '#quests:details/conditions/vehiclesKills/type_level'
DETAILS_CONDITIONS_VEHICLESKILLS_TYPE_LEVEL_NOT = '#quests:details/conditions/vehiclesKills/type_level/not'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_TYPE_LEVEL = '#quests:details/conditions/vehiclesKills/nation_type_level'
DETAILS_CONDITIONS_VEHICLESKILLS_NATION_TYPE_LEVEL_NOT = '#quests:details/conditions/vehiclesKills/nation_type_level/not'

# Group-by labels and post-battle (daily reset) separators.
DETAILS_CONDITIONS_GROUPBY_NATION = '#quests:details/conditions/groupBy/nation'
DETAILS_CONDITIONS_GROUPBY_CLASS = '#quests:details/conditions/groupBy/class'
DETAILS_CONDITIONS_GROUPBY_LEVEL = '#quests:details/conditions/groupBy/level'
DETAILS_CONDITIONS_GROUPBY_VEHICLE = '#quests:details/conditions/groupBy/vehicle'
DETAILS_CONDITIONS_GROUPBY_LEVELLABEL = '#quests:details/conditions/groupBy/levelLabel'
DETAILS_CONDITIONS_POSTBATTLE_SEPARATOR = '#quests:details/conditions/postBattle/separator'
DETAILS_CONDITIONS_POSTBATTLE_DAILYRESET_TIMEFMT = '#quests:details/conditions/postBattle/dailyReset/timeFmt'
DETAILS_CONDITIONS_POSTBATTLE_DAILYRESET = '#quests:details/conditions/postBattle/dailyReset'

# Cumulative battle-statistic labels (damage, XP, kills, capture points, ...).
DETAILS_CONDITIONS_CUMULATIVE_HEALTH = '#quests:details/conditions/cumulative/health'
DETAILS_CONDITIONS_CUMULATIVE_XP = '#quests:details/conditions/cumulative/xp'
DETAILS_CONDITIONS_CUMULATIVE_DIRECTHITS = '#quests:details/conditions/cumulative/directHits'
DETAILS_CONDITIONS_CUMULATIVE_DIRECTTEAMHITS = '#quests:details/conditions/cumulative/directTeamHits'
DETAILS_CONDITIONS_CUMULATIVE_EXPLOSIONHITS = '#quests:details/conditions/cumulative/explosionHits'
DETAILS_CONDITIONS_CUMULATIVE_PIERCINGS = '#quests:details/conditions/cumulative/piercings'
DETAILS_CONDITIONS_CUMULATIVE_SHOTS = '#quests:details/conditions/cumulative/shots'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGEDEALT = '#quests:details/conditions/cumulative/damageDealt'
DETAILS_CONDITIONS_CUMULATIVE_MAXDAMAGE = '#quests:details/conditions/cumulative/maxDamage'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGEASSISTED = '#quests:details/conditions/cumulative/damageAssisted'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGERECEIVED = '#quests:details/conditions/cumulative/damageReceived'
DETAILS_CONDITIONS_CUMULATIVE_DIRECTHITSRECEIVED = '#quests:details/conditions/cumulative/directHitsReceived'
DETAILS_CONDITIONS_CUMULATIVE_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/conditions/cumulative/noDamageDirectHitsReceived'
DETAILS_CONDITIONS_CUMULATIVE_EXPLOSIONHITSRECEIVED = '#quests:details/conditions/cumulative/explosionHitsReceived'
DETAILS_CONDITIONS_CUMULATIVE_PIERCINGSRECEIVED = '#quests:details/conditions/cumulative/piercingsReceived'
DETAILS_CONDITIONS_CUMULATIVE_SPOTTED = '#quests:details/conditions/cumulative/spotted'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGED = '#quests:details/conditions/cumulative/damaged'
DETAILS_CONDITIONS_CUMULATIVE_KILLS = '#quests:details/conditions/cumulative/kills'
DETAILS_CONDITIONS_CUMULATIVE_TDAMAGEDEALT = '#quests:details/conditions/cumulative/tdamageDealt'
DETAILS_CONDITIONS_CUMULATIVE_TKILLS = '#quests:details/conditions/cumulative/tkills'
DETAILS_CONDITIONS_CUMULATIVE_CAPTUREPOINTS = '#quests:details/conditions/cumulative/capturePoints'
DETAILS_CONDITIONS_CUMULATIVE_DROPPEDCAPTUREPOINTS = '#quests:details/conditions/cumulative/droppedCapturePoints'
DETAILS_CONDITIONS_CUMULATIVE_ACHIEVEMENTS = '#quests:details/conditions/cumulative/achievements'
DETAILS_CONDITIONS_CUMULATIVE_POTENTIALDAMAGERECEIVED = '#quests:details/conditions/cumulative/potentialDamageReceived'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGEBLOCKEDBYARMOR = '#quests:details/conditions/cumulative/damageBlockedByArmor'
DETAILS_CONDITIONS_CUMULATIVE_FREEXP = '#quests:details/conditions/cumulative/freeXP'
DETAILS_CONDITIONS_CUMULATIVE_POTENTIALDAMAGEDEALT = '#quests:details/conditions/cumulative/potentialDamageDealt'
DETAILS_CONDITIONS_CUMULATIVE_SOLOHITSASSISTED = '#quests:details/conditions/cumulative/soloHitsAssisted'
DETAILS_CONDITIONS_CUMULATIVE_ORIGINALXP = '#quests:details/conditions/cumulative/originalXP'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGEASSISTEDTRACK = '#quests:details/conditions/cumulative/damageAssistedTrack'
DETAILS_CONDITIONS_CUMULATIVE_DAMAGEASSISTEDRADIO = '#quests:details/conditions/cumulative/damageAssistedRadio'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY = '#quests:details/conditions/cumulative/markOfMastery'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY0 = '#quests:details/conditions/cumulative/markOfMastery0'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY0_NOT = '#quests:details/conditions/cumulative/markOfMastery0/not'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY1 = '#quests:details/conditions/cumulative/markOfMastery1'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY2 = '#quests:details/conditions/cumulative/markOfMastery2'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY3 = '#quests:details/conditions/cumulative/markOfMastery3'
DETAILS_CONDITIONS_CUMULATIVE_MARKOFMASTERY4 = '#quests:details/conditions/cumulative/markOfMastery4'

# Clubs-mode condition labels.
DETAILS_CONDITIONS_CLUBS_GETDIVISION = '#quests:details/conditions/clubs/getDivision'
DETAILS_CONDITIONS_CLUBS_BATTLES = '#quests:details/conditions/clubs/battles'
DETAILS_CONDITIONS_CLUBS_HASCLUB = '#quests:details/conditions/clubs/hasClub'

# Dossier-based condition keys: achievement series, records, medals.
DETAILS_DOSSIER_SNIPERSERIES = '#quests:details/dossier/sniperSeries'
DETAILS_DOSSIER_MAXSNIPERSERIES = '#quests:details/dossier/maxSniperSeries'
DETAILS_DOSSIER_INVINCIBLESERIES = '#quests:details/dossier/invincibleSeries'
DETAILS_DOSSIER_MAXINVINCIBLESERIES = '#quests:details/dossier/maxInvincibleSeries'
DETAILS_DOSSIER_DIEHARDSERIES = '#quests:details/dossier/diehardSeries'
DETAILS_DOSSIER_MAXDIEHARDSERIES = '#quests:details/dossier/maxDiehardSeries'
DETAILS_DOSSIER_KILLINGSERIES = '#quests:details/dossier/killingSeries'
DETAILS_DOSSIER_MAXKILLINGSERIES = '#quests:details/dossier/maxKillingSeries'
DETAILS_DOSSIER_PIERCINGSERIES = '#quests:details/dossier/piercingSeries'
DETAILS_DOSSIER_MAXPIERCINGSERIES = '#quests:details/dossier/maxPiercingSeries'
DETAILS_DOSSIER_MAXXP = '#quests:details/dossier/maxXP'
DETAILS_DOSSIER_MAXXPVEHICLE = '#quests:details/dossier/maxXPVehicle'
DETAILS_DOSSIER_MAXFRAGS = '#quests:details/dossier/maxFrags'
DETAILS_DOSSIER_MAXFRAGSVEHICLE = '#quests:details/dossier/maxFragsVehicle'
DETAILS_DOSSIER_MAXDAMAGE = '#quests:details/dossier/maxDamage'
DETAILS_DOSSIER_MAXDAMAGEVEHICLE = '#quests:details/dossier/maxDamageVehicle'
DETAILS_DOSSIER_MARKOFMASTERY = '#quests:details/dossier/markOfMastery'
DETAILS_DOSSIER_WARRIOR = '#quests:details/dossier/warrior'
DETAILS_DOSSIER_INVADER = '#quests:details/dossier/invader'
DETAILS_DOSSIER_SNIPER = '#quests:details/dossier/sniper'
DETAILS_DOSSIER_SNIPER2 = '#quests:details/dossier/sniper2'
DETAILS_DOSSIER_MAINGUN = '#quests:details/dossier/mainGun'
DETAILS_DOSSIER_ARMOREDFIST = '#quests:details/dossier/armoredFist'
DETAILS_DOSSIER_GENIUSFORWARMEDAL = '#quests:details/dossier/geniusForWarMedal'
DETAILS_DOSSIER_WOLFAMONGSHEEPMEDAL = '#quests:details/dossier/wolfAmongSheepMedal'
DETAILS_DOSSIER_TACTICALBREAKTHROUGH = '#quests:details/dossier/tacticalBreakthrough'
DETAILS_DOSSIER_KINGOFTHEHILL = '#quests:details/dossier/kingOfTheHill'
DETAILS_DOSSIER_DEFENDER = '#quests:details/dossier/defender'
DETAILS_DOSSIER_STEELWALL = '#quests:details/dossier/steelwall'
DETAILS_DOSSIER_SUPPORTER = '#quests:details/dossier/supporter'
DETAILS_DOSSIER_SCOUT = '#quests:details/dossier/scout'
DETAILS_DOSSIER_MEDALKAY = '#quests:details/dossier/medalKay'
DETAILS_DOSSIER_MEDALCARIUS = '#quests:details/dossier/medalCarius'
DETAILS_DOSSIER_MEDALKNISPEL = '#quests:details/dossier/medalKnispel'
DETAILS_DOSSIER_MEDALPOPPEL = '#quests:details/dossier/medalPoppel'
DETAILS_DOSSIER_MEDALABRAMS = '#quests:details/dossier/medalAbrams'
DETAILS_DOSSIER_MEDALLECLERC = '#quests:details/dossier/medalLeClerc'
DETAILS_DOSSIER_MEDALLAVRINENKO = '#quests:details/dossier/medalLavrinenko'
DETAILS_DOSSIER_MEDALEKINS = '#quests:details/dossier/medalEkins'
DETAILS_DOSSIER_MEDALWITTMANN = '#quests:details/dossier/medalWittmann'
DETAILS_DOSSIER_MEDALORLIK = '#quests:details/dossier/medalOrlik'
DETAILS_DOSSIER_MEDALOSKIN = '#quests:details/dossier/medalOskin'
DETAILS_DOSSIER_MEDALHALONEN = '#quests:details/dossier/medalHalonen'
DETAILS_DOSSIER_MEDALBURDA = '#quests:details/dossier/medalBurda'
DETAILS_DOSSIER_MEDALBILLOTTE = '#quests:details/dossier/medalBillotte'
DETAILS_DOSSIER_MEDALKOLOBANOV = '#quests:details/dossier/medalKolobanov'
DETAILS_DOSSIER_MEDALFADIN = '#quests:details/dossier/medalFadin'
DETAILS_DOSSIER_TITLESNIPER = '#quests:details/dossier/titleSniper'
DETAILS_DOSSIER_INVINCIBLE = '#quests:details/dossier/invincible'
DETAILS_DOSSIER_DIEHARD = '#quests:details/dossier/diehard'
DETAILS_DOSSIER_RAIDER = '#quests:details/dossier/raider'
DETAILS_DOSSIER_HANDOFDEATH = '#quests:details/dossier/handOfDeath'
DETAILS_DOSSIER_ARMORPIERCER = '#quests:details/dossier/armorPiercer'
DETAILS_DOSSIER_KAMIKAZE = '#quests:details/dossier/kamikaze'
DETAILS_DOSSIER_BEASTHUNTER = '#quests:details/dossier/beasthunter'
DETAILS_DOSSIER_MOUSEBANE = '#quests:details/dossier/mousebane'
DETAILS_DOSSIER_EVILEYE = '#quests:details/dossier/evileye'
DETAILS_DOSSIER_BATTLECITIZEN = '#quests:details/dossier/battleCitizen'
DETAILS_DOSSIER_MEDALRADLEYWALTERS = '#quests:details/dossier/medalRadleyWalters'
DETAILS_DOSSIER_MEDALLAFAYETTEPOOL = '#quests:details/dossier/medalLafayettePool'
DETAILS_DOSSIER_MEDALBRUNOPIETRO = '#quests:details/dossier/medalBrunoPietro'
DETAILS_DOSSIER_MEDALTARCZAY = '#quests:details/dossier/medalTarczay'
DETAILS_DOSSIER_MEDALPASCUCCI = '#quests:details/dossier/medalPascucci'
DETAILS_DOSSIER_MEDALDUMITRU = '#quests:details/dossier/medalDumitru'
DETAILS_DOSSIER_MEDALLEHVASLAIHO = '#quests:details/dossier/medalLehvaslaiho'
DETAILS_DOSSIER_MEDALNIKOLAS = '#quests:details/dossier/medalNikolas'
DETAILS_DOSSIER_FRAGSSINAI = '#quests:details/dossier/fragsSinai'
DETAILS_DOSSIER_SINAI = '#quests:details/dossier/sinai'
DETAILS_DOSSIER_HEROESOFRASSENAY = '#quests:details/dossier/heroesOfRassenay'
DETAILS_DOSSIER_MEDALBROTHERSINARMS = '#quests:details/dossier/medalBrothersInArms'
DETAILS_DOSSIER_MEDALCRUCIALCONTRIBUTION = '#quests:details/dossier/medalCrucialContribution'
DETAILS_DOSSIER_MEDALDELANGLADE = '#quests:details/dossier/medalDeLanglade'
DETAILS_DOSSIER_MEDALTAMADAYOSHIO = '#quests:details/dossier/medalTamadaYoshio'
DETAILS_DOSSIER_BOMBARDIER = '#quests:details/dossier/bombardier'
DETAILS_DOSSIER_HUNTSMAN = '#quests:details/dossier/huntsman'
DETAILS_DOSSIER_STURDY = '#quests:details/dossier/sturdy'
DETAILS_DOSSIER_IRONMAN = '#quests:details/dossier/ironMan'
DETAILS_DOSSIER_FRAGSPATTON = '#quests:details/dossier/fragsPatton'
DETAILS_DOSSIER_PATTONVALLEY = '#quests:details/dossier/pattonValley'

# Dossier statistics keys, grouped per battle mode:
# random, company, clan, historical, team.
DETAILS_DOSSIER_RANDOM_XP = '#quests:details/dossier/random/xp'
DETAILS_DOSSIER_RANDOM_MAXXP = '#quests:details/dossier/random/maxXP'
DETAILS_DOSSIER_RANDOM_BATTLESCOUNT = '#quests:details/dossier/random/battlesCount'
DETAILS_DOSSIER_RANDOM_WINS = '#quests:details/dossier/random/wins'
DETAILS_DOSSIER_RANDOM_LOSSES = '#quests:details/dossier/random/losses'
DETAILS_DOSSIER_RANDOM_SURVIVEDBATTLES = '#quests:details/dossier/random/survivedBattles'
DETAILS_DOSSIER_RANDOM_LASTBATTLETIME = '#quests:details/dossier/random/lastBattleTime'
DETAILS_DOSSIER_RANDOM_WINANDSURVIVED = '#quests:details/dossier/random/winAndSurvived'
DETAILS_DOSSIER_RANDOM_BATTLEHEROES = '#quests:details/dossier/random/battleHeroes'
DETAILS_DOSSIER_RANDOM_FRAGS = '#quests:details/dossier/random/frags'
DETAILS_DOSSIER_RANDOM_MAXFRAGS = '#quests:details/dossier/random/maxFrags'
DETAILS_DOSSIER_RANDOM_MAXDAMAGE = '#quests:details/dossier/random/maxDamage'
DETAILS_DOSSIER_RANDOM_FRAGS8P = '#quests:details/dossier/random/frags8p'
DETAILS_DOSSIER_RANDOM_FRAGSBEAST = '#quests:details/dossier/random/fragsBeast'
DETAILS_DOSSIER_RANDOM_DIRECTHITS = '#quests:details/dossier/random/directHits'
DETAILS_DOSSIER_RANDOM_SPOTTED = '#quests:details/dossier/random/spotted'
DETAILS_DOSSIER_RANDOM_DAMAGEDEALT = '#quests:details/dossier/random/damageDealt'
DETAILS_DOSSIER_RANDOM_DAMAGERECEIVED = '#quests:details/dossier/random/damageReceived'
DETAILS_DOSSIER_RANDOM_DIRECTHITSRECEIVED = '#quests:details/dossier/random/directHitsReceived'
DETAILS_DOSSIER_RANDOM_CAPTUREPOINTS = '#quests:details/dossier/random/capturePoints'
DETAILS_DOSSIER_RANDOM_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/random/droppedCapturePoints'
DETAILS_DOSSIER_RANDOM_PIERCINGS = '#quests:details/dossier/random/piercings'
DETAILS_DOSSIER_RANDOM_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/random/noDamageDirectHitsReceived'
DETAILS_DOSSIER_RANDOM_PIERCINGSRECEIVED = '#quests:details/dossier/random/piercingsReceived'
DETAILS_DOSSIER_RANDOM_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/random/potentialDamageReceived'
DETAILS_DOSSIER_RANDOM_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/random/damageBlockedByArmor'
DETAILS_DOSSIER_RANDOM_ORIGINALXP = '#quests:details/dossier/random/originalXP'
DETAILS_DOSSIER_RANDOM_DAMAGEASSISTEDTRACK = '#quests:details/dossier/random/damageAssistedTrack'
DETAILS_DOSSIER_RANDOM_DAMAGEASSISTEDRADIO = '#quests:details/dossier/random/damageAssistedRadio'
DETAILS_DOSSIER_RANDOM_SHOTS = '#quests:details/dossier/random/shots'
DETAILS_DOSSIER_RANDOM_EXPLOSIONHITSRECEIVED = '#quests:details/dossier/random/explosionHitsReceived'
# Company battle statistics.
DETAILS_DOSSIER_COMPANY_XP = '#quests:details/dossier/company/xp'
DETAILS_DOSSIER_COMPANY_BATTLESCOUNT = '#quests:details/dossier/company/battlesCount'
DETAILS_DOSSIER_COMPANY_WINS = '#quests:details/dossier/company/wins'
DETAILS_DOSSIER_COMPANY_LOSSES = '#quests:details/dossier/company/losses'
DETAILS_DOSSIER_COMPANY_SURVIVEDBATTLES = '#quests:details/dossier/company/survivedBattles'
DETAILS_DOSSIER_COMPANY_FRAGS = '#quests:details/dossier/company/frags'
DETAILS_DOSSIER_COMPANY_DIRECTHITS = '#quests:details/dossier/company/directHits'
DETAILS_DOSSIER_COMPANY_SPOTTED = '#quests:details/dossier/company/spotted'
DETAILS_DOSSIER_COMPANY_DAMAGEDEALT = '#quests:details/dossier/company/damageDealt'
DETAILS_DOSSIER_COMPANY_MAXDAMAGE = '#quests:details/dossier/company/maxDamage'
DETAILS_DOSSIER_COMPANY_DAMAGERECEIVED = '#quests:details/dossier/company/damageReceived'
DETAILS_DOSSIER_COMPANY_CAPTUREPOINTS = '#quests:details/dossier/company/capturePoints'
DETAILS_DOSSIER_COMPANY_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/company/droppedCapturePoints'
DETAILS_DOSSIER_COMPANY_PIERCINGS = '#quests:details/dossier/company/piercings'
DETAILS_DOSSIER_COMPANY_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/company/noDamageDirectHitsReceived'
DETAILS_DOSSIER_COMPANY_PIERCINGSRECEIVED = '#quests:details/dossier/company/piercingsReceived'
DETAILS_DOSSIER_COMPANY_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/company/potentialDamageReceived'
DETAILS_DOSSIER_COMPANY_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/company/damageBlockedByArmor'
DETAILS_DOSSIER_COMPANY_ORIGINALXP = '#quests:details/dossier/company/originalXP'
DETAILS_DOSSIER_COMPANY_DAMAGEASSISTEDTRACK = '#quests:details/dossier/company/damageAssistedTrack'
DETAILS_DOSSIER_COMPANY_DAMAGEASSISTEDRADIO = '#quests:details/dossier/company/damageAssistedRadio'
# Clan (global map) battle statistics.
DETAILS_DOSSIER_CLAN_XP = '#quests:details/dossier/clan/xp'
DETAILS_DOSSIER_CLAN_BATTLESCOUNT = '#quests:details/dossier/clan/battlesCount'
DETAILS_DOSSIER_CLAN_WINS = '#quests:details/dossier/clan/wins'
DETAILS_DOSSIER_CLAN_LOSSES = '#quests:details/dossier/clan/losses'
DETAILS_DOSSIER_CLAN_SURVIVEDBATTLES = '#quests:details/dossier/clan/survivedBattles'
DETAILS_DOSSIER_CLAN_FRAGS = '#quests:details/dossier/clan/frags'
DETAILS_DOSSIER_CLAN_DIRECTHITS = '#quests:details/dossier/clan/directHits'
DETAILS_DOSSIER_CLAN_SPOTTED = '#quests:details/dossier/clan/spotted'
DETAILS_DOSSIER_CLAN_DAMAGEDEALT = '#quests:details/dossier/clan/damageDealt'
DETAILS_DOSSIER_CLAN_MAXDAMAGE = '#quests:details/dossier/clan/maxDamage'
DETAILS_DOSSIER_CLAN_DAMAGERECEIVED = '#quests:details/dossier/clan/damageReceived'
DETAILS_DOSSIER_CLAN_CAPTUREPOINTS = '#quests:details/dossier/clan/capturePoints'
DETAILS_DOSSIER_CLAN_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/clan/droppedCapturePoints'
DETAILS_DOSSIER_CLAN_PIERCINGS = '#quests:details/dossier/clan/piercings'
DETAILS_DOSSIER_CLAN_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/clan/noDamageDirectHitsReceived'
DETAILS_DOSSIER_CLAN_PIERCINGSRECEIVED = '#quests:details/dossier/clan/piercingsReceived'
DETAILS_DOSSIER_CLAN_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/clan/potentialDamageReceived'
DETAILS_DOSSIER_CLAN_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/clan/damageBlockedByArmor'
DETAILS_DOSSIER_CLAN_ORIGINALXP = '#quests:details/dossier/clan/originalXP'
DETAILS_DOSSIER_CLAN_DAMAGEASSISTEDTRACK = '#quests:details/dossier/clan/damageAssistedTrack'
DETAILS_DOSSIER_CLAN_DAMAGEASSISTEDRADIO = '#quests:details/dossier/clan/damageAssistedRadio'
# Historical battle statistics.
DETAILS_DOSSIER_HISTORICAL_XP = '#quests:details/dossier/historical/xp'
DETAILS_DOSSIER_HISTORICAL_BATTLESCOUNT = '#quests:details/dossier/historical/battlesCount'
DETAILS_DOSSIER_HISTORICAL_WINS = '#quests:details/dossier/historical/wins'
DETAILS_DOSSIER_HISTORICAL_LOSSES = '#quests:details/dossier/historical/losses'
DETAILS_DOSSIER_HISTORICAL_SURVIVEDBATTLES = '#quests:details/dossier/historical/survivedBattles'
DETAILS_DOSSIER_HISTORICAL_FRAGS = '#quests:details/dossier/historical/frags'
DETAILS_DOSSIER_HISTORICAL_DIRECTHITS = '#quests:details/dossier/historical/directHits'
DETAILS_DOSSIER_HISTORICAL_SPOTTED = '#quests:details/dossier/historical/spotted'
DETAILS_DOSSIER_HISTORICAL_DAMAGEDEALT = '#quests:details/dossier/historical/damageDealt'
DETAILS_DOSSIER_HISTORICAL_MAXDAMAGE = '#quests:details/dossier/historical/maxDamage'
DETAILS_DOSSIER_HISTORICAL_DAMAGERECEIVED = '#quests:details/dossier/historical/damageReceived'
DETAILS_DOSSIER_HISTORICAL_CAPTUREPOINTS = '#quests:details/dossier/historical/capturePoints'
DETAILS_DOSSIER_HISTORICAL_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/historical/droppedCapturePoints'
DETAILS_DOSSIER_HISTORICAL_PIERCINGS = '#quests:details/dossier/historical/piercings'
DETAILS_DOSSIER_HISTORICAL_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/historical/noDamageDirectHitsReceived'
DETAILS_DOSSIER_HISTORICAL_PIERCINGSRECEIVED = '#quests:details/dossier/historical/piercingsReceived'
DETAILS_DOSSIER_HISTORICAL_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/historical/potentialDamageReceived'
DETAILS_DOSSIER_HISTORICAL_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/historical/damageBlockedByArmor'
DETAILS_DOSSIER_HISTORICAL_ORIGINALXP = '#quests:details/dossier/historical/originalXP'
DETAILS_DOSSIER_HISTORICAL_DAMAGEASSISTEDTRACK = '#quests:details/dossier/historical/damageAssistedTrack'
DETAILS_DOSSIER_HISTORICAL_DAMAGEASSISTEDRADIO = '#quests:details/dossier/historical/damageAssistedRadio'
# Team (7x7) battle statistics — group continues beyond this chunk.
DETAILS_DOSSIER_TEAM_XP = '#quests:details/dossier/team/xp'
DETAILS_DOSSIER_TEAM_BATTLESCOUNT = '#quests:details/dossier/team/battlesCount'
DETAILS_DOSSIER_TEAM_WINS = '#quests:details/dossier/team/wins'
DETAILS_DOSSIER_TEAM_LOSSES = '#quests:details/dossier/team/losses'
DETAILS_DOSSIER_TEAM_SURVIVEDBATTLES = '#quests:details/dossier/team/survivedBattles'
DETAILS_DOSSIER_TEAM_FRAGS = '#quests:details/dossier/team/frags'
DETAILS_DOSSIER_TEAM_DIRECTHITS = '#quests:details/dossier/team/directHits'
DETAILS_DOSSIER_TEAM_SPOTTED = '#quests:details/dossier/team/spotted'
DETAILS_DOSSIER_TEAM_DAMAGEDEALT = '#quests:details/dossier/team/damageDealt'
DETAILS_DOSSIER_TEAM_MAXDAMAGE = '#quests:details/dossier/team/maxDamage'
DETAILS_DOSSIER_TEAM_DAMAGERECEIVED = '#quests:details/dossier/team/damageReceived'
DETAILS_DOSSIER_TEAM_CAPTUREPOINTS = '#quests:details/dossier/team/capturePoints'
DETAILS_DOSSIER_TEAM_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/team/droppedCapturePoints'
DETAILS_DOSSIER_TEAM_PIERCINGS = '#quests:details/dossier/team/piercings'
DETAILS_DOSSIER_TEAM_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/team/noDamageDirectHitsReceived'
DETAILS_DOSSIER_TEAM_PIERCINGSRECEIVED = '#quests:details/dossier/team/piercingsReceived'
DETAILS_DOSSIER_TEAM_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/team/potentialDamageReceived'
DETAILS_DOSSIER_TEAM_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/team/damageBlockedByArmor'
DETAILS_DOSSIER_TEAM_ORIGINALXP = '#quests:details/dossier/team/originalXP'
DETAILS_DOSSIER_TEAM_DAMAGEASSISTEDTRACK = '#quests:details/dossier/team/damageAssistedTrack'
DETAILS_DOSSIER_TEAM_DAMAGEASSISTEDRADIO = '#quests:details/dossier/team/damageAssistedRadio'
DETAILS_DOSSIER_LADDER_XP = '#quests:details/dossier/ladder/xp'
DETAILS_DOSSIER_LADDER_BATTLESCOUNT = '#quests:details/dossier/ladder/battlesCount'
DETAILS_DOSSIER_LADDER_WINS = '#quests:details/dossier/ladder/wins'
DETAILS_DOSSIER_LADDER_LOSSES = '#quests:details/dossier/ladder/losses'
DETAILS_DOSSIER_LADDER_SURVIVEDBATTLES = '#quests:details/dossier/ladder/survivedBattles'
DETAILS_DOSSIER_LADDER_FRAGS = '#quests:details/dossier/ladder/frags'
DETAILS_DOSSIER_LADDER_DIRECTHITS = '#quests:details/dossier/ladder/directHits'
DETAILS_DOSSIER_LADDER_SPOTTED = '#quests:details/dossier/ladder/spotted'
DETAILS_DOSSIER_LADDER_DAMAGEDEALT = '#quests:details/dossier/ladder/damageDealt'
DETAILS_DOSSIER_LADDER_MAXDAMAGE = '#quests:details/dossier/ladder/maxDamage'
DETAILS_DOSSIER_LADDER_DAMAGERECEIVED = '#quests:details/dossier/ladder/damageReceived'
DETAILS_DOSSIER_LADDER_CAPTUREPOINTS = '#quests:details/dossier/ladder/capturePoints'
DETAILS_DOSSIER_LADDER_DROPPEDCAPTUREPOINTS = '#quests:details/dossier/ladder/droppedCapturePoints'
DETAILS_DOSSIER_LADDER_PIERCINGS = '#quests:details/dossier/ladder/piercings'
DETAILS_DOSSIER_LADDER_NODAMAGEDIRECTHITSRECEIVED = '#quests:details/dossier/ladder/noDamageDirectHitsReceived'
DETAILS_DOSSIER_LADDER_PIERCINGSRECEIVED = '#quests:details/dossier/ladder/piercingsReceived'
DETAILS_DOSSIER_LADDER_POTENTIALDAMAGERECEIVED = '#quests:details/dossier/ladder/potentialDamageReceived'
DETAILS_DOSSIER_LADDER_DAMAGEBLOCKEDBYARMOR = '#quests:details/dossier/ladder/damageBlockedByArmor'
DETAILS_DOSSIER_LADDER_ORIGINALXP = '#quests:details/dossier/ladder/originalXP'
DETAILS_DOSSIER_LADDER_DAMAGEASSISTEDTRACK = '#quests:details/dossier/ladder/damageAssistedTrack'
DETAILS_DOSSIER_LADDER_DAMAGEASSISTEDRADIO = '#quests:details/dossier/ladder/damageAssistedRadio'
DETAILS_MODIFIERS_TITLE_DISCOUNT = '#quests:details/modifiers/title/discount'
DETAILS_MODIFIERS_TITLE_SELLING = '#quests:details/modifiers/title/selling'
DETAILS_MODIFIERS_TITLE_AVAILABILITY = '#quests:details/modifiers/title/availability'
DETAILS_MODIFIERS_ECONOMICS_SLOTSPRICES = '#quests:details/modifiers/economics/slotsPrices'
DETAILS_MODIFIERS_ECONOMICS_BERTHSPRICES = '#quests:details/modifiers/economics/berthsPrices'
DETAILS_MODIFIERS_ECONOMICS_CREDITSTANKMANCOST = '#quests:details/modifiers/economics/creditsTankmanCost'
DETAILS_MODIFIERS_ECONOMICS_GOLDTANKMANCOST = '#quests:details/modifiers/economics/goldTankmanCost'
DETAILS_MODIFIERS_ECONOMICS_CREDITSDROPSKILLSCOST = '#quests:details/modifiers/economics/creditsDropSkillsCost'
DETAILS_MODIFIERS_ECONOMICS_GOLDDROPSKILLSCOST = '#quests:details/modifiers/economics/goldDropSkillsCost'
DETAILS_MODIFIERS_ECONOMICS_EXCHANGERATE = '#quests:details/modifiers/economics/exchangeRate'
DETAILS_MODIFIERS_ECONOMICS_EXCHANGERATEFORSHELLSANDEQS = '#quests:details/modifiers/economics/exchangeRateForShellsAndEqs'
DETAILS_MODIFIERS_ECONOMICS_PAIDREMOVALCOST = '#quests:details/modifiers/economics/paidRemovalCost'
DETAILS_MODIFIERS_ECONOMICS_PASSPORTCHANGECOST = '#quests:details/modifiers/economics/passportChangeCost'
DETAILS_MODIFIERS_ECONOMICS_FEMALEPASSPORTCHANGECOST = '#quests:details/modifiers/economics/femalePassportChangeCost'
DETAILS_MODIFIERS_ECONOMICS_CLANCREATIONCOST = '#quests:details/modifiers/economics/clanCreationCost'
DETAILS_MODIFIERS_ECONOMICS_FREEXPCONVERSIONDISCRECITY = '#quests:details/modifiers/economics/freeXPConversionDiscrecity'
DETAILS_MODIFIERS_ECONOMICS_FREEXPTOTMANXPRATE = '#quests:details/modifiers/economics/freeXPToTManXPRate'
DETAILS_MODIFIERS_ECONOMICS_AVAILABLE_FREEXPTOTMANXPRATE = '#quests:details/modifiers/economics/available/freeXPToTManXPRate'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET1 = '#quests:details/modifiers/economics/premiumPacket1'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET3 = '#quests:details/modifiers/economics/premiumPacket3'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET7 = '#quests:details/modifiers/economics/premiumPacket7'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET30 = '#quests:details/modifiers/economics/premiumPacket30'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET180 = '#quests:details/modifiers/economics/premiumPacket180'
DETAILS_MODIFIERS_ECONOMICS_PREMIUMPACKET360 = '#quests:details/modifiers/economics/premiumPacket360'
DETAILS_MODIFIERS_ECONOMICS_CAMOUFLAGEPACKETINFCOST = '#quests:details/modifiers/economics/camouflagePacketInfCost'
DETAILS_MODIFIERS_ECONOMICS_CAMOUFLAGEPACKET7COST = '#quests:details/modifiers/economics/camouflagePacket7Cost'
DETAILS_MODIFIERS_ECONOMICS_CAMOUFLAGEPACKET30COST = '#quests:details/modifiers/economics/camouflagePacket30Cost'
DETAILS_MODIFIERS_ECONOMICS_INSCRIPTIONPACKETINFCOST = '#quests:details/modifiers/economics/inscriptionPacketInfCost'
DETAILS_MODIFIERS_ECONOMICS_INSCRIPTIONPACKET7COST = '#quests:details/modifiers/economics/inscriptionPacket7Cost'
DETAILS_MODIFIERS_ECONOMICS_INSCRIPTIONPACKET30COST = '#quests:details/modifiers/economics/inscriptionPacket30Cost'
DETAILS_MODIFIERS_ECONOMICS_EMBLEMPACKETINFCOST = '#quests:details/modifiers/economics/emblemPacketInfCost'
DETAILS_MODIFIERS_ECONOMICS_EMBLEMPACKET7COST = '#quests:details/modifiers/economics/emblemPacket7Cost'
DETAILS_MODIFIERS_ECONOMICS_EMBLEMPACKET30COST = '#quests:details/modifiers/economics/emblemPacket30Cost'
DETAILS_MODIFIERS_EQUIPMENT_GOLD = '#quests:details/modifiers/equipment/gold'
DETAILS_MODIFIERS_EQUIPMENT_CREDITS = '#quests:details/modifiers/equipment/credits'
DETAILS_MODIFIERS_EQUIPMENT_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/equipment/goldPriceMultiplier'
DETAILS_MODIFIERS_EQUIPMENT_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/equipment/creditsPriceMultiplier'
DETAILS_MODIFIERS_OPTDEVICE = '#quests:details/modifiers/optDevice'
DETAILS_MODIFIERS_OPTDEVICE_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/optDevice/goldPriceMultiplier'
DETAILS_MODIFIERS_OPTDEVICE_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/optDevice/creditsPriceMultiplier'
DETAILS_MODIFIERS_SHELL_GOLD = '#quests:details/modifiers/shell/gold'
DETAILS_MODIFIERS_SHELL_CREDITS = '#quests:details/modifiers/shell/credits'
DETAILS_MODIFIERS_SHELL_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/shell/goldPriceMultiplier'
DETAILS_MODIFIERS_SHELL_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/shell/creditsPriceMultiplier'
DETAILS_MODIFIERS_SHELL_NATION_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/shell/nation/goldPriceMultiplier'
DETAILS_MODIFIERS_SHELL_NATION_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/shell/nation/creditsPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE = '#quests:details/modifiers/vehicle'
DETAILS_MODIFIERS_RENTVEHICLE = '#quests:details/modifiers/rentVehicle'
DETAILS_MODIFIERS_VEHRENTPACKAGE = '#quests:details/modifiers/vehRentPackage'
DETAILS_MODIFIERS_VEHICLE_SELL = '#quests:details/modifiers/vehicle/sell'
DETAILS_MODIFIERS_VEHICLE_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/goldPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/creditsPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_RENT_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/rent/goldPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_RENT_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/rent/creditsPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_NATION_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/nation/goldPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_NATION_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/nation/creditsPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_RENT_NATION_GOLDPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/rent/nation/goldPriceMultiplier'
DETAILS_MODIFIERS_VEHICLE_RENT_NATION_CREDITSPRICEMULTIPLIER = '#quests:details/modifiers/vehicle/rent/nation/creditsPriceMultiplier'
DETAILS_MODIFIERS_CUSTOMIZATION_VEHCAMOUFLAGE = '#quests:details/modifiers/customization/vehCamouflage'
DETAILS_MODIFIERS_CUSTOMIZATION_GROUPEMBLEM = '#quests:details/modifiers/customization/groupEmblem'
CLASSES_LIGHTTANK = '#quests:classes/lightTank'
CLASSES_MEDIUMTANK = '#quests:classes/mediumTank'
CLASSES_HEAVYTANK = '#quests:classes/heavyTank'
CLASSES_SPG = '#quests:classes/SPG'
CLASSES_AT_SPG = '#quests:classes/AT-SPG'
SEASONAWARDSWINDOW_TITLE = '#quests:seasonAwardsWindow/title'
SEASONAWARDSWINDOW_VEHICLEAWARD_BUTTONABOUT_TEXT = '#quests:seasonAwardsWindow/vehicleAward/buttonAbout/text'
SEASONAWARDSWINDOW_TILEAWARDSTITLE = '#quests:seasonAwardsWindow/tileAwardsTitle'
SEASONAWARDSWINDOW_VEHICLENAME = '#quests:seasonAwardsWindow/vehicleName'
SEASONAWARDSWINDOW_TOKENSCOUNT = '#quests:seasonAwardsWindow/tokensCount'
SEASONAWARDSWINDOW_FEMALETANKMANAWARD_TITLE = '#quests:seasonAwardsWindow/femaleTankmanAward/title'
SEASONAWARDSWINDOW_FEMALETANKMANAWARD_DESCRIPTION = '#quests:seasonAwardsWindow/femaleTankmanAward/description'
SEASONAWARDSWINDOW_COMMENDATIONLISTSAWARD_TITLE = '#quests:seasonAwardsWindow/commendationListsAward/title'
SEASONAWARDSWINDOW_COMMENDATIONLISTSAWARD_DESCRIPTION = '#quests:seasonAwardsWindow/commendationListsAward/description'
QUESTSPERSONALWELCOMEVIEW_MAINTITLE_TEXTLABEL = '#quests:QuestsPersonalWelcomeView/mainTitle/textLabel'
QUESTSPERSONALWELCOMEVIEW_BTNLABEL = '#quests:QuestsPersonalWelcomeView/btnLabel'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK1 = '#quests:QuestsPersonalWelcomeView/textBlock/header/block1'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK1 = '#quests:QuestsPersonalWelcomeView/textBlock/body/block1'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK2 = '#quests:QuestsPersonalWelcomeView/textBlock/header/block2'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK2 = '#quests:QuestsPersonalWelcomeView/textBlock/body/block2'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK3 = '#quests:QuestsPersonalWelcomeView/textBlock/header/block3'
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK3 = '#quests:QuestsPersonalWelcomeView/textBlock/body/block3'
QUESTSPERSONALWELCOMEVIEW_ANNOUNCEMENTTEXT = '#quests:QuestsPersonalWelcomeView/announcementText'
TILECHAINSVIEW_BUTTONBACK_TEXT = '#quests:tileChainsView/buttonBack/text'
TILECHAINSVIEW_TITLE = '#quests:tileChainsView/title'
TILECHAINSVIEW_FILTERSLABEL_TEXT = '#quests:tileChainsView/filtersLabel/text'
TILECHAINSVIEW_NOTASKSLABEL_TEXT = '#quests:tileChainsView/noTasksLabel/text'
TILECHAINSVIEW_TASKTYPEFILTER_ALLITEMS_TEXT = '#quests:tileChainsView/taskTypeFilter/allItems/text'
TILECHAINSVIEW_TASKTYPEFILTER_COMPLETEDITEMS_TEXT = '#quests:tileChainsView/taskTypeFilter/completedItems/text'
TILECHAINSVIEW_TASKTYPEFILTER_ITEMSINPROGRESS_TEXT = '#quests:tileChainsView/taskTypeFilter/itemsInProgress/text'
TILECHAINSVIEW_TASKTYPEFILTER_AWARDSNOTRECEIVEDITEMS_TEXT = '#quests:tileChainsView/taskTypeFilter/awardsNotReceivedItems/text'
TILECHAINSVIEW_TASKLIST_TEXT = '#quests:tileChainsView/taskList/text'
TILECHAINSVIEW_TASKSPROGRESS_TEXT = '#quests:tileChainsView/tasksProgress/text'
TILECHAINSVIEW_TASKTYPE_COMPLETED_TEXT = '#quests:tileChainsView/taskType/completed/text'
TILECHAINSVIEW_TASKTYPE_FULLCOMPLETED_TEXT = '#quests:tileChainsView/taskType/fullCompleted/text'
TILECHAINSVIEW_TASKTYPE_INPROGRESS_TEXT = '#quests:tileChainsView/taskType/inProgress/text'
TILECHAINSVIEW_TASKTYPE_AVAILABLE_TEXT = '#quests:tileChainsView/taskType/available/text'
TILECHAINSVIEW_TASKTYPE_UNAVAILABLE_TEXT = '#quests:tileChainsView/taskType/unavailable/text'
TILECHAINSVIEW_TASKTYPE_AWARDNOTRECEIVED_TEXT = '#quests:tileChainsView/taskType/awardNotReceived/text'
TILECHAINSVIEW_STATISTICSLABEL_TEXT = '#quests:tileChainsView/statisticsLabel/text'
TILECHAINSVIEW_CHAINNAME_LIGHTTANK = '#quests:tileChainsView/chainName/lightTank'
TILECHAINSVIEW_CHAINNAME_MEDIUMTANK = '#quests:tileChainsView/chainName/mediumTank'
TILECHAINSVIEW_CHAINNAME_HEAVYTANK = '#quests:tileChainsView/chainName/heavyTank'
TILECHAINSVIEW_CHAINNAME_AT_SPG = '#quests:tileChainsView/chainName/AT-SPG'
TILECHAINSVIEW_CHAINNAME_SPG = '#quests:tileChainsView/chainName/SPG'
QUESTSCHAINPROGRESSVIEW_HEADER = '#quests:QuestsChainProgressView/header'
QUESTSCHAINPROGRESSVIEW_MAINAWARD = '#quests:QuestsChainProgressView/mainAward'
QUESTSCHAINPROGRESSVIEW_CHAINSPROGRESS = '#quests:QuestsChainProgressView/chainsProgress'
QUESTSCHAINPROGRESSVIEW_ABOUTTANKBTNLABEL = '#quests:QuestsChainProgressView/aboutTankBtnLabel'
QUESTSCHAINPROGRESSVIEW_SHOWINHANGARBTNLABEL = '#quests:QuestsChainProgressView/showInHangarBtnLabel'
QUESTSCHAINPROGRESSVIEW_CHAINPROGRESSCOUNT = '#quests:QuestsChainProgressView/chainProgressCount'
QUESTSCHAINPROGRESSVIEW_FALLOUTCHAINPROGRESSCOUNT = '#quests:QuestsChainProgressView/falloutChainProgressCount'
QUESTTASKDETAILSVIEW_HEADER = '#quests:QuestTaskDetailsView/header'
QUESTTASKDETAILSVIEW_MAINCONDITIONS = '#quests:QuestTaskDetailsView/mainConditions'
QUESTTASKDETAILSVIEW_ADDITIONALCONDITIONS = '#quests:QuestTaskDetailsView/additionalConditions'
QUESTTASKDETAILSVIEW_REQUIREMENTS = '#quests:QuestTaskDetailsView/requirements'
QUESTTASKDETAILSVIEW_REQUIREMENTS_TEXT = '#quests:QuestTaskDetailsView/requirements/text'
QUESTTASKDETAILSVIEW_REQUIREMENTS_MORE8LVL = '#quests:QuestTaskDetailsView/requirements/more8lvl'
QUESTTASKDETAILSVIEW_REQUIREMENTS_ONLY10LVL = '#quests:QuestTaskDetailsView/requirements/only10lvl'
QUESTTASKDETAILSVIEW_DESCRIPTION = '#quests:QuestTaskDetailsView/description'
QUESTTASKDETAILSVIEW_BTNLABEL_BEGIN = '#quests:QuestTaskDetailsView/btnLabel/begin'
QUESTTASKDETAILSVIEW_BTNLABEL_CANCEL = '#quests:QuestTaskDetailsView/btnLabel/cancel'
QUESTTASKDETAILSVIEW_BTNLABEL_TAKEAWARD = '#quests:QuestTaskDetailsView/btnLabel/takeAward'
QUESTTASKDETAILSVIEW_BTNLABEL_REPEAT = '#quests:QuestTaskDetailsView/btnLabel/repeat'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_DONE = '#quests:QuestTaskDetailsView/taskDescription/done'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_EXCELLENTDONE = '#quests:QuestTaskDetailsView/taskDescription/excellentDone'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_DOPREVTASKS = '#quests:QuestTaskDetailsView/taskDescription/doPrevTasks'
QUESTTASKDETAILSVIEW_FALLOUT_TASKDESCRIPTION_DOPREVTASKS = '#quests:QuestTaskDetailsView/fallout/taskDescription/doPrevTasks'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_AVAILABLE = '#quests:QuestTaskDetailsView/taskDescription/available'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_INPROGRESS = '#quests:QuestTaskDetailsView/taskDescription/inProgress'
QUESTTASKDETAILSVIEW_TASKDESCRIPTION_TAKEAWARD = '#quests:QuestTaskDetailsView/taskDescription/takeAward'
QUESTTASKDETAILSVIEW_FORADDITIONALAWARD = '#quests:QuestTaskDetailsView/forAdditionalAward'
TILECHAINSVIEW_CHAINNAME_MULTITEAM = '#quests:tileChainsView/chainName/multiteam'
TILECHAINSVIEW_CHAINNAME_CLASSIC = '#quests:tileChainsView/chainName/classic'
BEGINNERQUESTS_DETAILS_CONDITIONSTITLE = '#quests:beginnerQuests/details/conditionsTitle'
BEGINNERQUESTS_DETAILS_DESCRIPTIONTITLE = '#quests:beginnerQuests/details/descriptionTitle'
BEGINNERQUESTS_DETAILS_BUTTONSHOWVIDEOTIP = '#quests:beginnerQuests/details/buttonShowVideoTip'
BEGINNERQUESTS_DETAILS_BUTTONSTARTQUEST = '#quests:beginnerQuests/details/buttonStartQuest'
BEGINNERQUESTS_DETAILS_NOAWARD = '#quests:beginnerQuests/details/noAward'
QUESTSCONTROL_ADDITIONALTITLE_ENUM = (QUESTSCONTROL_ADDITIONALTITLE_NEEDRECEIVEDAWARD,
QUESTSCONTROL_ADDITIONALTITLE_FREESLOTSANDFREEQUESTS,
QUESTSCONTROL_ADDITIONALTITLE_FIRSTRUN,
QUESTSCONTROL_ADDITIONALTITLE_EMPTY)
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_ENUM = (QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK1, QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK2, QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_BLOCK3)
QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_ENUM = (QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK1, QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK2, QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_BLOCK3)
@staticmethod
def questscontrol_additionaltitle(key):
    """Build the '#quests:questsControl/additionalTitle/<key>' locale key.

    Raises Exception when the resulting key is not one of the known
    QUESTSCONTROL_ADDITIONALTITLE_ENUM entries.
    """
    outcome = '#quests:questsControl/additionalTitle/%s' % key
    if outcome not in QUESTS.QUESTSCONTROL_ADDITIONALTITLE_ENUM:
        # Instance-call raise form: valid on both Python 2 and Python 3
        # (the original `raise Exception, msg` comma form is Py2-only).
        raise Exception('locale key "' + outcome + '" was not found')
    return outcome
@staticmethod
def questspersonalwelcomeview_textblock_body(key):
    """Build the '#quests:QuestsPersonalWelcomeView/textBlock/body/<key>' locale key.

    Raises Exception when the resulting key is not one of the known
    QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_ENUM entries.
    """
    outcome = '#quests:QuestsPersonalWelcomeView/textBlock/body/%s' % key
    if outcome not in QUESTS.QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_BODY_ENUM:
        # Instance-call raise form: valid on both Python 2 and Python 3.
        raise Exception('locale key "' + outcome + '" was not found')
    return outcome
@staticmethod
def questspersonalwelcomeview_textblock_header(key):
    """Build the '#quests:QuestsPersonalWelcomeView/textBlock/header/<key>' locale key.

    Raises Exception when the resulting key is not one of the known
    QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_ENUM entries.
    """
    outcome = '#quests:QuestsPersonalWelcomeView/textBlock/header/%s' % key
    if outcome not in QUESTS.QUESTSPERSONALWELCOMEVIEW_TEXTBLOCK_HEADER_ENUM:
        # Instance-call raise form: valid on both Python 2 and Python 3.
        raise Exception('locale key "' + outcome + '" was not found')
    return outcome
| [
"info@webium.sk"
] | info@webium.sk |
0bb6bbb5cb9dab50c91c9d10e108fdd734f381b9 | c77b2f06a971d5e77a3dc71e972ef27fc85475a5 | /algo_ds/_general/pattern_matching_naive.py | b1c431aa83f479f3a7c749328a24bc2fee5b1372 | [] | no_license | thefr33radical/codeblue | f25520ea85110ed09b09ae38e7db92bab8285b2f | 86bf4a4ba693b1797564dca66b645487973dafa4 | refs/heads/master | 2022-08-01T19:05:09.486567 | 2022-07-18T22:56:05 | 2022-07-18T22:56:05 | 110,525,490 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | '''
txt[] = "THIS IS A TEST TEXT"
pat[] = "TEST"
'''
import re
def compute(txt="THIS IS A TEST TEXT TEST", pat="TEST"):
    """Search for *pat* in *txt* and report the first occurrence.

    Prints the (start, end) span of the first regex match of *pat*,
    then prints "yes" if *pat* occurs as a plain substring.  Defaults
    reproduce the original hard-coded demo values.

    Parameters:
        txt: text to search in.
        pat: pattern to look for (treated as a regular expression by
             the span search and as a literal by the membership test).
    """
    m = re.search(pat, txt)
    if m is not None:  # guard: the original crashed with AttributeError on no match
        print(m.span())
    if pat in txt:
        print("yes")
# Run the demo search when executed as a script.
if __name__=="__main__":
    compute()
"imperial.gauntlet@gmail.com"
] | imperial.gauntlet@gmail.com |
af117a214e9ee11f27d2d658208023a830eba035 | 284c2fab037fb55a13ef227d93eafe94a5558b69 | /extract_face_img.py | 4a5b414861ff7aa493b2c2139725f5caff389af1 | [] | no_license | emsha/bumble_bot | 757e3b2ce8422732c8887e7af2d703f1fd14a768 | 063520a4f64fbd343a1355fccf59b02d470932fe | refs/heads/master | 2020-09-24T03:40:50.475394 | 2019-12-03T15:52:10 | 2019-12-03T15:52:10 | 225,652,916 | 8 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | import pyscreenshot as ImageGrab
import numpy as np
import cv2 as cv
def extractFacesFromScreen(full_screen_path, save_path):
# grab fullscreen
im = ImageGrab.grab()
im_np = np.array(im)
# save image file
# print(0)
# img_name = full_screen_path+'screenshot.png'
# print(1)
# im.save(img_name)
# print(2)
# show image in a window
# im.show()
face_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
eye_cascade = cv.CascadeClassifier('haarcascade_eye.xml')
# img = cv.imread(img_name)
img = im_np[:,:,:3] # strip alpha channel
img = cv.cvtColor(img,cv.COLOR_BGR2RGB) # convert numpy rgb to cv bgr
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5,) #minSize=(50,50))
face_crops = []
for (x,y,w,h) in faces:
# scale rectangles
scale_factor = 1.5
w_s = w * scale_factor
h_s = h * scale_factor
x_diff = (w_s - w)/2
y_diff = (h_s - h)/2
x -= x_diff
y -= y_diff
w = w_s
h = h_s
x = int(x)
y = int(y)
w = int(w)
h = int(h)
# make crops
crop_img = img[y:y+h, x:x+w]
face_crops.append(crop_img)
return face_crops
# Demo: detect faces on the current screen and display each crop.
if __name__ == '__main__':
    crops = extractFacesFromScreen('./screenshots/', './screenshots/crops/')
    for c in crops:
        cv.imshow('e',c)
        cv.waitKey(0)  # wait for a key press before showing the next crop
    cv.destroyAllWindows()
"maxshashoua@gmail.com"
] | maxshashoua@gmail.com |
ba3b26fcd5bce849ee538c53641f1399c71178b4 | c1966debc6c164cefe7f5171c55661cb27b41162 | /cal/urls.py | aa85a63b8336c2bdc4d28d5b089aa4a213463961 | [] | no_license | wrabbit/skibi | 2b11244bfd2756bbfd7db69473539721b5ed5380 | a46543ac3f57187cd667b86de2780f4ded26b8d1 | refs/heads/master | 2021-01-17T09:32:43.075445 | 2008-07-03T16:15:00 | 2008-07-03T16:15:00 | 30,975 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | from django.conf.urls.defaults import *
from cal.controller import CalendarController
from cal.models import Event
## calendar view
urlpatterns = patterns('cal.views',
(r'^view/.*$', 'view'),
(r'^upd/.*$', 'updEvent'),
(r'^add/.*$', 'addEvent'),
(r'^del/.*$', 'delEvent'),
(r'^.*$', 'view',{'header':'Organizator'}),
)
| [
"kleiner1@op.pl"
] | kleiner1@op.pl |
2e8ddd35d6897244933cdd72f0cd5cb2f56f7102 | b8208e7ca345858b7a91bb1b2bedd1d18586d497 | /Project03/Project03/wsgi.py | aea2c14f4ee857c86ef61bc174b78cc64c57f921 | [] | no_license | HelenC11/CS1XA3 | 7b6c643aa4717dbbdbdddaef360fa7271e455c17 | 2f47b5901a8bd99cb9a531cbc013104e382dea24 | refs/heads/master | 2023-02-16T12:02:18.226502 | 2021-01-19T03:39:10 | 2021-01-19T03:39:10 | 232,410,391 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
WSGI config for Project03 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before building the app
# (setdefault keeps any value already present in the environment).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Project03.settings')
# Module-level WSGI callable imported by application servers.
application = get_wsgi_application()
| [
"chenh214@mcmaster.ca"
] | chenh214@mcmaster.ca |
a8224a39756bc5a122a4204c1b62ab8ba4709b3d | 5503e9d542300d9a671b783aa7fea4d909e6b909 | /migrations/versions/ed91694044f6_.py | 39622bc3e0e12cb6737ea6250591775261fcde9b | [] | no_license | Ch3xd/WebPenFw | d874865ab38cef3eb886dc219c7e66859921cbcd | 0729fe9a3af6d06ebb79819a9d363e15ad8d369b | refs/heads/master | 2023-04-02T06:15:43.079736 | 2021-04-11T09:34:23 | 2021-04-11T09:34:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | """empty message
Revision ID: ed91694044f6
Revises:
Create Date: 2021-01-27 21:51:24.466508
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'ed91694044f6'  # this migration's unique id
down_revision = None  # None: this is the first migration in the chain
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: create the ``user`` table.

    Columns: ``id`` (integer primary key), ``name`` (varchar(16),
    nullable) and ``cookie`` (varchar(2300), nullable).
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=16), nullable=True),
    sa.Column('cookie', sa.String(length=2300), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``user`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user')
    # ### end Alembic commands ###
"36556439+18508951871@users.noreply.github.com"
] | 36556439+18508951871@users.noreply.github.com |
39c7e607f72e93a7faa67ff08f4f3c8ebaa34496 | af0e71bd2a6e3d0bd33fcde91ff893fb4b712d52 | /multiAgents.py | c1818ed26cd43a3fda29189fe0d217443a52ba50 | [] | no_license | dhanashripp/AI_Adverserial_search | 20c8706ef6baa7e8703c3635ba09c8d55ad975c7 | f18a5752e7e7774c602aa42a715f0a91e71464e5 | refs/heads/master | 2021-07-21T06:01:18.916623 | 2017-11-01T01:00:15 | 2017-11-01T01:00:15 | 109,068,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,432 | py | # multiAgents.py
# --------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
from util import manhattanDistance
from game import Directions
import random, util
from game import Agent
class ReflexAgent(Agent):
    """
    A reflex agent chooses an action at each choice point by examining
    its alternatives via a state evaluation function: each legal move is
    scored by evaluationFunction and one of the best-scoring moves is
    picked at random.
    """

    def getAction(self, gameState):
        """Return the legal action with the highest evaluation score.

        Ties are broken uniformly at random among the best-scoring moves.
        """
        # Collect legal moves and score each resulting (state, action) pair.
        legalMoves = gameState.getLegalActions()
        scores = [self.evaluationFunction(gameState, action) for action in legalMoves]
        # Choose randomly among the moves that achieve the best score.
        bestScore = max(scores)
        bestIndices = [index for index in range(len(scores)) if scores[index] == bestScore]
        chosenIndex = random.choice(bestIndices)
        return legalMoves[chosenIndex]

    def evaluationFunction(self, currentGameState, action):
        """Score taking *action* from *currentGameState*.

        Heuristic: heavily penalise stopping or stepping onto a ghost that
        is not scared; otherwise prefer positions close to the nearest
        remaining food pellet (negated Manhattan distance, so closer is
        higher).  Returns 0 when no food remains (the original returned a
        huge bogus negative distance in that case).
        """
        if action == 'Stop':
            return -10000000  # never reward standing still
        successorGameState = currentGameState.generatePacmanSuccessor(action)
        newPos = successorGameState.getPacmanPosition()
        # Stepping onto a non-scared ghost is (near) certain death.
        for ghost in successorGameState.getGhostStates():
            if manhattanDistance(ghost.getPosition(), newPos) == 0 and ghost.scaredTimer == 0:
                return -10000000
        # Food is read from the *current* state so a pellet eaten by this
        # move still counts as distance 0.
        foodList = currentGameState.getFood().asList()
        if not foodList:
            return 0
        return -min(manhattanDistance(food, newPos) for food in foodList)
def scoreEvaluationFunction(currentGameState):
    """Default state evaluation: the raw game score.

    Returns the same number that the Pacman GUI displays.  Intended for
    adversarial search agents (not reflex agents), which evaluate states
    rather than (state, action) pairs.
    """
    score = currentGameState.getScore()
    return score
class MultiAgentSearchAgent(Agent):
    """
    Shared base class for MinimaxPacmanAgent, AlphaBetaPacmanAgent and
    ExpectimaxPacmanAgent.

    Abstract: not meant to be instantiated directly.  It only stores the
    common configuration — Pacman's agent index, the evaluation function
    (looked up by name), and the search depth — for the concrete
    adversarial-search agents to use.
    """

    def __init__(self, evalFn = 'scoreEvaluationFunction', depth = '2'):
        # Pacman is always agent index 0; ghosts are indices >= 1.
        self.index = 0
        # Search depth arrives as a string from the command line.
        self.depth = int(depth)
        # Resolve the evaluation function name to a callable.
        self.evaluationFunction = util.lookup(evalFn, globals())
class MinimaxAgent(MultiAgentSearchAgent):
"""
This agent returns the minimax action from the current gameState using self.depth and self.evaluationFunction.
It uses a simple recursive computation of the minimax values of each successor state. The recursion proceeds
all the way down to the leaves of the tree , and then the values are backed up through the tree
as the recursion unwinds.
"""
def getAction(self, gameState):
"""
Returns the minimax action from the current gameState using self.depth
and self.evaluationFunction.
Here are some method calls that might be useful when implementing minimax.
gameState.getLegalActions(agentIndex):
Returns a list of legal actions for an agent
agentIndex=0 means Pacman, ghosts are >= 1
gameState.generateSuccessor(agentIndex, action):
Returns the successor game state after an agent takes an action
gameState.getNumAgents():
Returns the total number of agents in the game
"""
agentIndex=0
gridDepth=self.depth
NumAgents = gameState.getNumAgents();
decision=["", float('0')]
if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
decision = self.evaluationFunction(gameState)
decision = self.pacmanTurn(gameState, gridDepth, NumAgents, agentIndex)
return decision[0]
def noOfGhosts(self,NumAgents,turn):
return NumAgents-turn
def pacmanTurn(self, gameState, gridDepth, NumAgents, agentIndex):
"""
The Max agent function which internally calls Min agent
"""
v = ["", -float('inf')]
val = float('0')
decision1=[]
if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
return self.evaluationFunction(gameState)
for legal_action in gameState.getLegalActions(agentIndex):
if legal_action == "Stop":
continue
successorState = gameState.generateSuccessor(agentIndex, legal_action)
decision1.append(self.ghostTurn(successorState, gridDepth, self.noOfGhosts(NumAgents,2), NumAgents, (agentIndex+1)%NumAgents))
for x in decision1:
if isinstance(x,float):
val=x
else:
for x1 in x:
if isinstance(x1,float):
val=x1
if val > v[1]:
#Update the value so as to choose the Maximum among the values returned by evaluationFunction
v = [legal_action, val] #Return the legal action with Max value
return v
    def ghostTurn(self, gameState, gridDepth, numGhosts, NumAgents, agentIndex):
        """The Min agent: choose the minimum-value move for one ghost.

        numGhosts counts the ghosts still to move after this one; when it
        reaches zero the next turn is Pacman's, one ply deeper.
        """
        # Best [action, value] so far; value starts at +infinity (minimizer).
        v = ["", float('inf')]
        val = float('0')
        decision1=[]
        # Leaf: depth exhausted or terminal state -> static evaluation.
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            return self.evaluationFunction(gameState)
        # No legal moves for this agent: fall back to the static evaluation.
        if not gameState.getLegalActions(agentIndex):
            return self.evaluationFunction(gameState)
        for legal_action in gameState.getLegalActions(agentIndex):
            if legal_action == "Stop":
                continue
            successorState = gameState.generateSuccessor(agentIndex, legal_action)
            if(numGhosts == 0):
                # Last ghost this ply: hand the turn back to Pacman, depth-1.
                decision1.append(self.pacmanTurn(successorState, gridDepth-1, NumAgents, (agentIndex+1)%NumAgents))
            else:
                decision1.append(self.ghostTurn(successorState, gridDepth, self.noOfGhosts(numGhosts,1), NumAgents, (agentIndex+1)%NumAgents))
            # Extract the numeric value of the latest successor from the list
            # of floats / [action, value] pairs accumulated so far.
            for x in decision1:
                if isinstance(x,float):
                    val=x
                else:
                    for x1 in x:
                        if isinstance(x1,float):
                            val=x1
            if val < v[1]:
                #Update the value so as to choose the Minmum among the values returned by evaluationFunction
                v = [legal_action, val] #Return the legal action with minimum value
        return v
class AlphaBetaAgent(MultiAgentSearchAgent):
    """
    Your minimax agent with alpha-beta pruning. Returns the alpha-beta action using self.depth and self.evaluationFunction
    Here we prune the search tree when Alpha value surpasses Beta value.
    """
    def getAction(self, gameState):
        """
        Returns the minimax action using self.depth and self.evaluationFunction
        """
        agentIndex=0
        gridDepth=self.depth
        NumAgents = gameState.getNumAgents();
        # Initial alpha/beta window is (-inf, +inf).
        alpha = -float('inf')
        beta = float('inf')
        decision=["", float('0')]
        # NOTE(review): this terminal-state branch is dead code -- its result
        # is overwritten unconditionally by the pacmanTurn call below.
        if(gridDepth == 0 or gameState.isWin() or gameState.isLose()):
            decision = self.evaluationFunction(gameState)
        # Depth is counted in individual agent moves here (depth * num agents),
        # unlike MinimaxAgent which counts full plies.
        decision = self.pacmanTurn(gameState, gridDepth*NumAgents, NumAgents, agentIndex, alpha, beta)
        return decision[0]
    def pacmanTurn(self, gameState, gridDepth, NumAgents, agentIndex, alpha, beta):
        """
        The Max agent function which internally calls Min agent
        """
        v = ["", -float('inf')]
        val = float('0')
        decision1=[]
        # Leaf: depth exhausted or terminal state -> static evaluation (float).
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            return self.evaluationFunction(gameState)
        for legal_action in gameState.getLegalActions(agentIndex):
            if legal_action == "Stop":
                continue
            successorState = gameState.generateSuccessor(agentIndex, legal_action)
            decision1.append( self.ghostTurn(successorState, gridDepth-1, NumAgents-2, NumAgents, (agentIndex+1)%NumAgents, alpha, beta))
            # Extract the numeric value of the latest successor from the list
            # of floats / [action, value] pairs accumulated so far.
            for x in decision1:
                if isinstance(x,float):
                    val=x
                else:
                    for x1 in x:
                        if isinstance(x1,float):
                            val=x1
            if val > v[1]:
                #Update the value so as to choose the Maximum among the values returned by evaluationFunction
                v = [legal_action, val] #Return the legal action with Max value
            #Update alpha value in pacman turn
            alpha = max(alpha, v[1])
            #Check whether Alpha value surpasses Beta in order to prune the search space
            if alpha > beta:
                return v
        return v
    def ghostTurn(self, gameState, gridDepth, NumMins, NumAgents, agentIndex, alpha, beta):
        """
        The Min agent function which internally calls either Max or Min based on number of agents remained
        """
        v = ["", float('inf')]
        val = float('0')
        decision1=[]
        # Leaf: depth exhausted or terminal state -> static evaluation.
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            return self.evaluationFunction(gameState)
        # No legal moves: fall back to the static evaluation.
        if not gameState.getLegalActions(agentIndex):
            return self.evaluationFunction(gameState)
        for legal_action in gameState.getLegalActions(agentIndex):
            if legal_action == "Stop":
                continue
            successorState = gameState.generateSuccessor(agentIndex, legal_action)
            if(NumMins == 0):
                # Last minimizer this ply: Pacman (maximizer) moves next.
                decision1.append( self.pacmanTurn(successorState, gridDepth-1, NumAgents, (agentIndex+1)%NumAgents, alpha, beta))
            else:
                decision1.append(self.ghostTurn(successorState, gridDepth-1, NumMins-1, NumAgents, (agentIndex+1)%NumAgents, alpha, beta))
            # Extract the numeric value of the latest successor.
            for x in decision1:
                if isinstance(x,float):
                    val=x
                else:
                    for x1 in x:
                        if isinstance(x1,float):
                            val=x1
            if val < v[1]:
                #Update the value so as to choose the Minmum among the values returned by evaluationFunction
                v = [legal_action, val] #Return the legal action with minimum value
            #update beta value in ghost turn
            beta = min(beta, v[1])
            #Check whether Alpha value surpasses Beta in order to prune the search space
            if beta < alpha:
                return v
        return v
class ExpectimaxAgent(MultiAgentSearchAgent):
    """
    Advanced case of MinimaxAgent which takes into consideration randomness of the ghost. Here the Max function(pacmanTurn)
    is kept the same as that of MinimaxAgent. The expectiGhost() function in place of ghostTurn(), takes care of randomness
    of the ghosts
    """
    def getAction(self, gameState):
        """
        Returns the expectimax action using self.depth and self.evaluationFunction
        All ghosts are modeled as choosing uniformly at random from their
        legal moves.
        """
        agentIndex=0
        gridDepth=self.depth
        NumAgents = gameState.getNumAgents();
        decision=["", float('0')]
        # NOTE(review): this terminal-state branch is dead code -- its result
        # is overwritten unconditionally by the pacmanTurn call below.
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            decision = self.evaluationFunction(gameState)
        decision = self.pacmanTurn(gameState,gridDepth, NumAgents, agentIndex)
        return decision[0]
    def noOfGhosts(self, NumAgents, turn):
        """
        returns the number of remaining ghosts
        """
        return NumAgents-turn
    def pacmanTurn(self, gameState, gridDepth, NumAgents, agentIndex):
        """
        The Max agent function which internally calls Min agent
        """
        v = ["", -float('inf')]
        val = float('0')
        decision1=[]
        # Leaf: depth exhausted or terminal state -> static evaluation (float).
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            return self.evaluationFunction(gameState)
        for legal_action in gameState.getLegalActions(agentIndex):
            if legal_action == "Stop":
                continue
            successorState = gameState.generateSuccessor(agentIndex, legal_action)
            decision1.append(self.expectiGhost(successorState, gridDepth, self.noOfGhosts(NumAgents,2), NumAgents, (agentIndex+1)%NumAgents))
            # Extract the numeric value of the latest successor from the list
            # of floats / [action, value] pairs accumulated so far.
            for x in decision1:
                if isinstance(x,float):
                    val = x
                else:
                    for x1 in x:
                        if isinstance(x1, float):
                            val = x1
            if val > v[1]:
                #Update the value so as to choose the Maximum among the values returned by evaluationFunction
                v = [legal_action, val] #Return the legal action with Max value
        return v
    def expectiGhost(self, gameState, gridDepth, numGhosts, NumAgents, agentIndex):
        """
        The Min agent function which internally calls either Max or Min based on number of agents remained.
        Here instead of always choosing minimum value in GhostTurn (Min), we write write another function expectiGhost,
        which takes into consideration randomness of the ghost. It calculates the the average of all the values returned
        by evaluation function.
        """
        # Accumulates the expected value across this ghost's moves.
        v = ["", 0]
        val = float('0')
        decision1 = []
        # Leaf: depth exhausted or terminal state -> static evaluation.
        if(gridDepth==0 or gameState.isWin() or gameState.isLose()):
            return self.evaluationFunction(gameState)
        # No legal moves: fall back to the static evaluation.
        if not gameState.getLegalActions(agentIndex):
            return self.evaluationFunction(gameState)
        #The multiplication factor for finding average of the values returned by evaluationFunction
        # NOTE(review): the uniform weight counts "Stop" even though that move
        # is skipped below, so the weights do not sum to 1 when Stop is legal.
        probability = 1.0/len(gameState.getLegalActions(agentIndex))
        for legal_action in gameState.getLegalActions(agentIndex):
            if legal_action == "Stop":
                continue
            successorState = gameState.generateSuccessor(agentIndex, legal_action)
            if(numGhosts == 0):
                # Last ghost this ply: Pacman (maximizer) moves next, depth-1.
                decision1.append(self.pacmanTurn(successorState, gridDepth-1, NumAgents, (agentIndex+1)%NumAgents))
            else:
                decision1.append(self.expectiGhost(successorState, gridDepth, self.noOfGhosts(numGhosts,1), NumAgents, (agentIndex+1)%NumAgents))
            # Extract the numeric value of the latest successor.
            for x in decision1:
                if isinstance(x,float):
                    val=x
                else:
                    for x1 in x:
                        if isinstance(x1,float):
                            val=x1
            #the multiplication factor is multiplied to each value and the average is calculated
            v[1] += val*probability
            v[0] = legal_action
        return v
def betterEvaluationFunction(currentGameState):
    """
    Your extreme ghost-hunting, pellet-nabbing, food-gobbling, unstoppable
    evaluation function (question 5).
    DESCRIPTION: <write something here so we know what you did>
    """
    # Unimplemented assignment stub: util.raiseNotDefined() aborts with a
    # "method not implemented" message when this question is run.
    "*** YOUR CODE HERE ***"
    util.raiseNotDefined()
# Abbreviation
better = betterEvaluationFunction
| [
"dhanashripp@gmail.com"
] | dhanashripp@gmail.com |
857f35afbdcd66b55da744ea9218943a36d467af | a193ed2a98f0f53e9a8b68641e4bd9c45c8b1c4d | /main.py | 0c07b8c8f00ccc7e72160de8a78291bac897da90 | [] | no_license | silvanwalz/pythonkurs | c9fbedc784d525d8a0d7112455581c8f0c9d9a66 | ba43c1e3272e06fa81915674cd9d95e5ab969574 | refs/heads/main | 2023-08-16T20:48:05.857996 | 2021-10-05T14:48:39 | 2021-10-05T14:48:39 | 408,350,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from module1.file1 import importfun
def main():
    # Entry point: delegate all work to module1.file1.importfun.
    importfun()
if __name__ == '__main__':
main() | [
"silvan.walz@hotmail.ch"
] | silvan.walz@hotmail.ch |
0da0f290f94f52c7dd23b74744834633d0fd949c | e5135867a8f2f5923b21523489c8f246d9c5a13a | /kaleo/management/commands/infinite_invites.py | b69ea50f7cd1935b122e67b8b0c4cb9bd4126e13 | [
"BSD-3-Clause"
] | permissive | exitio/kaleo | 01574cc0675211a586995e08a4e19b6a1c9063ee | 53e73e0acf3429d83b45e6b22b1a6ec76ac69c12 | refs/heads/master | 2021-01-21T01:27:44.609605 | 2013-01-31T15:50:10 | 2013-01-31T15:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 471 | py | import sys
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from kaleo.models import InvitationStat
class Command(BaseCommand):
    # Django management command: `manage.py infinite_invites`.
    help = "Sets invites_allocated to -1 to represent infinite invites."
    def handle(self, *args, **kwargs):
        # Give every existing user an InvitationStat row (created on demand)
        # with the -1 sentinel meaning "unlimited invites".
        for user in User.objects.all():
            stat, _ = InvitationStat.objects.get_or_create(user=user)
            stat.invites_allocated = -1
            stat.save()
| [
"me@ntucker.me"
] | me@ntucker.me |
d924c27a884790f3eccceeacebb5b4ef409f3586 | da5ef82554c6c0413193b7c99192edd70fed58dd | /mozdns/soa/tests.py | 529c22573446ed774dbfbfa53a43c6d989673fa2 | [] | no_license | rtucker-mozilla/mozilla_inventory | d643c7713c65aa870e732e18aaf19ce677e277b7 | bf9154b0d77705d8c0fe1a9a35ce9c1bd60fcbea | refs/heads/master | 2020-12-24T17:17:37.621418 | 2013-04-11T10:39:41 | 2013-04-11T10:39:41 | 2,709,399 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,757 | py | from django.test import TestCase
from django.core.exceptions import ValidationError
from mozdns.soa.models import SOA
from mozdns.domain.models import Domain
class SOATests(TestCase):
    """Unit tests for the SOA model: creation and uniqueness, field
    validation, deletion constraints, domain/SOA inheritance chaining,
    and the serial/dirty-flag interaction."""
    def setUp(self):
        pass
    def do_generic_add(self, primary, contact, retry, refresh, description):
        # Helper: create and save an SOA (saving twice must not duplicate it)
        # and assert exactly one matching row exists afterwards.
        soa = SOA(primary=primary, contact=contact,
                  retry=retry, refresh=refresh, description=description)
        soa.save()
        soa.save()
        rsoa = SOA.objects.filter(primary=primary, contact=contact,
                                  retry=retry, refresh=refresh)
        self.assertTrue(len(rsoa) == 1)
        return soa
    def test_add_soa(self):
        # Added SOAs expose details/url helpers; omitting serial/expire/
        # retry/refresh auto-fills them on save.
        primary = "ns1.oregonstate.edu"
        contact = "admin.oregonstate.edu"
        retry = 1234
        refresh = 1234123
        description = "1"
        self.do_generic_add(
            primary, contact, retry, refresh, description=description)
        soa = SOA.objects.filter(primary=primary, contact=contact,
                                 retry=retry, refresh=refresh)
        soa[0].save()
        self.assertTrue(soa)
        soa[0].__repr__()
        soa = soa[0]
        self.assertTrue(soa.details())
        self.assertTrue(soa.get_absolute_url())
        self.assertTrue(soa.get_edit_url())
        self.assertTrue(soa.get_delete_url())
        primary = "do.com"
        contact = "admf.asdf"
        retry = 432152
        refresh = 1235146134
        description = "2"
        self.do_generic_add(
            primary, contact, retry, refresh, description=description)
        soa = SOA.objects.filter(primary=primary, contact=contact,
                                 retry=retry, refresh=refresh)
        self.assertTrue(soa)
        soa = soa[0]
        self.assertTrue(soa.details())
        self.assertTrue(soa.get_absolute_url())
        self.assertTrue(soa.get_edit_url())
        self.assertTrue(soa.get_delete_url())
        primary = "ns1.derp.com"
        contact = "admf.asdf"
        soa = SOA(primary=primary, contact=contact)
        soa.save()
        # Defaults must be populated when not supplied explicitly.
        self.assertTrue(
            soa.serial and soa.expire and soa.retry and soa.refresh)
        self.assertTrue(soa.details())
        self.assertTrue(soa.get_absolute_url())
        self.assertTrue(soa.get_edit_url())
        self.assertTrue(soa.get_delete_url())
    def test_add_remove(self):
        # Delete removes the row; re-adding the same tuple works once and a
        # duplicate add raises ValidationError.
        primary = "ns2.oregonstate.edu"
        contact = "admin.oregonstate.edu"
        retry = 1234
        refresh = 1234123
        description = "3"
        soa = self.do_generic_add(
            primary, contact, retry, refresh, description=description)
        soa.delete()
        soa = SOA.objects.filter(primary=primary, contact=contact,
                                 retry=retry, refresh=refresh)
        self.assertTrue(len(soa) == 0)
        primary = "dddo.com"
        contact = "admf.asdf"
        retry = 432152
        refresh = 1235146134
        description = "4"
        soa = self.do_generic_add(
            primary, contact, retry, refresh, description=description)
        soa.delete()
        soa = SOA.objects.filter(primary=primary, contact=contact, retry=retry,
                                 refresh=refresh, description=description)
        self.assertTrue(len(soa) == 0)
        # Add dup
        description = "4"
        soa = self.do_generic_add(
            primary, contact, retry, refresh, description=description)
        soa.save()
        self.assertRaises(ValidationError, self.do_generic_add, *(
            primary, contact, retry, refresh, description))
    def test_add_invalid(self):
        # Malformed primary/contact hostnames must fail validation on save.
        data = {'primary': "daf..fff", 'contact': "foo.com"}
        soa = SOA(**data)
        self.assertRaises(ValidationError, soa.save)
        data = {'primary': 'foo.com', 'contact': 'dkfa..'}
        soa = SOA(**data)
        self.assertRaises(ValidationError, soa.save)
        data = {'primary': 'adf', 'contact': '*@#$;'}
        soa = SOA(**data)
        self.assertRaises(ValidationError, soa.save)
    def test_delete_with_domains(self):
        # An SOA referenced by a Domain must refuse deletion.
        data = {'primary': "ns1asfdadsf.foo.com", 'contact': "email.foo.com"}
        soa = SOA(**data)
        soa.save()
        d0 = Domain(name='com')
        d0.save()
        d1 = Domain(name='foo.com', soa=soa)
        d1.soa = soa
        d1.save()
        self.assertRaises(ValidationError, soa.delete)
    def test_chain_soa_domain_add(self):
        # Subdomains created under an SOA keep pointing at that SOA, and
        # deleting leaf domains leaves the ancestors' association intact.
        data = {'primary': "ns1.foo.com", 'contact': "email.foo.com"}
        soa = SOA(**data)
        soa.save()
        d0 = Domain(name='com')
        d0.save()
        d1 = Domain(name='foo.com', soa=soa)
        d1.save()
        self.assertTrue(soa == d1.soa)
        d2 = Domain(name='bar.foo.com', soa=soa)
        d2.save()
        self.assertTrue(soa == d2.soa)
        d3 = Domain(name='new.foo.com', soa=soa)
        d3.save()
        self.assertTrue(soa == d3.soa)
        d4 = Domain(name='far.bar.foo.com', soa=soa)
        d4.save()
        self.assertTrue(soa == d4.soa)
        d5 = Domain(name='tee.new.foo.com', soa=soa)
        d5.save()
        self.assertTrue(soa == d5.soa)
        d5.delete()
        d4.delete()
        self.assertTrue(soa == d1.soa)
        self.assertTrue(soa == d2.soa)
        self.assertTrue(soa == d3.soa)
    def test_update_serial_no_dirty(self):
        # If we update the serial, the dirty bit shouldn't change.
        data = {'primary': "fakey.ns1.asdffoo.com", 'contact':
                "adsffoopy.email.foo.com"}
        soa = SOA(**data)
        soa.save()  # new soa's are always dirty
        soa.dirty = False
        soa.save()
        soa.serial = soa.serial + 9
        soa.save()
        same_soa = SOA.objects.get(pk=soa.pk)
        self.assertFalse(same_soa.dirty)
| [
"uberj@onid.orst.edu"
] | uberj@onid.orst.edu |
a51011f924027a6676b24ba50a4fec72f5de0550 | b1fb632e96d85a4bd4f9f1869494147e70236546 | /model/attention_contextualGRU.py | 740ed6cb6359ee1881d3a02f3f98632c1af35b6f | [] | no_license | titanbt/contextualGRU-attention-lexicon | c9fb946550b99e1f1ac0678df5d3db363d3200d2 | 55f5b032a40a199bcb18c335f0cd908980080ce8 | refs/heads/master | 2021-06-25T04:06:53.764818 | 2018-03-29T06:56:32 | 2018-03-29T06:56:32 | 95,623,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,491 | py | from ConfigParser import SafeConfigParser
from utils.utils import minibatches_iter
from nn.lasagne_nlp.utils import utils
from loader.data_processor import DataProcessor
import codecs
import theano
from nn.attention_contexualGRU_net import Attention_ContexualGRU_Net
import numpy as np
from utils.utils import write_model_data
from utils.ProgressBar import ProgressBar
from utils.utils import get_lex_file_list
from loader.lex_helper import LexHelper
from loader.context_helper import ContextHelper
from utils.utils import compute_f1_score
np.random.seed(1999)
class Attention_ContexualGRU(object):
    """Driver for the attention contextual-GRU sentiment model.

    Reads all hyper-parameters and file paths from an INI config file,
    loads word/char/lexicon/context data, then trains and/or evaluates an
    Attention_ContexualGRU_Net (Theano/Lasagne, Python 2).
    """
    def __init__(self, config=None, opts=None):
        # not enough info to execute
        if config == None and opts == None:
            print "Please specify command option or config file ..."
            return
        # All settings live in the [model] section of the config file.
        parser = SafeConfigParser()
        parser.read(config)
        self.train_file = parser.get('model', 'train_file')
        self.dev_file = parser.get('model', 'dev_file')
        self.test_file = parser.get('model', 'test_file')
        self.word_column = parser.getint('model', 'word_column')
        self.label_column = parser.getint('model', 'label_column')
        self.oov = parser.get('model', 'oov')
        self.fine_tune = parser.getboolean('model', 'fine_tune')
        self.embedding = parser.get('model', 'embedding')
        self.embedding_path = parser.get('model', 'embedding_path')
        self.use_character = parser.getboolean('model', 'use_character')
        self.batch_size = parser.getint('model', 'batch_size')
        self.num_epochs = parser.getint('model', 'num_epochs')
        self.patience = parser.getint('model', 'patience')
        self.valid_freq = parser.getint('model', 'valid_freq')
        # L2 is given as comma-separated values; each is halved (lambda/2).
        self.L2 = [float(x)/2 for x in parser.get('model', 'L2').split(',')]
        self.num_units = parser.getint('model', 'num_units')
        self.num_filters = list(map(int, parser.get('model', 'num_filters').split(',')))
        self.filter_size = list(map(int, parser.get('model', 'filter_size').split(',')))
        self.peepholes = parser.getboolean('model', 'peepholes')
        self.grad_clipping = parser.getfloat('model', 'grad_clipping')
        self.dropout = parser.getfloat('model', 'dropout')
        self.regular = parser.get('model', 'regular')
        self.gamma = parser.getfloat('model', 'gamma')
        self.learning_rate = parser.getfloat('model', 'learning_rate')
        self.update_algo = parser.get('model', 'update_algo')
        self.momentum = parser.getfloat('model', 'momentum')
        self.decay_rate = parser.getfloat('model', 'decay_rate')
        self.output_predict = parser.getboolean('model', 'output_predict')
        self.model_path = parser.get('model', 'model_path')
        self.training = parser.getboolean('model', 'training')
        self.params_file = parser.get('model', 'params_file')
        # The literal string 'None' in the config means "no saved params".
        if self.params_file == 'None':
            self.params_file = None
        self.lex_path = parser.get('model', 'lex_path')
        self.embedding_context = parser.get('model', 'embedding_context')
        self.embedding_context_path = parser.get('model', 'embedding_context_path')
        # Containers filled by setupOperators().
        self.data = {'X_train': [], 'Y_train': [], 'mask_train': [],
                     'X_dev': [], 'Y_dev': [], 'mask_dev': [],
                     'X_test': [], 'Y_test': [], 'mask_test': [],
                     'embedd_table': [], 'label_alphabet': [],
                     'C_train': [], 'C_dev': [], 'C_test': [], 'char_embedd_table': []
                     }
        self.lexicons = {'lexicons_train': [], 'lexicons_dev': [], 'lexicons_test': []}
        self.contexts = []
        self.setupOperators()
    def setupOperators(self):
        """Load word/char data, lexicon embeddings and context embeddings."""
        print('Loading the training data...')
        self.reader = DataProcessor(self.train_file, self.dev_file,
                                    self.test_file,
                                    word_column=self.word_column,
                                    label_column=self.label_column,
                                    oov=self.oov,
                                    fine_tune=self.fine_tune,
                                    embedding=self.embedding,
                                    embedding_path=self.embedding_path,
                                    use_character=self.use_character)
        self.data = self.reader.loadData()
        sentences_train, sentences_dev, sentences_test, padlen, word_alphabet = self.reader.load_sentences()
        lex_list = get_lex_file_list(self.lex_path)
        self.lex = LexHelper(lex_list, sentences_train, sentences_dev, sentences_test, padlen)
        self.lexicons["lexicons_train"], self.lexicons["lexicons_dev"], self.lexicons["lexicons_test"],\
        self.lex_dim = self.lex.build_lex_embeddings()
        self.context = ContextHelper(self.embedding_context, self.embedding_context_path, word_alphabet)
        self.contexts = self.context.build_context()
        print('Loading the data successfully!')
    def initModel(self):
        """Construct and compile the network from the loaded data/settings."""
        print "Building model..."
        self.model = Attention_ContexualGRU_Net(X_train=self.data['X_train'], C_train=self.data['C_train'],
                                                embedd_table=self.data['embedd_table'], char_embedd_table = self.data['char_embedd_table'], label_alphabet=self.data['label_alphabet'],
                                                num_units=self.num_units, num_filters=self.num_filters, filter_size=self.filter_size, peepholes=self.peepholes,
                                                grad_clipping=self.grad_clipping, dropout=self.dropout, regular=self.regular,
                                                gamma=self.gamma, learning_rate=self.learning_rate, update_algo=self.update_algo,
                                                momentum=self.momentum, fine_tune=self.fine_tune, L2=self.L2,
                                                params_file=self.params_file, model_path=self.model_path, embedd_lex_dim=self.lex_dim,
                                                context_table=self.contexts)
        self.model.buildModel()
        print "Finish building model!"
    def executeModel(self):
        """Run training and/or testing depending on the config flags."""
        if self.training:
            print 'Training Model...'
            self.trainingModel()
        if self.params_file is not None:
            print 'Testing Model...'
            self.testModel()
    def trainingModel(self):
        """Mini-batch training loop with periodic dev validation; evaluates
        on test and checkpoints whenever dev accuracy improves."""
        self.initModel()
        best_acc = 0
        best_validation_accuracy = 0
        stop_count = 0
        lr = self.learning_rate
        patience = self.patience
        n_dev_samples, max_length = self.data['X_dev'].shape
        n_test_samples, max_length = self.data['X_test'].shape
        for epoch in range(1, self.num_epochs + 1):
            print 'Epoch %d (learning rate=%.4f, decay rate=%.4f): ' % (epoch, lr, self.decay_rate)
            train_err = 0.0
            train_batches = 0
            train_bar = ProgressBar('Training', max=len(self.data['X_train']))
            for batch in minibatches_iter(self.data['X_train'], self.data['Y_train'], masks=self.data['mask_train'],
                                          char_inputs=self.data['C_train'],
                                          lexicons=self.lexicons['lexicons_train'],
                                          batch_size=self.batch_size, shuffle=True):
                inputs, targets, masks, char_inputs, lexicons = batch
                err = self.model.train_fn(inputs, targets, masks, char_inputs, lexicons)
                train_err += err
                train_bar.next(len(inputs))
                # Every valid_freq batches: check dev accuracy.
                if train_batches > 0 and train_batches % self.valid_freq == 0:
                    accuracy_valid = []
                    for batch in minibatches_iter(self.data['X_dev'], self.data['Y_dev'],
                                                  masks=self.data['mask_dev'], lexicons=self.lexicons['lexicons_dev'],
                                                  char_inputs=self.data['C_dev'], batch_size=self.batch_size):
                        inputs, targets, masks, char_inputs, lexicons = batch
                        accuracy_valid.append(self.model.eval_fn(inputs, targets, masks, char_inputs, lexicons))
                    this_validation_accuracy = np.concatenate(accuracy_valid)[0:n_dev_samples].sum() / float(n_dev_samples)
                    if this_validation_accuracy > best_validation_accuracy:
                        print("\nTrain loss, " + str((train_err / self.valid_freq)) + ", validation accuracy: " + str(this_validation_accuracy * 100) + "%")
                        best_validation_accuracy = this_validation_accuracy
                        # New best dev score: measure test accuracy too.
                        preds_test = []
                        accuracy_test = []
                        for batch in minibatches_iter(self.data['X_test'], self.data['Y_test'],
                                                      masks=self.data['mask_test'],
                                                      char_inputs=self.data['C_test'], lexicons=self.lexicons['lexicons_test'],
                                                      batch_size=self.batch_size):
                            inputs, targets, masks, char_inputs, lexicons = batch
                            _, preds = self.model.test_fn(inputs, targets, masks, char_inputs, lexicons)
                            preds_test.append(preds)
                            accuracy_test.append(self.model.eval_fn(inputs, targets, masks, char_inputs, lexicons))
                        this_test_accuracy = np.concatenate(accuracy_test)[0:n_test_samples].sum() / float(n_test_samples)
                        # print "F1-score: " + str(compute_f1_score(self.data["Y_test"], preds_test, self.data['label_alphabet']) * 100)
                        print("Test accuracy: " + str(this_test_accuracy * 100) + "%")
                        if best_acc < this_test_accuracy:
                            best_acc = this_test_accuracy
                            # Checkpoint the best-so-far network parameters.
                            write_model_data(self.model.network, self.model_path + '/best_model')
                    train_err = 0
                train_batches += 1
            train_bar.finish()
            # stop if dev acc decrease 3 time straightly.
            # NOTE(review): stop_count is never incremented anywhere, so this
            # early-stopping check can never trigger -- verify intent.
            if stop_count == patience:
                break
            # re-compile a function with new learning rate for training
            if self.update_algo != 'adadelta':
                lr = self.learning_rate / (1.0 + epoch * self.decay_rate)
                updates = utils.create_updates(self.model.loss_train, self.model.params, self.update_algo, lr, momentum=self.momentum)
                self.model.train_fn = theano.function([self.model.input_var, self.model.target_var, self.model.mask_var, self.model.char_input_var, self.model.lex_var],
                                                      outputs=self.model.loss_train,
                                                      updates=updates, allow_input_downcast=True)
            print("Epoch " + str(epoch) + " finished.")
        print("The final best acc: " + str(best_acc*100) + "%")
        if self.output_predict:
            # Append the best accuracy to the cross-validation results file.
            f = codecs.open('./results/10-fold.txt', 'a+', 'utf-8')
            f.write(str(best_acc*100)+'\n')
            f.close()
    def testModel(self):
        """Evaluate the (re)built model over the whole test set and print
        the overall accuracy."""
        n_test_samples, max_length = self.data['X_test'].shape
        accuracy_test = []
        preds_test = []
        self.initModel()
        test_bar = ProgressBar('Testing', max=len(self.data['X_test']))
        for batch in minibatches_iter(self.data['X_test'], self.data['Y_test'],
                                      masks=self.data['mask_test'],
                                      char_inputs=self.data['C_test'], lexicons=self.lexicons['lexicons_test'],
                                      batch_size=self.batch_size):
            inputs, targets, masks, char_inputs, lexicons = batch
            test_bar.next(len(inputs))
            corrects = self.model.eval_fn(inputs, targets, masks, char_inputs, lexicons)
            _, preds = self.model.test_fn(inputs, targets, masks, char_inputs, lexicons)
            preds_test.append(preds)
            accuracy_test.append(corrects)
        # Trim the padded tail batches back to the true sample count.
        this_test_accuracy = np.concatenate(accuracy_test)[0:n_test_samples].sum() / float(n_test_samples)
        test_bar.finish()
        print("Test accuracy: " + str(this_test_accuracy * 100) + "%")
# compute_f1_score(self.data['Y_test'], preds_test) | [
"huy.nguyen.plus@gmail.com"
] | huy.nguyen.plus@gmail.com |
6f5d17d41d1ec5ecbe55d5b7781cedf299a74aa5 | acb5667fbb95ac826828052f6e924acc50088de4 | /db/subscriptions.py | 4fe3f7d59a0fac215615d961cb44ab78bcab9f9f | [
"MIT"
] | permissive | mcastellin/pnotifier_bot | 486cd6c51d69e20d8c92394b477cb7d0d588ce2e | 33d94cb224436c6c965165f97a8892e5a620a90c | refs/heads/main | 2023-08-07T02:39:35.359599 | 2021-10-07T06:24:34 | 2021-10-07T06:24:34 | 373,787,341 | 0 | 0 | MIT | 2021-10-06T14:34:34 | 2021-06-04T09:19:05 | Python | UTF-8 | Python | false | false | 2,176 | py | import logging
import boto3
import sys
import os
TABLE_NAME = "subscriptions"
DYNAMODB_ENDPOINT_URL = os.getenv("DYNAMODB_ENDPOINT_URL", "http://localhost:4566")
ddb = boto3.resource(
"dynamodb",
aws_access_key_id="00000",
aws_secret_access_key="0000",
endpoint_url=DYNAMODB_ENDPOINT_URL,
region_name="eu-west-1",
)
def get_subscription(chat_id):
    """Return the active subscription item for *chat_id*, or None."""
    table = ddb.Table(TABLE_NAME)
    key = {"chat_id": chat_id, "subscription_status": "true"}
    response = table.get_item(Key=key)
    # DynamoDB includes "Item" in the response only when the row exists.
    return response.get("Item")
def add_subscription(chat_id, first_name):
    """Create (or overwrite) the active subscription row for this chat."""
    item = {
        "chat_id": chat_id,
        "first_name": first_name,
        "subscription_status": "true",
    }
    ddb.Table(TABLE_NAME).put_item(Item=item)
def remove_subscription(chat_id):
    """Delete the active subscription for *chat_id*.

    Returns True when a subscription existed and was removed, False when
    there was nothing to delete.
    """
    existing = get_subscription(chat_id)
    if existing is None:
        return False
    key = {"chat_id": chat_id, "subscription_status": "true"}
    ddb.Table(TABLE_NAME).delete_item(Key=key)
    return True
def _create_table():
    """Create the subscriptions table and block until it exists."""
    # Composite key: chat_id (number) hash key + subscription_status (string)
    # range key, matching the Key dicts used by the CRUD helpers above.
    subscriptions = ddb.create_table(
        TableName=TABLE_NAME,
        KeySchema=[
            {"AttributeName": "chat_id", "KeyType": "HASH"},
            {"AttributeName": "subscription_status", "KeyType": "RANGE"},
        ],
        AttributeDefinitions=[
            {"AttributeName": "chat_id", "AttributeType": "N"},
            {"AttributeName": "subscription_status", "AttributeType": "S"},
        ],
        ProvisionedThroughput={"ReadCapacityUnits": 5, "WriteCapacityUnits": 5},
    )
    # Table creation is asynchronous; wait until it is actually queryable.
    subscriptions.meta.client.get_waiter("table_exists").wait(TableName=TABLE_NAME)
# Create new table if does not exists
# (runs at import time, so any importer gets a ready table)
if TABLE_NAME not in list(map(lambda t: t.name, ddb.tables.all())):
    logging.info(f"Creating table {TABLE_NAME} in DynamoDb")
    _create_table()
if __name__ == "__main__":
    # Maintenance entry point: `python subscriptions.py delete` drops the table.
    if len(sys.argv) >= 2 and sys.argv[1] == "delete":
        print("Deleting subscriptions table!")
        subscriptions = ddb.Table(TABLE_NAME)
        subscriptions.delete()
| [
"manuel@castellinconsulting.com"
] | manuel@castellinconsulting.com |
0511554548309a1cca947a51f8f6b7629ec8ea61 | c18dc7e4c13fcfd8c282b867689fab42c3e25987 | /songer.py | 287ddc341dbd194ebe39868b7a90bdedc0093b17 | [] | no_license | KorigamiK/anime-ost-downloader | 39bfc282945acda0103d50483106ef141f06c7b9 | 5e55e2992039527451a59488e2a857a51c6e8cf1 | refs/heads/main | 2023-01-22T12:35:29.374369 | 2020-11-23T11:41:04 | 2020-11-23T11:41:04 | 315,296,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,309 | py | import requests
from bs4 import BeautifulSoup as bs
import re
from tabulate import tabulate
import subprocess
# Query the simplyaweeb AJAX search endpoint for the user's anime name.
data = {
    "action": "data_fetch",
    "keyword": input("Enter anime name: ")
}
response = requests.post("https://simplyaweeb.com/wp-admin/admin-ajax.php", data = data)
soup = bs(response.text, "html.parser")
cover = []
ref = []
results = []
# Collect cover links and OST result links from the returned anchors.
# NOTE(review): j//2 presumably pairs each result with its cover anchor
# (two <a> tags per hit) -- verify against the endpoint's markup.
for j,i in enumerate(soup.find_all('a')):
    try:
        if i["class"] == ['a-cover']:
            cover.append(i['href'])
        elif i["class"] == ['video-close', 'search-single']:
            ref.append("https://gogoanime-six.now.sh/api/v1/MusicSingleHandler/"+i["href"])
            results.append([j//2, i.text.strip()])
    except:
        # Anchors without a class attribute raise KeyError; skip them.
        pass
print(tabulate(results))
opt = int(input("Enter number: "))
k = requests.get(ref[opt])
print(ref[opt])
link = k.json()['music'][0]['music_single_url']
# Extract the wapkiz download token embedded in the osanime URL.
token = re.search(r'(download\/dl4\/)(.+)(\/o)', link).group(2)
name = link.split("/")[-1]
dow_link = f"http://dl4.wapkizfile.info/download/{token}/477539e252d5e2e8b94b2892725a838c/osanime+wapkiz+com/{name}"
def download(link):
    """Download *link* with wget, saving under the cleaned-up track name.

    The original interpolated the scraped URL and filename into a shell
    string and ran it with shell=True, which allowed shell injection via
    the remote site's data. The argument-list form below passes every
    value to wget verbatim with no shell involved.
    """
    # `name` is the module-level filename derived from the scraped link.
    outfile = name.replace("-(osanime.com)", "")
    subprocess.run(
        ["wget", link, "-q", "--show-progress", "--no-check-certificate",
         "-O", outfile]
    )
download(dow_link) | [
"noreply@github.com"
] | KorigamiK.noreply@github.com |
a2556ca3423248387c4752f977fd8c4b52bd63ec | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/135/usersdata/224/45859/submittedfiles/OBI.py | 5fea3ee2b17d98501bf3732ea5e8dc6b5487efd9 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | # -*- coding: utf-8 -*-
N = int(input('Digite o numero de competidores: '))
P = int(input('Digite o numero mínimo de ponto para as duas fases: '))
# Count of competitors whose combined two-phase score reaches the minimum P.
# Fixes in this submission: `somA=o` referenced an undefined name (now
# `soma = 0`), `range(1.n+1,1)` was a syntax error (now `range(1, N + 1)`),
# and the threshold test used `==` where "numero mínimo" implies `>=`.
soma = 0
for i in range(1, N + 1):
    x = int(input('Nota1: '))
    y = int(input('Nota2: '))
    if x + y >= P:
        soma = soma + 1
print(soma) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
b13cac275f608ebea67b00f70a7d5e7ec19afebe | fa247fbb7755cf21fa262b02fa6c36a19adae55a | /manage.py | d344b73c096cab159e05d4a868f32b29a1888f88 | [] | no_license | crowdbotics-apps/csantosmachadogmailco-897 | 9ea0f851badc9678a8148ea051131eaa711b26de | b76954f678212ece02777d3deebb3e571bfb578c | refs/heads/master | 2022-12-13T12:51:23.337978 | 2019-02-07T13:01:07 | 2019-02-07T13:01:07 | 169,572,262 | 0 | 0 | null | 2022-12-08T01:37:36 | 2019-02-07T12:59:48 | Python | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point: point Django at this project's
    # settings module, then dispatch the command-line arguments.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "csantosmachadogmailco_897.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
560e49829624c61234c4d490a90f14c97d7ea2a1 | f22ed91535c999a1749786ad03e214f01c4ddcc7 | /Angle_Metrics.py | f9dddfd9218b28edba7c923220c0013b277285d1 | [] | no_license | djbrenne/Python | 022b1643a5a9181af95cde75fa156bad4e944f47 | c1fac9870fdbad8b25a5cfaf3873abcbf1149f20 | refs/heads/master | 2020-04-07T03:21:07.462566 | 2018-11-20T18:57:59 | 2018-11-20T18:57:59 | 158,012,795 | 0 | 1 | null | 2019-06-05T19:11:48 | 2018-11-17T18:26:32 | Python | UTF-8 | Python | false | false | 17,276 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 16 2018
@author: Dylan Brenneis
"""
#Imports
import os
import csv
import numpy as np
from scipy.signal import butter, lfilter, freqz
import matplotlib.pyplot as plt
#Initialize variables
directory = "H:/Dylan Brenneis/Compensatory Movements Study/Testing/Pro00077893-03-18-1"
Bento_directory = "H:/Dylan Brenneis/Compensatory Movements Study/BrachIOPlexus Logs csv/Pro00077893-03-18-1"
Savedir = "H:/Dylan Brenneis/Compensatory Movements study/Testing/Angle Metrics/"
current_dir = ""
Bento_current_dir = ""
ProParticipant = "Pro00077893-03-18-1"
dir_list = []
Bento_dir_list = []
joint_names = ["K:Cluster:TrunkFE", "K:Cluster:TrunkLatBend", "K:Cluster:TrunkAxRot", "K:Cluster:PrmryShoFE", "K:Cluster:PrmryShoAbAd", "K:Cluster:PrmryShoIERot"]
column_names = ["M:RHND:RHND1:X","M:RHND:RHND1:Y","M:RHND:RHND1:Z","K:Cluster:TrunkFE", "K:Cluster:TrunkLatBend", "K:Cluster:TrunkAxRot", "K:Cluster:PrmryShoFE", "K:Cluster:PrmryShoAbAd", "K:Cluster:PrmryShoIERot"]
Bento_joint_names = ["POS3", "POS4"]
Bento_joint_names_real = ["WristRot","WristR/UDev"]
Bento_column_names = ["POS3","POS4","VEL5"]
# Sample rate and desired cutoff frequencies (in Hz).
fs = 120.0
Bento_fs = 250
highcut = 10.0
Bento_highcut = 10.0
buffer_time = 50 #how many timesteps at the beginning of the file to ignore due to filter settling effects
Bento_buffer_time = 50
#Saves all the important columns in the .csv file
def getcolumns(name):
    """Read the motion-capture .csv at `name` and return a dict mapping each
    header in the module-level `column_names` to its list of float values.
    """
    # BUGFIX: the original ignored the `name` argument and rebuilt the path
    # from the globals `current_dir`/`trial`; use the argument so the function
    # works for any path the caller supplies.
    with open(name, "rb") as csvfile:
        reader = csv.reader(csvfile, delimiter=",")
        # First row is the header; map wanted header names to column indices.
        col_numbers = getcolumnnums(reader)
        data_dict = {item: [] for item in column_names}
        # Collect the wanted columns from every remaining (data) row.
        for row in reader:
            for i, item in enumerate(column_names):
                data_dict[item].append(float(row[col_numbers[i]]))
    return data_dict
#Same as getcolumns, but for Bento Data.
def getbentocolumns(name):
    """Read the BrachIOPlexus (Bento) .csv at `name` and return a dict mapping
    each header in the module-level `Bento_column_names` to its float values.
    """
    # BUGFIX: the original ignored the `name` argument and rebuilt the path
    # from the globals `Bento_current_dir`/`trial`; use the argument instead.
    with open(name, "rb") as csvfile:
        reader = csv.reader(csvfile, delimiter=",")
        # First row is the header; map wanted header names to column indices.
        col_numbers = getbentocolumnnums(reader)
        data_dict = {item: [] for item in Bento_column_names}
        for row in reader:
            for i, item in enumerate(Bento_column_names):
                data_dict[item].append(float(row[col_numbers[i]]))
    return data_dict
#Gets the column number of the important columns
def getcolumnnums(reader):
    """Consume the next (header) row of `reader` and return the indices of the
    cells whose text appears in the module-level `column_names`."""
    header = next(reader, None)
    if header is None:
        return None
    return [idx for idx, cell in enumerate(header) if cell in column_names]
#same as getcolumnnums, but for Bento data
def getbentocolumnnums(reader):
    """Consume the next (header) row of `reader` and return the indices of the
    cells whose text appears in the module-level `Bento_column_names`."""
    header = next(reader, None)
    if header is None:
        return None
    return [idx for idx, cell in enumerate(header) if cell in Bento_column_names]
#finds resultant hand speed for all positions in time
def gethandvel(data_dict):
    """Differentiate the NaN-filled, low-pass-filtered RHND marker X/Y/Z
    positions (sampled at 120 Hz) and return the resultant (Euclidean)
    hand speed for each timestep (length = samples - 1)."""
    handvel = {"M:RHND:RHND1:X":[],"M:RHND:RHND1:Y":[],"M:RHND:RHND1:Z":[]}
    for item in ["M:RHND:RHND1:X","M:RHND:RHND1:Y","M:RHND:RHND1:Z"]:
        # Fill NaNs, low-pass filter, then take first differences scaled to per-second units.
        position = butter_lowpass_filter(linearfill(data_dict[item]), highcut, fs, order = 6)
        for i in range(len(position)-1):
            handvel[item].append((position[i+1]-position[i]) * 120) #position data comes in at 120Hz
    resultantvel = []
    for i in range(len(handvel["M:RHND:RHND1:X"])):
        resultantvel.append(np.sqrt(np.square(handvel["M:RHND:RHND1:X"][i]) + np.square(handvel["M:RHND:RHND1:Y"][i]) + np.square(handvel["M:RHND:RHND1:Z"][i])))
    return resultantvel
#gives the hand velocity that triggers the start and end of trials. 5% of peak velocity (or 5% of 1500 mm/s, whichever is lower)
def gettriggervel(handvel):
    """Return the trial start/end velocity threshold: 5% of the peak hand
    speed (capped at 1500 mm/s), ignoring the filter settling buffer."""
    capped_peak = min(max(handvel[buffer_time:]), 1500)
    return 0.05 * capped_peak
#fills and filters the gripper velocity from the bento data
def getgrippervel(data):
    """Return the NaN-filled, low-pass-filtered Bento gripper velocity (VEL5 column)."""
    return butter_lowpass_filter(linearfill(data["VEL5"]),Bento_highcut,Bento_fs,order = 6)
#Converts the encoder position data from the servos to joint angles
def convert_to_angles(column, joint_type):
    """Convert Bento servo encoder counts to joint angles in degrees.

    POS3 (wrist rotation) is zero-centred about its mean because the true
    rotation mixes biological wrist rotation with Bento arm rotation; POS4
    (wrist flexion) uses encoder tick 2045 as 0 degrees (up positive).
    """
    offset = 0
    if joint_type == "POS3":
        offset = np.nanmean(column)
    elif joint_type == "POS4":
        offset = 2045
    # 11.361111... encoder ticks per degree.
    return (column - offset) / 11.361111111111111
#This function returns the metrics of the joint angles in a list [max,min,range,mean,median]. Strings, rounded to 4 decimal places.
def metrics(joint_angles):
    """Summarise a joint-angle trace, ignoring NaNs.

    Returns [max, min, range, mean, median] as strings rounded to 4 decimal places.
    """
    hi = np.nanmax(joint_angles)
    lo = np.nanmin(joint_angles)
    stats = (hi, lo, hi - lo, np.nanmean(joint_angles), np.nanmedian(joint_angles))
    return [str(round(value, 4)) for value in stats]
#Makes a list of the strings to write the line of data that will be printed to the csv
def getdataline(task, intervention, jointName, metriclist):
    """Assemble one output row: [task, intervention, joint name] followed by
    the metric strings, in order."""
    return [task, intervention, jointName] + list(metriclist)
#determines the task given the trial name
def gettask(trialname):
    """Infer the task type ("Pasta", "Cups" or "Other") from the trial file name."""
    for task in ("Pasta", "Cups"):
        if task in trialname:
            return task
    return "Other"
#determines the intervention type given the trial name
def getintervention(trialname):
    """Infer the intervention label from the trial file name.

    Checked in priority order: "AL", then "SS", then "F" (reported as "FW");
    anything else is "Other".
    """
    for token, label in (("AL", "AL"), ("SS", "SS"), ("F", "FW")):
        if token in trialname:
            return label
    return "Other"
#trims the column of data to include the trial only (no before start or after end movement):
def trim(trialstart, trialend, column):
    """Return the slice of `column` covering only the trial window."""
    window = slice(trialstart, trialend)
    return column[window]
#Builds the filter
def butter_lowpass(highcut, fs, order=5):
    """Design a Butterworth low-pass filter with cutoff `highcut` Hz for a
    signal sampled at `fs` Hz; returns the (b, a) coefficient arrays."""
    normalised_cutoff = highcut / (0.5 * fs)  # cutoff as a fraction of the Nyquist frequency
    return butter(order, normalised_cutoff, btype='low')
#actually filters the data
def butter_lowpass_filter(data, highcut, fs, order=5):
    """Apply a causal Butterworth low-pass filter (see butter_lowpass) to `data`."""
    numerator, denominator = butter_lowpass(highcut, fs, order=order)
    return lfilter(numerator, denominator, data)
#creates a file with the participant number as the file name, and appropriate header
def createfile(Savedir,participant):
    """Create (or overwrite) <Savedir><participant>.csv and write the header row."""
    # NOTE(review): opening in "wb" and writing a str is Python-2 style; under
    # Python 3 this raises TypeError -- confirm the intended interpreter.
    with open(Savedir + participant + ".csv", "wb") as writefile:
        writefile.write("TASK,INTERVENTION,JOINT,MAX,MIN,RANGE,MEAN,MEDIAN")
#writes the given line of data to csv with the participant number as the file name
def writetocsv(Savedir, participant, data):
    """Append `data` (a list of strings) as one comma-separated row, preceded
    by a newline, to <Savedir><participant>.csv."""
    row = "\n" + ",".join(data)
    with open(Savedir + participant + ".csv", "a") as outfile:
        outfile.write(row)
#determines the start of the trial based on hand velocity
def getstart(handvel,triggervel):
    """Scan forward (past the filter settling buffer) for the first sample at
    or above `triggervel` while the velocity is rising; return its index."""
    for i in range(buffer_time,len(handvel)):
        if handvel[i] >= triggervel and handvel[i-1] < handvel[i]:
            return i
        elif i == len(handvel)-1: #if never triggered
            return i #will result in len_1 data sequence, should still work but be obvious that it's erroneous
        else:
            pass
#determines the start of the trial for Bento data based on gripper velocity (immediately after the full open/full close synchronization procedure)
def getbentostart(data):
    """Return the index where the Bento trial starts, detected from the
    gripper-velocity signature of the open/close synchronisation procedure:
    velocity dips below -100, rises above 0, then settles back below 1.0."""
    triggercheck1 = False
    triggercheck2 = False
    for i in range(Bento_buffer_time,len(data)):
        if data[i] < -100 and not triggercheck1: # the velocity goes down past -100 before coming back up past 0
            triggercheck1 = True
        if data[i] > 0 and triggercheck1: # once past 0, when the velocity comes back down we start the trial
            triggercheck2 = True
        if data[i] < 1.0 and triggercheck1 and triggercheck2: # in case it doesn't get to exactly 0, it should still trigger
            return i
        elif i == len(data) - 1: #if never triggered
            # BUGFIX: was `i == len(data)`, which can never be true inside
            # range(..., len(data)); the loop then fell through, returning None.
            return i # will result in len_1 data sequence, should still work but be obvious that it's erroneous
        else:
            pass
#determines the end of the trial for Bento data based on gripper velocity (1 s after last hand open, or the end of the data file.)
def getbentoend(data):
    """Scan backwards for the last gripper-velocity sample above 25 and return
    that index plus ~1 s (250 samples), clamped to the end of the data; if no
    sample exceeds 25, return the full data length."""
    for idx in range(len(data) - 1, -1, -1):
        if data[idx] > 25:
            return min(idx + 250, len(data)) #250 timesteps ~ 1s at a 3-4 ms timestep
    return len(data)
#determines the end of the trial based on hand velocity
def getend(handvel, triggervel):
    """Scan backwards (stopping before the settling buffer) for the last sample
    at or above `triggervel` while the velocity is rising toward its final
    peak; return its index, or the full length if never triggered."""
    for i in range(len(handvel) - buffer_time):
        j = len(handvel) - i - 1  # index scanned from the end towards the start
        # BUGFIX: the original compared handvel[j-1] < handvel[i], mixing the
        # forward counter `i` into the backward scan; mirroring getstart(), the
        # rising-edge comparison must be against handvel[j].
        if handvel[j] >= triggervel and handvel[j - 1] < handvel[j]:
            return j
        elif i == len(handvel) - buffer_time - 1: #if never triggered
            return len(handvel) #give the whole trial length
#fills in NaN data with interpolated values
def linearfill(column):
    """Replace NaN entries in `column` in place by linear interpolation between
    the nearest real neighbours, and also return the column.

    The first and last elements are never interpolated.  NOTE(review): if a
    trailing run of NaNs reaches the last element, the interpolation anchors on
    that (possibly NaN) value -- confirm inputs start and end with real data.
    """
    for i in range(len(column)):
        value = column[i]
        if i == 0: #don't interpolate the first value
            new_value = value
        elif i == len(column) - 1: # don't interpolate the last value
            new_value = value
        elif np.isnan(value):
            j = 1
            while np.isnan(column[i+j]) and i + j < len(column) - 1: #look ahead until you see real data or the end of the column
                j = j + 1
            new_value = (column[i+j]-column[i-1]) / (j+1) + column[i-1] #linear interpolation, knowing everything behind has already been filled
        else:
            new_value = value
        column[i] = new_value
    return column
#MAIN LOOP
# For each participant: create a fresh output .csv, then for every trial file
# compute per-joint angle metrics (mocap joints + Bento wrist joints) over the
# detected trial window and append them to the participant's .csv.
# NOTE(review): assumes the mocap and Bento directories contain identically
# named trial files -- confirm before running.
#Loop through the directories for the participants we care about
for ppt in ["95"]:#"35", "42", "45", "53", "80", "96"
    participant = ProParticipant + ppt
    current_dir = directory + ppt
    Bento_current_dir = Bento_directory + ppt
    dir_list = []
    Bento_dir_list = []
    dir_list = os.listdir(current_dir)
    Bento_dir_list = os.listdir(Bento_current_dir)
    #create a new .csv file for the participant (overwrite if one exists)
    createfile(Savedir,participant)
    #for each trial in the participant folder
    for trial in dir_list:
        #read the file, saving only the pertinent columns
        data_dict = getcolumns(current_dir + "/" + trial)
        Bento_data_dict = getbentocolumns(Bento_current_dir + "/"+ trial)
        #determine the task type of the trial
        task = gettask(trial)
        #determine the intervention (AL, SS, FW) of the trial
        intervention = getintervention(trial)
        #get hand velocity information
        handvel = gethandvel(data_dict)
        triggervel = gettriggervel(handvel)
        grippervel = getgrippervel(Bento_data_dict)
        #find the row of trial start
        trialstart = getstart(handvel,triggervel)
        bento_trialstart = getbentostart(grippervel)
        #find the row of trial end
        trialend = getend(handvel,triggervel)
        bento_trialend = getbentoend(grippervel)
        #for each joint angle type, get the metrics and write to a single line
        for i in range(len(joint_names)):
            #fill the column, removing NaNs
            filledcolumn = linearfill(data_dict[joint_names[i]])
            #filter the column
            filteredcolumn = butter_lowpass_filter(filledcolumn, highcut, fs, order = 6)
            #trim the column for trial data only
            trimmedcolumn = trim(trialstart,trialend,filteredcolumn)
            #get the metrics from the column
            measures = metrics(trimmedcolumn)
            #get the data all into a single line
            dataline = getdataline(task,intervention,joint_names[i],measures)
            #print to .csv
            writetocsv(Savedir,participant,dataline)
        #for each joint angle type in the Bento Data, also get the metrics and write to a single line
        for i in range(len(Bento_joint_names)):
            #fill the column, removing NaNs
            filledcolumn = linearfill(Bento_data_dict[Bento_joint_names[i]])
            #filter the column
            filteredcolumn = butter_lowpass_filter(filledcolumn, Bento_highcut, Bento_fs, order = 6)
            #trim the column for trial data only
            trimmedcolumn = trim(bento_trialstart,bento_trialend,filteredcolumn)
            #convert the position encoder data to angles
            convertedcolumn = convert_to_angles(trimmedcolumn,Bento_joint_names[i])
            #get the metrics from the column
            measures = metrics(convertedcolumn)
            #get the data all into a single line
            dataline = getdataline(task,intervention,Bento_joint_names_real[i],measures)
            #print to .csv
            writetocsv(Savedir,participant,dataline)
        # The plotting below was used for one-off visual sanity checks of the
        # trial-window detection and is intentionally disabled.
        #plot the hand velocity for each trial, and save to .png
        #plt.figure(1)
        #plt.clf()
        #t = np.linspace(0, len(grippervel), len(grippervel))
        #plt.plot(t, grippervel, label = 'Gripper Velocity')
        #t_jointangle = np.linspace(0, len(convertedcolumn), len(convertedcolumn))
        #plt.plot(t_jointangle, convertedcolumn, label = 'Joint Angle')
        #t_raw = np.linspace(0,len(Bento_data_dict[Bento_joint_names[i]]), len(Bento_data_dict[Bento_joint_names[i]]))
        #plt.plot(t_raw, Bento_data_dict[Bento_joint_names[i]], label = "Raw")
        #plt.title(trial)
        #plt.ylabel('Velocity (mm/s)')
        #plt.xlabel('Timestep')
        #plt.grid(True)
        #plt.axis('tight')
        #plt.legend(loc = 'best')
        #plt.savefig(Savedir + "Hand Velocity test Figures/" + trial + ".png")
        #plt.show()
        #trial_length = [str(trialend - trialstart),str(triggervel)]
        #writetocsv(Savedir,participant,trial_length)
#    plt.clf()
    #Plot the data being filtered as raw
#    t_raw = np.linspace(0, len(handvel),len(handvel))
#    plt.plot(t_raw, handvel, label='Hand Velocity')
#    #plot filtered data
#    t_flt = np.linspace(0, len(trimmedcolumn),len(trimmedcolumn))
#    plt.plot(t_flt, trimmedcolumn, label='Filtered signal')
#    plt.xlabel('time (seconds)')
#    plt.grid(True)
#    plt.axis('tight')
#    plt.legend(loc='upper left')
#    plt.savefig(Savedir + participant + ".png")
#    plt.show()
| [
"djbrenne@ualberta.ca"
] | djbrenne@ualberta.ca |
6670e2460a625e2cfb88755fbfe891cdb613a680 | cd99382b3787d0575e05afe4d8d22b32e18e16cc | /customers/serializers.py | b89105cfd972c3ec3c40a8703f7b40bae4ede610 | [] | no_license | dhruv217/iocl-aplication-django | bce4821fa943d017f42d63a6e6e185a233d294ca | 53a8149d9b03f45e446815557c294826a58cede4 | refs/heads/master | 2020-04-14T21:45:17.093120 | 2019-01-05T11:10:30 | 2019-01-05T11:10:30 | 164,139,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | from rest_framework import serializers
from customers.models import Customer
class CustomerSerializer(serializers.ModelSerializer):
    """DRF serializer exposing Customer records over the API."""
    # Read-only string representation of the related user who added the record.
    added_by = serializers.StringRelatedField(
        many=False,
        read_only=True
    )
    class Meta:
        model = Customer
        fields = ('id', 'url', 'name', 'address', 'photo_url',
        'land_details', 'mobile_number', 'email', 'added_by')
| [
"dhruv217@localhost.localdomain"
] | dhruv217@localhost.localdomain |
88e1e0c8b162f7c1ddb2a9bc32f0881713f42175 | 39db9df7c659d77c4b22024cee3cfaca84db0966 | /trood/api/custodian/tests/test_cache.py | eba43216a868cba628c20412f991f1b72d534c3b | [] | no_license | boooka/trood-sdk | 1bcdf94728cf0230533fbc68851c36adac3a098a | d97a8656845187067c4c0467451ca718e30d47f5 | refs/heads/master | 2022-09-21T06:17:41.059497 | 2020-03-19T16:15:50 | 2020-03-19T16:15:50 | 260,407,682 | 0 | 0 | null | 2020-05-01T08:01:29 | 2020-05-01T08:01:29 | null | UTF-8 | Python | false | false | 2,654 | py | import copy
import os
import pytest
from hamcrest import *
from trood.api.custodian.client import Client
from trood.api.custodian.objects import Object
from trood.api.custodian.tests.utils import call_counter
@pytest.mark.usefixtures('flush_database')
class TestRqlSeries:
    """Integration tests for the Custodian client's object-meta cache."""
    def test_cache_works_for_get_operation(self, existing_person_object: Object):
        """
        Once meta is retrieved from Custodian it should be served from the
        cache: a repeated get() must not issue another request.
        """
        client = Client(server_url=os.environ['SERVER_URL'], use_cache=True)
        client.execute = call_counter(client.execute)
        person_object = client.objects.get(existing_person_object.name)
        assert_that(person_object, instance_of(Object))
        initial_call_count = client.execute.call_count
        re_retrieved_person_object = client.objects.get(existing_person_object.name)
        assert_that(person_object, is_(re_retrieved_person_object))
        assert_that(client.execute.call_count, equal_to(initial_call_count))
    def test_cache_is_flushed_on_object_update(self, existing_person_object: Object):
        """
        Updating an object must invalidate its cache entry, so the next get()
        issues a fresh request and reflects the update.
        """
        client = Client(server_url=os.environ['SERVER_URL'], use_cache=True)
        client.execute = call_counter(client.execute)
        initial_person_object = client.objects.get(existing_person_object.name)
        assert_that(initial_person_object, instance_of(Object))
        initial_call_count = client.execute.call_count
        updated_person_object = copy.deepcopy(existing_person_object)
        del updated_person_object._fields['street']
        # two calls of 'execute'(both for update and get operation) should be done
        client.objects.update(updated_person_object)
        re_retrieved_person_object = client.objects.get(existing_person_object.name)
        assert_that(initial_call_count + 2, equal_to(client.execute.call_count))
        assert_that(re_retrieved_person_object.fields, has_length(len(updated_person_object.fields)))
    def test_cache_is_flushed_on_object_remove(self, existing_person_object: Object):
        """
        Deleting an object must invalidate its cache entry, so a subsequent
        get() returns None rather than the cached meta.
        """
        client = Client(server_url=os.environ['SERVER_URL'], use_cache=True)
        client.execute = call_counter(client.execute)
        initial_person_object = client.objects.get(existing_person_object.name)
        assert_that(initial_person_object, instance_of(Object))
        client.objects.delete(existing_person_object)
        assert_that(client.objects.get(existing_person_object.name), is_(None))
| [
"ornot.work@gmail.com"
] | ornot.work@gmail.com |
c745235af1797ebd4a55fabd5cba7deb70ca8a90 | 99ed338990ed7021c07b13e906a8d54a8ade496e | /0x06-python-classes/0-square.py | ba3353f0d80e37e7ba66acf03678ff0fb7419ce0 | [] | no_license | arioledavid/alx-higher_level_programming | 14bf9f57d9ac0fa62c7b9dfc177d005de8d95741 | f08ce1bfd8f6efac7c7c814f383f154d98b75bc5 | refs/heads/master | 2023-09-03T00:20:03.791193 | 2021-11-18T22:04:10 | 2021-11-18T22:04:10 | 403,700,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | #!/usr/bin/python3
""" Module 0-square """
class Square:
    """ An intentionally empty square class; attributes are added in later tasks. """
    pass
| [
"davidariole@gmail.com"
] | davidariole@gmail.com |
840c7867d840c4b0d35688536da325130c630cf9 | 013da629a4d1f67f891444c2ed6e462cf1f0c092 | /main.py | 166b3cccf06bbeb7d43088ff5650cc605646e781 | [] | no_license | FilatovF/pythonProject4 | 2b97930a0003f4b5aa010974515cfdc4f2e67b58 | 5cf67b76cb8aaac0dea297910a9da69dd4cbc061 | refs/heads/master | 2023-04-20T22:33:54.592548 | 2021-05-10T11:56:32 | 2021-05-10T11:56:32 | 366,026,297 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | """Task 1"""
# Four-element list: any index > 3 is out of range.
lis = [12, 13, 14, 15]
def oops() -> int:
    # `lis` has 4 elements, so index 5 deliberately raises IndexError.
    return lis[5]
def exept():
    """Invoke oops() and print its result; return "Ooops" when it raises IndexError."""
    try:
        result = oops()
    except IndexError:
        return "Ooops"
    print(result)
print(exept())  # Task 1 demo: oops() raises IndexError, so this prints "Ooops"
"""Task2"""
def calc():
    """Repeatedly prompt for integers a and b until a ** 2 / b succeeds, then return it."""
    while True:
        try:
            a = int(input("Enter a:"))
            b = int(input("Enter b:"))
            c = a ** 2 / b
            return c
        except ValueError:
            # Non-numeric input: ask again ("Введите числа" = "enter numbers").
            print("Введите числа")
        except Exception as e:
            # NOTE(review): broad catch -- in practice this branch is
            # ZeroDivisionError for b == 0 ("Делить на ноль нельзя" =
            # "cannot divide by zero"); the raw exception is printed after.
            print("Делить на ноль нельзя")
            print(e)
print(calc()) | [
"filatov.f@ukr.net"
] | filatov.f@ukr.net |
7814cc5ddc37a8a14f896539b6a14f2a41cace21 | bb67c8a9bbfc69fd121a968c8362a22b01203228 | /backend/myapp/db_models.py | a1efca07e4e1a2935ae76225b94c9d8a9af1f29b | [
"MIT"
] | permissive | jscriptcoder/Docker-development | 889d17c5be130ce0b98590e51d9358ee38588bda | 411e66465e11e451c87f375b005feb50f5a15b3d | refs/heads/main | 2023-05-04T17:18:50.246408 | 2021-05-21T15:30:07 | 2021-05-21T15:30:07 | 347,105,718 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | from datetime import datetime
from .db import db
class ProcessModel(db.Model):
    """SQLAlchemy model tracking one background process run and its outcome."""
    __tablename__ = 'process'
    # id is supplied by the caller (no default) -- 32-char string primary key.
    id = db.Column(db.String(32), primary_key=True)
    started_on = db.Column(db.DateTime, default=datetime.utcnow)
    ended_on = db.Column(db.DateTime)
    result = db.Column(db.String(32))
    def update(self, **kwargs):
        """Set each keyword argument as an attribute, silently ignoring unknown names."""
        for key, value in kwargs.items():
            if hasattr(self, key):
                setattr(self, key, value)
    def to_dict(self):
        """Serializable summary of the process (id and result only)."""
        return {'id': self.id, 'result': self.result}
    def __repr__(self):
        return f'<Process {self.id}>'
| [
"jscriptcoder@gmail.com"
] | jscriptcoder@gmail.com |
be9ed805d280be385d91bfdc757aff6b00158595 | f039fba32e1011cbbf05e546cc53d9843845cb82 | /src/pygentoolbox/TrimFastqToMidpoint.py | f49056cd845cac67b1812bd0ec12edbb939031a9 | [
"MIT"
] | permissive | VCMason/PyGenToolbox | f12879c9e36455db8b02f0c60d2dd4a51d8473c0 | 3367a9b3df3bdb0223dd9671e9d355b81455fe2f | refs/heads/master | 2022-05-14T13:48:50.423466 | 2022-04-10T13:02:48 | 2022-04-10T13:02:48 | 48,924,867 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,786 | py |
def trim_fastq_to_midpoint_gz(fastqfile, outfile, n=50, seqlenmin=125, seqlenmax=175):
    """Trim every read in a gzipped FASTQ file down to its length-`n` midpoint.

    Assumes each record's sequence is a complete fragment.  Only records whose
    sequence length lies within [seqlenmin, seqlenmax] are kept; the sequence
    and its quality string are trimmed symmetrically (one extra base dropped
    from the end when the overhang is odd).  Output is written gzip-compressed
    to `outfile`.
    """
    print('Assuming all sequences are complete sequences fragments.')
    print(f'Trimming front and end of all sequences to midpoints. Midpoint length = {n}')
    print(f'Input file: {fastqfile}')
    import gzip
    import math
    # Truncate/create the output file, then append chunks below.
    with gzip.open(outfile, 'wb') as OUT:
        OUT.write(b'')
    with gzip.open(outfile, 'ab') as OUT:
        outlines = []
        count, total = 0, 0
        with gzip.open(fastqfile, 'rb') as GZ:
            for lnum, line in enumerate(GZ):
                if (lnum % 4 == 0) or (lnum % 4 == 2):
                    # Header ('@...') or separator ('+') line: remember it so it
                    # can be emitted alongside the following seq/qual line.
                    name = line.strip()
                elif (lnum % 4 == 1) or (lnum % 4 == 3):
                    # Sequence or quality line: keep only records in the length window.
                    seqlength = len(line.strip())
                    if (seqlength >= seqlenmin) and (seqlength <= seqlenmax):
                        outlines.append(name)
                        if seqlength <= n:
                            outlines.append(line.strip())
                        else:
                            trimlen = (seqlength - n) / 2
                            # floor/ceil split drops the extra base from the end when odd
                            outlines.append(line.strip()[math.floor(trimlen):-1 * math.ceil(trimlen)])
                        count += 1
                # Flush accumulated lines every ~1,000,000 records to bound memory.
                if count >= 1000000:
                    if total == 0:
                        OUT.write(b'\n'.join(outlines))
                    else:
                        OUT.write(b'\n' + b'\n'.join(outlines))
                    total += count
                    print(total)
                    outlines = []
                    count = 0
        # Write any remaining lines.  BUGFIX: the original unconditionally
        # prefixed b'\n', which emitted a spurious leading blank line whenever
        # no intermediate flush had happened (total == 0).
        if len(outlines) > 0:
            if total == 0:
                OUT.write(b'\n'.join(outlines) + b'\n')
            else:
                OUT.write(b'\n' + b'\n'.join(outlines) + b'\n')
    print(f'Midpoint sequences output to file: {outfile}')
    return
def trim_fastq_to_midpoint(fastqfile, outfile, n=50):
    """Trim each sequence in a plain-text (FASTA-style) file to its length-`n`
    midpoint, assuming every sequence is a complete fragment."""
    print('Assuming all sequences are complete sequences fragments.')
    print(f'Trimming front and end of all sequences to midpoints. Midpoint length = {n}')
    print(f'Input file: {fastqfile}')
    import math
    trimmed = []
    with open(fastqfile, 'r') as handle:
        for raw in handle:
            record = raw.strip()
            if raw[0] == '>':
                # Header line: keep as-is.
                trimmed.append(record)
                continue
            overhang = len(record) - n
            if overhang <= 0:
                trimmed.append(record)
            else:
                # Symmetric trim; the extra base (odd overhang) comes off the end.
                front = math.floor(overhang / 2)
                back = math.ceil(overhang / 2)
                trimmed.append(record[front:len(record) - back])
    with open(outfile, 'w') as sink:
        sink.write('\n'.join(trimmed))
    print(f'Midpoint sequences output to file: {outfile}')
    return
def main(fastqfile, outfile, midpointlength=50, seqlenmin=125, seqlenmax=175):
    """Dispatch to the gzip-aware or plain-text trimmer based on the file suffix."""
    if fastqfile.endswith('.gz'):
        trim_fastq_to_midpoint_gz(fastqfile, outfile, midpointlength, seqlenmin, seqlenmax)
    else:
        trim_fastq_to_midpoint(fastqfile, outfile, midpointlength)
| [
"noreply@github.com"
] | VCMason.noreply@github.com |
f4115363359207d8d565955829e6cec80b3f3d8b | 2228d7b6039c19dd24253815b609441cac13c3fe | /knapsack_heuristic/utils/instance.py | 9080619b0086d384232cea081407ae8ae1326ac7 | [
"MIT"
] | permissive | patrotom/combinatorial-optimization-problems | bd3d759917504e7f60ea4c979bbea7c3a88aca04 | cead0d60d62a2adf90f4bb5c144a33f8f36e0fc3 | refs/heads/master | 2023-02-15T06:50:13.588912 | 2021-01-12T18:06:28 | 2021-01-12T18:06:28 | 300,247,122 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | from math import floor
from functools import reduce
class Instance:
def __init__(self, id, size, capacity, items, opt_price):
self.id = int(id)
self.size = int(size)
self.capacity = int(capacity)
self.items = items
self.opt_price = int(opt_price)
self.eps = 0
def prices_sum(self, i=0):
return sum(item.price for item in self.items[i:])
def sort_items(self):
self.items.sort(key=lambda i: i.price / i.weight, reverse=True)
def floor_prices(self, k):
for item in self.items:
item.price = int(floor(item.price / k))
if item.price < 1:
item.price = 1
| [
"tomas.patro@gmail.com"
] | tomas.patro@gmail.com |
d14ce4d4e3bde8dff86cf8596a0610aa6ce7e652 | 210e88536cd2a917fb66010ff69f6710b2261e8e | /games/migrations/0002_auto__add_field_game_is_over.py | ab9299348edd60d9b0e5600523b1bc7cd285f9ce | [] | no_license | tlam/multiverse_sidekick | e5ef1fa908c6fd3fee4d816aa1776b7243075e8c | 9211e4cb36611088420a79666f0c40ecb0a6b645 | refs/heads/master | 2020-04-17T08:30:28.396623 | 2015-08-27T03:36:47 | 2015-08-27T03:36:47 | 9,423,955 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,875 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South schema migration: adds the `is_over` boolean flag to Game."""
    def forwards(self, orm):
        """Apply: add `games.Game.is_over` (BooleanField, default False)."""
        # Adding field 'Game.is_over'
        db.add_column(u'games_game', 'is_over',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop `games.Game.is_over`."""
        # Deleting field 'Game.is_over'
        db.delete_column(u'games_game', 'is_over')
    # Frozen snapshot of the app models at migration time, generated by South;
    # do not edit by hand.
    models = {
        u'environment.environment': {
            'Meta': {'object_name': 'Environment'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'games.activehero': {
            'Meta': {'object_name': 'ActiveHero'},
            'game': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['games.Game']"}),
            'hero': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['heroes.Hero']"}),
            'hp': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        u'games.game': {
            'Meta': {'object_name': 'Game'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['environment.Environment']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_over': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['profiles.Profile']"}),
            'villain': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['villains.Villain']"}),
            'villain_hp': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'heroes.hero': {
            'Meta': {'object_name': 'Hero'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'starting_hp': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'profiles.profile': {
            'Meta': {'object_name': 'Profile'},
            'date_of_birth': ('django.db.models.fields.DateField', [], {}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'villains.villain': {
            'Meta': {'object_name': 'Villain'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'starting_hp': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }
    complete_apps = ['games']
"lamthierry@gmail.com"
] | lamthierry@gmail.com |
406ab68986f731db4cd6f052760f43c444130111 | c3beb0a910bb827186c92a7c789a762fab1c5025 | /matrix.py | d58347d2268c6c1a278c5f84c0cae4122299de69 | [] | no_license | harrimand/harrimand.github.io | d90793164feb652f746fad5031c57aee787d22b3 | 320f57eb74bb2e3edd53396e8bce0056f0d281b5 | refs/heads/master | 2021-07-08T14:40:04.251667 | 2020-06-23T16:25:45 | 2020-06-23T16:25:45 | 131,474,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,642 | py | # Matrix row operations calculator by Darrell Harriman harrimand@gmail.com
class Mat:
"""Perform matrix row operations on an n x n list """
def __init__(self, mtrx):
"""Create object from n x n list
Args:
mtrx (2D list of [int|float]): n x n list of integers or floats
"""
self.update(mtrx)
self.prec = 3
self.autoShow = True
def showRows(self):
"""Display labeled rows of matrix without formatting to set precision
Args:
none
"""
for i, m in enumerate(self.M):
print("\tRow " + str(i+1) + ": ", self.M[i])
print("\n")
def setAutoShow(self, ashow=True):
"""Automatically display matrix after row operations
Args:
ashow(boolean): if True, display matrix after row operations
"""
print("Setting autoShow to ", self.autoShow)
self.autoShow = ashow
def R(self, r):
"""Get row from 2D List
Args:
r (int): Row number 1 .. len(list)
"""
if r >= 1 and r <= len(self.M):
return self.M[r-1]
def augmentI(self):
"""Augment Matrix with len(list) by len(list) Identity Matrix
Args:
none
"""
L = len(self.M)
print("Augmenting Matrix with " + str(L) + " x " + str(L) + " Identity Matrix")
N = []
for i, m in enumerate(self.M):
for j, n in enumerate(m):
m[j] = n * 1.0
N.extend([m + [0.0 if not(i == ind) else 1.0 for ind in range(L)]])
self.update(N)
if self.autoShow: self.show()
def update(self, mtrx):
"""Update object with calculated mtrx
Args:
mtrx (2D list of [int|float]): n x n list of integers or floats
"""
self.M = mtrx
def addRows(self, rx, tx, con=1):
"""Add row or a multiple of a row to another row
Args:
rx(int): row to be updated with sum of row plus multiple
of another row.
tx(int): row to be multiplied by a constant and added to row(rx)
con([int|float]): integer or float multiplicand for tx
"""
if rx >= 1 and rx <= len(self.M) and tx >= 1 and tx <= len(self.M):
print("Adding " + ((str(con) + " * ") if con != 1 else "")
+ "Row " + str(tx) + " to Row " + str(rx))
N = []
for i, m in enumerate(self.M):
if i == rx - 1:
N.extend([[x+y * con for x, y in zip(self.M[i], self.M[tx - 1])]])
else:
N.extend([m])
self.update(N)
if self.autoShow: self.show()
else: pass
def subRows(self, rx, tx, con=1):
"""Subtract row or a multiple of a row from another row
Args:
rx(int): row to be updated with row minus a multiple
of another row.
tx(int): row to be multiplied by a constant and subtracted from row(rx)
con([int|float]): integer or float multiplicand for tx
"""
if rx >= 1 and rx <= len(self.M) and tx >= 1 and tx <= len(self.M):
print("Subtracting " + ((str(con) + " * ") if con != 1 else "")
+ "Row " + str(tx) + " from Row " + str(rx))
N = []
for i, m in enumerate(self.M):
if i == rx - 1:
N.extend([[x - (y * con) for x, y in zip(self.M[i], self.M[tx - 1])]])
else:
N.extend([m])
self.update(N)
if self.autoShow: self.show()
else: pass
def mulRow(self, rx, con=1):
"""Multiply row by a constant
Args:
rx(int): row to be updated with product of itself and a constant
con([int|float]): integer or float multiplicand for rx
"""
print("Multiplying Row " + str(rx) + " by " + str(con))
N = []
for i, m in enumerate(self.M):
if i == rx - 1:
N.extend([[x * con for x in self.M[i]]])
else:
N.extend([m])
self.update(N)
if self.autoShow: self.show()
def divRow(self, rx, con=1):
"""Divide row by a constant
Args:
rx(int): row to be updated with quotient of itself and a constant
con([int|float]): integer or float divisor for rx
"""
print("Dividing Row " + str(rx) + " by " + str(con))
N = []
for i, m in enumerate(self.M):
if i == rx - 1:
N.extend([[x / con for x in self.M[i]]])
else:
N.extend([m])
self.update(N)
if self.autoShow: self.show()
def swapRows(self, rX, rY):
"""Exchange row rX with row rY
Args:
rx(int): row to be swapped with row rY
rY(int): row to be swapped with row rX
"""
print("Swapping Row " + str(rX) + " with Row " + str(rY))
N = []
for i, m in enumerate(self.M):
if i == rX - 1:
N.extend([[x for x in self.M[rY-1]]])
elif i == rY - 1:
N.extend([[x for x in self.M[rX-1]]])
else:
N.extend([m])
self.update(N)
if self.autoShow: self.show()
def subMatrixRight(self):
"""Replace Matrix with right half of matrix
Args:
none
"""
N = []
for m in self.M:
N.extend([m[int(len(m)/2):]])
self.update(N)
if self.autoShow: self.show()
def setPrecision(self, P=3):
"""Set displayed precision of elements in m matrix.
Args:
P([int]) Number of digits following decimal point.
If left blank, sets precision to 3 digits
"""
print("Setting display precision to ", str(P))
self.prec = P
if self.autoShow: self.show()
def show(self):
"""Print Matrix (values formated with set precision)
Args:
none
"""
formatStr = '{:' + str(8) + '.' + str(self.prec) + 'f}'
for i, m in enumerate(self.M):
print("|", end="")
for n in m:
print(" " if (self.prec > 4) else "", end="")
print(formatStr.format(n if not(n == 0.0) else 0.0),end="")
print(" |")
print("\n")
def get(self):
"""Returns 2D list populated with matrix values
Args:
none
"""
return self.M
    def help(self):
        """Print a summary of the supported row/matrix operations."""
        print("\n\
R(r) Get row from 2D List\n\
augmentI() Augment with len(list) by len(list) Identity Matrix\n\
swapRows(rX, rY) Exchange row rX with row rY\n\
addRows(rx, tx, [con=1]) Add (row tx * con) to row rx\n\
subRows(rx, tx, [con=1]) Subtract (row tx * con) from row rx\n\
mulRow(rx, [con=1]) Multiply row rx by a constant\n\
divRow(rx, [con=1]) Divide row rx by a constant\n\
show() Print Matrix (values formated with set precision)\n\
showRows() Display labeled rows of matrix without formatting\n\
setAutoShow([True|False]) Automatic display after row operations\n\
setPrecision([P=3]) Set displayed precision of elements in matrix.\n\
subMatrixRight() Replace Matrix with right half of matrix\n\
get() Returns 2d list populated with matrix values")
| [
"harrimand@gmail.com"
] | harrimand@gmail.com |
7b1cace4b05ef5b8084f9ee772239a20e624f9ab | 675e0923fe358a82f23beb8705a49159583830e9 | /LEVELUP/FizzBuzz/test_fizzbuzz.py | 125ccfbad3fca77471fe1c18ffb270cdfdced495 | [
"MIT"
] | permissive | Simbadeveloper/AndelaCodeCamp | c846a87cff3d56bb282ccabd35bcc8070d060ace | 9c61d087329920eb04214268c5085644b23de114 | refs/heads/master | 2022-12-10T23:08:38.772918 | 2019-06-14T08:53:00 | 2019-06-14T08:53:00 | 158,019,808 | 0 | 0 | MIT | 2022-12-08T01:20:32 | 2018-11-17T19:58:55 | Python | UTF-8 | Python | false | false | 646 | py | import unittest
from fizzbuzz import FizzBuzz
class TestFizzBuzz(unittest.TestCase):
    """Unit tests for the FizzBuzz function."""

    def test_simple_should_return_the_number(self):
        # Numbers divisible by neither 3 nor 5 pass through unchanged.
        for number in (1, 2, 4):
            self.assertEqual(FizzBuzz(number), number)

    def test_multiple_3_should_return_fizz(self):
        for number in (3, 9):
            self.assertEqual(FizzBuzz(number), "fizz")

    def test_multiple_5_should_return_buzz(self):
        for number in (5, 10):
            self.assertEqual(FizzBuzz(number), "buzz")

    def test_multiple_3_and_5_should_return_fizzbuzz(self):
        for number in (15, 30):
            self.assertEqual(FizzBuzz(number), "fizzbuzz")
| [
"silverdeltamega@gmail.com"
] | silverdeltamega@gmail.com |
574f26adf2ea850867892f75bcbba987c8f63156 | c7aa86fc206ccdb91e89295704dd068892bda6f6 | /challenge16.py | f2c61155148ece6adf009b490b3dbe91713d77b7 | [] | no_license | ritobanrc/cryptopals | f2bb031f07de172f1f2735feae79a8f21dbc4e75 | 61c84db90516a915a24f4421d1216b4d2723579a | refs/heads/master | 2020-03-16T13:08:49.489344 | 2020-01-22T02:09:09 | 2020-01-22T02:09:09 | 132,682,310 | 0 | 0 | null | 2018-05-09T21:52:21 | 2018-05-09T00:59:52 | Python | UTF-8 | Python | false | false | 2,519 | py | #!/usr/bin/env python3
from challenge11 import random_string
from Crypto.Cipher import AES
from urllib.parse import quote, unquote
from challenge9 import pkcs7padding
from challenge10 import aes_cbc_encrypt, aes_cbc_decrypt
from challenge15 import strip_pkcs7padding
from util import *
from challenge2 import fixed_xor
from struct import pack
# Key and IV are generated once at import time, so every encryption and
# decryption in this challenge uses the same (unknown to the attacker,
# but consistent) secret material.
random_key = random_string(AES.key_size[0])
random_iv = random_string(AES.key_size[0])
def build_profile(userdata):
    """URL-quote userdata, wrap it between the fixed comment fields,
    then PKCS#7-pad and AES-CBC-encrypt under the module key/IV."""
    prefix = 'comment1=cooking%20MCs;userdata='
    suffix = ';comment2=%20like%20a%20pound%20of%20bacon'
    profile = bytearray(prefix + quote(userdata) + suffix, encoding='utf-8')
    padded = pkcs7padding(profile)
    return aes_cbc_encrypt(padded, random_key, random_iv)
def authenticate(ciphertext):
    """Decrypt, strip PKCS#7 padding, parse ';'-separated k=v pairs, and
    report whether the profile contains admin=true.

    Returns:
        (bool, bytes-like): (is_admin, decrypted plaintext).
    """
    plaintext = aes_cbc_decrypt(ciphertext, random_key, random_iv)
    plaintext = strip_pkcs7padding(plaintext)
    info_dict = {}
    for pair in plaintext.split(b';'):
        # NOTE(review): a pair containing more than one '=' would raise
        # ValueError on this unpack.
        k, v = pair.split(b'=')
        # Undo URL quoting; decode errors are ignored because a bit-flipped
        # block may contain arbitrary non-UTF-8 bytes.
        info_dict[bytes(unquote(k.decode(errors='ignore')), encoding='utf-8')] = \
            bytes(unquote(v.decode(errors='ignore')), encoding='utf-8')
    if b'admin' in info_dict and info_dict[b'admin'] == b'true':
        print('Logged in as admin. ')
        return True, plaintext
    else:
        print('Logged in as regular user')
        return False, plaintext
def modify_ciphertext(ciphertext):
    """CBC bit-flipping attack: XOR-edit ciphertext bytes 32:48 so the
    plaintext block at bytes 48:64 decrypts to b'a;admin=true;ab='.

    In CBC, flipping bits in ciphertext block i scrambles plaintext block i
    but flips exactly the same bits in plaintext block i+1.
    """
    # if we modify a certain block, it will be xor-ed with the ciphertext in the block after.
    new_ciphertext = bytearray(ciphertext)
    # Recover the current plaintext so we know what block 48:64 holds now;
    # suppress authenticate()'s login prints while doing so.
    block_print()  # keep stdout clean
    _, plaintext = authenticate(ciphertext)
    enable_print()
    # b'comment1=cooking|%20MCs;userdata=|hacker;comment2=|%20like%20a%20po|und%20of%20bacon'
    #       Unchanged        Unchanged        Scrambled        We Control       Unchanged
    # XOR of the current plaintext block with the target text gives the bit
    # pattern to inject one block earlier (bytes 32:48).
    to_xor = fixed_xor(plaintext[48:64], b'a;admin=true;ab=')
    to_xor = pack('16B', *to_xor)
    new_ciphertext[32:48] = fixed_xor(new_ciphertext[32:48], to_xor)
    print_split_blocks_hex(ciphertext)
    print_split_blocks_hex(new_ciphertext)
    return bytes(new_ciphertext)
def main():
    """Forge an admin token via the CBC bit-flipping attack above."""
    # Plain 'hacker' userdata must not authenticate if quoting works.
    forged = modify_ciphertext(build_profile('hacker'))
    success, plaintext = authenticate(forged)
    print(bytes(plaintext))


if __name__ == '__main__':
    main()
| [
"ritobanrc@gmail.com"
] | ritobanrc@gmail.com |
d1d2b859904e2d714def63d27148d911b68e70b9 | 43e900f11e2b230cdc0b2e48007d40294fefd87a | /Facebook/PhoneInterview/shortest_job_first.py | 02f0c808d6aad85a7f1028a46e84e57fdcc92298 | [] | no_license | DarkAlexWang/leetcode | 02f2ed993688c34d3ce8f95d81b3e36a53ca002f | 89142297559af20cf990a8e40975811b4be36955 | refs/heads/master | 2023-01-07T13:01:19.598427 | 2022-12-28T19:00:19 | 2022-12-28T19:00:19 | 232,729,581 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,058 | py | import heapq
class process:
    """Container for a single job's arrival time and execution time."""
    # NOTE(review): this looks like it was meant to be __init__ — as written,
    # the attributes only exist after an explicit .process(arr, exe) call.
    # Confirm intended usage before renaming.
    def process(self, arr, exe):
        self.arrTime = arr  # time at which the job becomes available
        self.exeTime = exe  # time the job needs on the processor
class Solution:
    """Non-preemptive Shortest-Job-First (SJF) scheduling.

    The original implementation could not run: it called heappush/heappushpop
    as list methods, referenced undefined comparator variables (p1/p2), and
    misspelled curTime.  This version implements the same intended algorithm
    with the heapq module functions.
    """

    def shortest_job_first(self, req, dur):
        """Return the average waiting time, rounded to 2 decimal places.

        Args:
            req: arrival times of the jobs.
            dur: execution times of the jobs (same length as req).

        Returns:
            float average wait; 0 for None/mismatched/empty input.
        """
        if req is None or dur is None or len(req) != len(dur):
            return 0
        length = len(req)
        if length == 0:
            return 0
        # Jobs in arrival order; waiting jobs ordered by (duration, arrival),
        # i.e. shortest execution time first, ties broken by earlier arrival.
        jobs = sorted(zip(req, dur))
        heap = []
        index = 0
        wait_time = 0
        cur_time = 0
        while heap or index < length:
            if heap:
                exe, arr = heapq.heappop(heap)
                wait_time += cur_time - arr
                cur_time += exe
                # Admit every job that arrived while this one was running.
                while index < length and jobs[index][0] <= cur_time:
                    arrival, duration = jobs[index]
                    heapq.heappush(heap, (duration, arrival))
                    index += 1
            else:
                # CPU idle: jump to the next arrival and admit all ties.
                cur_time = jobs[index][0]
                while index < length and jobs[index][0] <= cur_time:
                    arrival, duration = jobs[index]
                    heapq.heappush(heap, (duration, arrival))
                    index += 1
        return round(wait_time / length, 2)
if __name__ == '__main__':
    # Smoke test: four jobs arriving at t=1..4 with matching durations.
    scheduler = Solution()
    average_wait = scheduler.shortest_job_first([1, 2, 3, 4], [1, 2, 3, 4])
    print(average_wait)
| [
"wangzhihuan0815@gmail.com"
] | wangzhihuan0815@gmail.com |
ee5988f7474b4fc537308710e17b74a741581fd1 | d8ea695288010f7496c8661bfc3a7675477dcba0 | /django/ewm/file/admin.py | edbb70030fbb242451d77d39501bf657b1b2fdb1 | [] | no_license | dabolau/demo | de9c593dabca26144ef8098c437369492797edd6 | 212f4c2ec6b49baef0ef5fcdee6f178fa21c5713 | refs/heads/master | 2021-01-17T16:09:48.381642 | 2018-10-08T10:12:45 | 2018-10-08T10:12:45 | 90,009,236 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 660 | py | from django.contrib import admin
from file.models import *
###
# Register the attachment (File) model with the Django admin site and
# customise its change-list display and search behaviour.
###
@admin.register(File)
class FileAdmin(admin.ModelAdmin):
    ###
    # Field names shown in the admin change-list.
    ###
    list_display = [
        'file_name',
        # 'manufacturer_name',
        # 'model_specification',
        # 'equipment_location',
        # 'enable_date',
    ]
    ###
    # Field names searched by the admin search box.
    ###
    search_fields = [
        'file_name',
        # 'manufacturer_name',
        # 'model_specification',
        # 'equipment_location',
        # 'enable_date',
    ]
| [
"dabolau@qq.com"
] | dabolau@qq.com |
531fc91cb726d02cb67f66ef2e47ef8a4d3e8302 | f5f77e0d6e7d96099ca23898136ea53a11ba65a1 | /server/webDB/urls.py | b037ac4a122d26fe8ab6a17dc0c5b92b1a413b84 | [
"Apache-2.0"
] | permissive | ZihuanLing/webDB | 4ea4817d21f9800961c3df0c35cae2c79378fdbf | 791a0b78bae0fe1ce454506a1bf37d6de5e7f00f | refs/heads/master | 2020-09-16T17:13:52.786209 | 2020-05-10T14:56:01 | 2020-05-10T14:56:01 | 223,838,672 | 1 | 0 | Apache-2.0 | 2020-05-10T14:27:42 | 2019-11-25T01:35:39 | null | UTF-8 | Python | false | false | 325 | py | from apps.users.urls import urlpattern as user_urls
from apps.manager.urls import urlpattern as manager_urls
from webDB.handlers import MainHanler, MyStaticFileHandler
# Base routes: site root plus user-uploaded media files.
urlpattern = [
    (r'/?', MainHanler),
    (r'/media/(.*)', MyStaticFileHandler, {'path': 'media'})
]
# Append each app's route table (same (regex, handler[, kwargs]) tuples).
urlpattern += user_urls
urlpattern += manager_urls
| [
"neil.ling@foxmail.com"
] | neil.ling@foxmail.com |
fb40926909edaa9b82c0af122712d25e9e9545f3 | 82bf716e7fae42d0e51a1265f671b84c3d86f607 | /kaggle/natureconservacyfisheriesmonitoring/annotations.py | f2ff1479c3250c42c21563bafd653f9f949c8041 | [] | no_license | icostan/machine-learning | c18aca60d267e1242c0986694f7251cd40385653 | 9f2482cf58d38d0ed4c500929ea0956aa5689212 | refs/heads/master | 2022-01-21T07:26:01.484077 | 2019-05-04T07:29:27 | 2019-05-04T07:29:27 | 84,664,295 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 507 | py | import json
import functools
def for_image(filename, fish):
    """Return the first annotation for *filename* in the given fish class,
    or None when the file has no annotations or is not listed."""
    for entry in for_fish(fish):
        if entry['filename'] != filename:
            continue
        boxes = entry['annotations']
        return boxes[0] if boxes else None
    return None
@functools.lru_cache(maxsize=None)
def for_fish(fish='LAG'):
    """Load (and memoise) the annotation list for one fish class code."""
    path = 'annotations/{fish}_labels.json'.format(fish=fish.lower())
    with open(path) as data_file:
        return json.load(data_file)
| [
"iulian.costan@gmail.com"
] | iulian.costan@gmail.com |
c9f2c965fb311a19aa5b1a065f129044fc7f7560 | 90e8025b1b17c3d10057ff2249aef9462c0d7714 | /mysite/mysite/urls.py | 3a4c550ad3ba1e72905ffe1edc238c1a48d80cf3 | [] | no_license | magler/djangoproject | 22b42e62d3a1f7f713060d674439a950993f3395 | ddacc01c08626f1e53902dde1ab8d8e59007eac9 | refs/heads/master | 2021-01-10T19:27:05.549951 | 2015-01-06T07:46:23 | 2015-01-06T07:46:23 | 25,502,076 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
# Project URL configuration: admin site plus the polls and getdbinfo apps.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'mysite.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),

    url(r'^admin/', include(admin.site.urls)),
    url(r'^polls/', include('polls.urls')),
    url(r'^getdbinfo/', include('getdbinfo.urls')),
)
| [
"mike.t.alger@gmail.com"
] | mike.t.alger@gmail.com |
62f86d9acac14e31bff63aef82dd07d7ffd8c325 | 88dedf8b121fd07065a103e68e44276d596dbd41 | /codewars/6 kyu/supermarket_queue.py | cd5db6589dc6da1cd161683ded44cb362c166039 | [] | no_license | azizsaad/55-CodeWars-Problems | 9161a1649e6dc7757ef406f515e20db4020a60a0 | 669800fb1fc974f1c78dea78b4041d3c518e3b6b | refs/heads/master | 2022-12-23T15:47:25.767178 | 2020-09-24T21:08:29 | 2020-09-24T21:08:29 | 298,393,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | def queue_time(customers, n):
if customers == []:
return 0
tills = []
for index, item in enumerate(customers):
if len(tills) == n:
val = tills.index(min(tills))
tills[val] = tills[val] + item
else:
tills.append(item)
return max(tills)
print (queue_time([2,2,3,3,4,4], 2))
| [
"noreply@github.com"
] | azizsaad.noreply@github.com |
04153bb5d37ce2d86223a0bd652aa1f3ce650c12 | 4a344071b0dc0e43073f5aa680dc9ed46074d7db | /azure/mgmt/network/models/network_management_client_enums.py | 5f0cd8bb7a6b49af179f7ff4a5221d9d397fba4a | [] | no_license | pexip/os-python-azure-mgmt-network | 59ee8859cda2a77e03c051d104b1e8d2f08c1fe3 | fa4f791818b8432888c398b3841da58e5aeb370b | refs/heads/master | 2023-08-28T05:02:29.843835 | 2017-02-28T11:38:16 | 2017-02-28T11:47:18 | 54,524,386 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,166 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
# ---------------------------------------------------------------------------
# Enumerations for the Azure Network management client.  This module is
# AutoRest-generated (see the file header): member *values* are the exact
# strings the REST API sends and expects, so they must not be edited by hand —
# regeneration would overwrite any change.
# ---------------------------------------------------------------------------
class TransportProtocol(Enum):
    udp = "Udp"
    tcp = "Tcp"
class IPAllocationMethod(Enum):
    static = "Static"
    dynamic = "Dynamic"
class IPVersion(Enum):
    ipv4 = "IPv4"
    ipv6 = "IPv6"
class SecurityRuleProtocol(Enum):
    tcp = "Tcp"
    udp = "Udp"
    asterisk = "*"  # wildcard: rule applies to every protocol
class SecurityRuleAccess(Enum):
    allow = "Allow"
    deny = "Deny"
class SecurityRuleDirection(Enum):
    inbound = "Inbound"
    outbound = "Outbound"
class RouteNextHopType(Enum):
    virtual_network_gateway = "VirtualNetworkGateway"
    vnet_local = "VnetLocal"
    internet = "Internet"
    virtual_appliance = "VirtualAppliance"
    none = "None"
class ApplicationGatewayProtocol(Enum):
    http = "Http"
    https = "Https"
class ApplicationGatewayCookieBasedAffinity(Enum):
    enabled = "Enabled"
    disabled = "Disabled"
class ApplicationGatewayBackendHealthServerHealth(Enum):
    unknown = "Unknown"
    healthy = "Healthy"
    unhealthy = "Unhealthy"
    partial = "Partial"
class ApplicationGatewaySkuName(Enum):
    standard_small = "Standard_Small"
    standard_medium = "Standard_Medium"
    standard_large = "Standard_Large"
    waf_medium = "WAF_Medium"
    waf_large = "WAF_Large"
class ApplicationGatewayTier(Enum):
    standard = "Standard"
    waf = "WAF"
class ApplicationGatewaySslProtocol(Enum):
    tl_sv1_0 = "TLSv1_0"
    tl_sv1_1 = "TLSv1_1"
    tl_sv1_2 = "TLSv1_2"
class ApplicationGatewayRequestRoutingRuleType(Enum):
    basic = "Basic"
    path_based_routing = "PathBasedRouting"
class ApplicationGatewayOperationalState(Enum):
    stopped = "Stopped"
    starting = "Starting"
    running = "Running"
    stopping = "Stopping"
class ApplicationGatewayFirewallMode(Enum):
    detection = "Detection"
    prevention = "Prevention"
class AuthorizationUseStatus(Enum):
    available = "Available"
    in_use = "InUse"
class ExpressRouteCircuitPeeringAdvertisedPublicPrefixState(Enum):
    not_configured = "NotConfigured"
    configuring = "Configuring"
    configured = "Configured"
    validation_needed = "ValidationNeeded"
class ExpressRouteCircuitPeeringType(Enum):
    azure_public_peering = "AzurePublicPeering"
    azure_private_peering = "AzurePrivatePeering"
    microsoft_peering = "MicrosoftPeering"
class ExpressRouteCircuitPeeringState(Enum):
    disabled = "Disabled"
    enabled = "Enabled"
class ExpressRouteCircuitSkuTier(Enum):
    standard = "Standard"
    premium = "Premium"
class ExpressRouteCircuitSkuFamily(Enum):
    unlimited_data = "UnlimitedData"
    metered_data = "MeteredData"
class ServiceProviderProvisioningState(Enum):
    not_provisioned = "NotProvisioned"
    provisioning = "Provisioning"
    provisioned = "Provisioned"
    deprovisioning = "Deprovisioning"
class LoadDistribution(Enum):
    default = "Default"
    source_ip = "SourceIP"
    source_ip_protocol = "SourceIPProtocol"
class ProbeProtocol(Enum):
    http = "Http"
    tcp = "Tcp"
class EffectiveRouteSource(Enum):
    unknown = "Unknown"
    user = "User"
    virtual_network_gateway = "VirtualNetworkGateway"
    default = "Default"
class EffectiveRouteState(Enum):
    active = "Active"
    invalid = "Invalid"
class VirtualNetworkPeeringState(Enum):
    initiated = "Initiated"
    connected = "Connected"
    disconnected = "Disconnected"
class VirtualNetworkGatewayType(Enum):
    vpn = "Vpn"
    express_route = "ExpressRoute"
class VpnType(Enum):
    policy_based = "PolicyBased"
    route_based = "RouteBased"
class VirtualNetworkGatewaySkuName(Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
class VirtualNetworkGatewaySkuTier(Enum):
    basic = "Basic"
    high_performance = "HighPerformance"
    standard = "Standard"
    ultra_performance = "UltraPerformance"
class ProcessorArchitecture(Enum):
    amd64 = "Amd64"
    x86 = "X86"
class VirtualNetworkGatewayConnectionStatus(Enum):
    unknown = "Unknown"
    connecting = "Connecting"
    connected = "Connected"
    not_connected = "NotConnected"
class VirtualNetworkGatewayConnectionType(Enum):
    ipsec = "IPsec"
    vnet2_vnet = "Vnet2Vnet"
    express_route = "ExpressRoute"
    vpn_client = "VPNClient"
class NetworkOperationStatus(Enum):
    in_progress = "InProgress"
    succeeded = "Succeeded"
    failed = "Failed"
| [
"jmb@pexip.com"
] | jmb@pexip.com |
be0a81ed388196df8ff3bef44b89a2f28f6d4ded | d7aa17e5310ced171b4a99c72ab0cc0fdb976db7 | /homework7_test.py | 32f286135f7db2cfc1eb39418aef451b3044b321 | [] | no_license | cookieeeeee/homework-7 | 52e47a617b04d1d8400e1cdc5cdb4afddf3d0227 | 768ce5107c149442bf4eab3cebcfa5c67e94ec7c | refs/heads/master | 2020-07-13T18:26:06.102562 | 2016-11-16T05:15:57 | 2016-11-16T05:15:57 | 73,886,474 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,559 | py | from homework7 import *
import unittest
import string
class TestHomework(unittest.TestCase):
    """Unit tests for the homework7 tokenize/ngrams/NgramModel functions."""

    def testTokenize(self):
        # Surrounding whitespace is dropped; punctuation is its own token.
        string = " This is an example. "
        self.assertEqual(tokenize(string),['This', 'is', 'an', 'example', '.'])
        # Hyphens, commas and quotes are split out as separate tokens.
        string = "'Medium-rare,' she said."
        self.assertEqual(tokenize(string),["'", 'Medium', '-', 'rare', ',', "'",'she', 'said', '.'])

    def testNgrams(self):
        # Unigrams: empty context tuple, with a trailing <END> marker.
        result = [((), 'a'), ((), 'b'), ((), 'c'),((), '<END>')]
        check = ngrams(1, ["a", "b", "c"])
        self.assertEqual(check,result)
        # Bigrams: one <START> pad at the front.
        result = [(('<START>',), 'a'), (('a',), 'b'),(('b',), 'c'), (('c',), '<END>')]
        check = ngrams(2, ["a", "b", "c"])
        self.assertEqual(check,result)
        # Trigrams: n-1 = 2 <START> pads.
        result = [(('<START>', '<START>'), 'a'),(('<START>', 'a'), 'b'),(('a', 'b'), 'c'),(('b', 'c'), '<END>')]
        check = ngrams(3, ["a", "b", "c"])
        self.assertEqual(check,result)
        result = [(('<START>',),'the'),(('the',), 'cow'),(('cow',), 'jumps'),(('jumps',), 'over'),(('over',), 'the'),(('the',), 'moon'),(('moon',),'<END>')]
        check = ngrams(2, ['the','cow' ,'jumps' ,'over' ,'the' ,'moon'])
        self.assertEqual(check,result)

    def testNgramModel(self):
        # Unigram model over two updates; 'a' occurs 3 times out of 10 tokens.
        m = NgramModel(1)
        m.update("a b c d")
        m.update("a b a b")
        result = 0.3
        check = m.prob((), "a")
        self.assertEqual(check,result)

unittest.main()
| [
"noreply@github.com"
] | cookieeeeee.noreply@github.com |
68cf18a15a097151a12e5d883663349f357220f1 | 8382222033944cca777252870411b59d38789236 | /src/reader.py | 03a9a4634cae953d51d63ada28f0d1070c26dd00 | [
"MIT"
] | permissive | prasanna08/tipr-second-assignment | d4db9270079d2279b8657dd2854623d6d89a8cf1 | cfd1a8bcfe01175dfd79e3566914c5d571c2df28 | refs/heads/master | 2020-04-26T07:54:17.258465 | 2019-03-04T03:59:37 | 2019-03-04T16:23:04 | 173,407,465 | 0 | 0 | null | 2019-03-02T05:30:09 | 2019-03-02T05:30:09 | null | UTF-8 | Python | false | false | 1,033 | py | import os
import skimage
import copy
from sklearn.preprocessing import OneHotEncoder
import numpy as np
def read_dataset(dpath, resize=None):
    """Load a class-per-folder image dataset as (features, one-hot labels).

    Args:
        dpath: dataset root; each sub-folder name is a class label and
            contains that class's .jpg images.
        resize: optional (rows, cols) shape to resize every image to.

    Returns:
        (data_x, data_y): flattened grayscale images scaled to [0, 1] as
        float16 (one row per image), and one-hot encoded labels.
    """
    x = []
    y = []
    for i in os.listdir(dpath):
        print('reading images from %s folder.' % (i))
        for image in os.listdir(os.path.join(dpath, i)):
            if image.endswith('.jpg'):
                # NOTE(review): relies on skimage.io / skimage.transform
                # being reachable from a bare `import skimage`; newer
                # scikit-image versions need explicit submodule imports —
                # confirm against the installed version.
                img = skimage.io.imread(os.path.join(dpath, i, image), as_gray=True)
                if resize:
                    img = skimage.transform.resize(img, resize)
                x.append(img)
                y.append(i)  # folder name doubles as the class label
    data_x = rescale(np.array(x)).astype(np.float16).reshape(len(x), -1)
    data_y = one_hot_transform(y)
    return data_x, data_y
def rescale(x, scale=1./255):
    """Scale values (e.g. 0-255 pixel intensities) by a constant factor."""
    scaled = x * scale
    return scaled
def one_hot_transform(labels):
    """One-hot encode a sequence of class labels as a uint8 matrix
    (category/column order from np.unique, i.e. sorted)."""
    encoder = OneHotEncoder(categories='auto')
    encoder.fit(np.unique(labels).reshape(-1, 1))
    dense = encoder.transform(np.array(labels).reshape(-1, 1)).todense()
    return np.asarray(dense).astype(np.uint8)
| [
"prasannapatil08@gmail.com"
] | prasannapatil08@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.