blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b3651f83279fd802146deddad7c8e5c8972ac46 | 58b3205da144d8019941bfdd118608d3b4b13a0e | /update-sha1sums.py | 215d465cedad0da6859f43de88410573892cac3f | [] | no_license | CakesTwix/android_device_leeco_s2 | a2f5cc6624eed58a398316005c3cac5f224c104f | 285aa1b403052b5ced5e6aa425d2f49035dbcafe | refs/heads/ancient-Q | 2023-03-08T02:34:12.884564 | 2020-06-25T16:28:18 | 2020-10-27T15:01:27 | 257,221,728 | 2 | 17 | null | 2020-10-27T15:01:29 | 2020-04-20T08:43:52 | C++ | UTF-8 | Python | false | false | 2,068 | py | #!/usr/bin/env python
#
# Copyright (C) 2019 The ArrowOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from hashlib import sha1
import sys
device='s2'
vendor='leeco'
lines = [ line for line in open('proprietary-files-qc.txt', 'r') ]
vendorPath = '../../../vendor/' + vendor + '/' + device + '/proprietary'
needSHA1 = False
def cleanup():
for index, line in enumerate(lines):
# Remove '\n' character
line = line[:-1]
# Skip empty or commented lines
if len(line) == 0 or line[0] == '#':
continue
# Drop SHA1 hash, if existing
if '|' in line:
line = line.split('|')[0]
lines[index] = '%s\n' % (line)
def update():
for index, line in enumerate(lines):
# Remove '\n' character
line = line[:-1]
# Skip empty lines
if len(line) == 0:
continue
# Check if we need to set SHA1 hash for the next files
if line[0] == '#':
needSHA1 = (' - from' in line)
continue
if needSHA1:
# Remove existing SHA1 hash
line = line.split('|')[0]
filePath = line.split(':')[1] if len(line.split(':')) == 2 else line
if filePath[0] == '-':
file = open('%s/%s' % (vendorPath, filePath[1:]), 'rb').read()
else:
file = open('%s/%s' % (vendorPath, filePath), 'rb').read()
hash = sha1(file).hexdigest()
lines[index] = '%s|%s\n' % (line, hash)
if len(sys.argv) == 2 and sys.argv[1] == '-c':
cleanup()
else:
update()
with open('proprietary-files-qc.txt', 'w') as file:
for line in lines:
file.write(line)
file.close()
| [
"a98mondal@gmail.com"
] | a98mondal@gmail.com |
fd7dc1fd882e3f15ce0bb7842362fb8e3ec598eb | 9c3420c043cde8a29d8520eec48a3699adbb78f1 | /admin_privileges.py | d6b318913fd7f4c635c6df5d4179c780be5c31eb | [] | no_license | sdkcouto/exercises-coronapython | c90eae32d0a519955b9a70ec5ff2b36aa793e26a | 36fbed5f41abfffe935bf79aee3c3703b996a058 | refs/heads/master | 2022-11-13T18:35:23.377248 | 2020-06-19T18:02:57 | 2020-06-19T18:02:57 | 261,004,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | # helper file for chapter_9_9_12.py
import user_solo
class Privileges:
    """Holds the list of admin privilege strings for a user."""

    # Default grants. NOTE(review): "can add post" appears twice in the
    # original list; preserved verbatim for backward compatibility.
    DEFAULT_PRIVILEGES = ["can add post", "can ban user", "can add post", "can delete post"]

    def __init__(self, privileges=None):
        """Store *privileges*, defaulting to a fresh copy of DEFAULT_PRIVILEGES.

        The original code used a mutable list literal as the default argument,
        so every instance created without arguments shared (and could mutate)
        the same list object. Using None plus a per-instance copy fixes that.
        """
        self.privileges = list(self.DEFAULT_PRIVILEGES) if privileges is None else privileges

    def show_privileges(self):
        """Print a header followed by one privilege per line."""
        print("These are the user's privileges: ")
        for privilege in self.privileges:
            print(privilege)
class Admin(user_solo.User):
    """A User who additionally owns a Privileges list."""
    def __init__(self,first_name,last_name,age,gender):
        super().__init__(first_name,last_name,age,gender)
        # Every admin starts with the default privilege set.
        self.privileges = Privileges()
| [
"matheuscouto.eng@gmail.com"
] | matheuscouto.eng@gmail.com |
9ccb6a64732def6e88189cd76fa430f1024f910e | 9e5426317fbe4bce66bdd85ff940b47f1891dd33 | /get_remote_list.py | 3e89e39ea66a3c65e228eb0f4d59493b6a71b2e5 | [] | no_license | mancdaz/myowncf-scripts | 74b58303339ec003785ce31f75af2ad954ca120a | c94374538012e1c0c623a9d299ca25ea4d1a6acb | refs/heads/master | 2021-01-10T20:44:26.499658 | 2011-04-08T23:02:12 | 2011-04-08T23:02:12 | 1,511,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | #!/usr/bin/env python
"""
Get all files in a cloudfiles container and print
them out - works for containers with >10000 objects
"""
import cloudfiles
import ConfigParser
from sys import argv
# get our config from the config file
config = ConfigParser.ConfigParser()
config.read("./cf.ini")
USERNAME = config.get("auth", "username")
AUTH_URL = config.get("auth", "url")
API_KEY = config.get("auth", "key")
try:
CONTAINER = argv[1]
print "Using container: \'%s\'" % CONTAINER
except:
CONTAINER = "test.birkett"
print "Using default container: \'%s\'" % CONTAINER
# create the connection object
conn = cloudfiles.get_connection(USERNAME,API_KEY,authurl = AUTH_URL)
# get the container object
container = conn.get_container(CONTAINER)
# print some container details
print "total size of \'%s\' container: %d bytes" %(container, container.size_used)
print "total number of objects in \'%s\' container: %d" % (container, container.object_count)
# Page through the container with marker-based pagination: each call passes
# the last object name of the previous batch as the next 'marker'.
last_marker = ''
counter = 0
mainlist = []
print "Just populating the list..."
while (counter < container.object_count):
    mylist = container.get_objects(marker=last_marker)
    print "Just grabbing files %d to %d" % (counter, counter + len(mylist))
    # NOTE(review): assumes every batch holds exactly 10,000 entries; a short
    # final batch still terminates the loop, but the progress count overshoots.
    counter += 10000
    last_marker = mylist[-1]
    # extend mainlist by adding current iteration of mylist
    mainlist += mylist
# Print every collected object with a 1-based running index.
# NOTE(review): the loop variable shadows the builtin 'object'.
obnum = 1
for object in mainlist:
    print "object number %d: %s" % (obnum, object)
    obnum += 1
| [
"mancdaz1203@gmail.com"
] | mancdaz1203@gmail.com |
2ba076dd4fc496d59434bcc89d87437baad8b2de | 6834627827d03f0028f0d8726068a6411bf0ae6d | /targetSumPair.py | c8c5523bc6a62794beadf7cec072a0d11a19fd03 | [] | no_license | dimkary/Python-tests | a5a45b2649d3a019f60e7103b715df59263a3bb9 | 6a2a7a2509842a3620fe69664daf9760039074ac | refs/heads/master | 2022-06-08T03:01:12.652345 | 2022-03-31T19:10:57 | 2022-03-31T19:10:57 | 163,187,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 20 09:49:20 2018
@author: karips
In this application, we make use of the Python's dictionairy.
Each iteration check if the target_sum-current number exists in the dictionairy.
If it exists, the the current number and the one found will add up to the target_sum.
Then we just get the values(indices) from the dictionairy
"""
class TwoSum:
    """Dictionary-based two-sum: map each seen value to its index while scanning."""

    @staticmethod
    def find_two_sum(numbers, target_sum):
        """
        :param numbers: (list of ints) The list of numbers.
        :param target_sum: (int) The required target sum.
        :returns: (a tuple of 2 ints) The indices of the first two elements found
                  whose sum is equal to target_sum, else None
        """
        seen = {}  # value -> most recent index at which it occurred
        for index, value in enumerate(numbers):
            complement = target_sum - value
            if complement in seen:
                # Return immediately: this is the *first* qualifying pair,
                # matching the documented contract. (The original kept
                # scanning and returned the last pair found instead.)
                return (seen[complement], index)
            seen[value] = index
        return None
print(TwoSum.find_two_sum([3, 3, 5,5, 2, 3, 9], 10))
| [
"dim.karypidis@outlook.com"
] | dim.karypidis@outlook.com |
0de11dd8290d6b16c501e90b2f5c185cde415715 | fc85cb6a7ddddf9b9017a5522980c46307e23baa | /small_tasks.py | 00ac78840aa134759675ae7b5afffd9fdaccde45 | [] | no_license | csirota97/ALVAN | 7742159b0dde60b828d7d0c711b1b18e3e0d680a | d81ea6b2eab2066e4f6fe4fb2f41739cc8528e98 | refs/heads/master | 2021-03-28T22:38:58.300446 | 2020-03-20T05:37:55 | 2020-03-20T05:37:55 | 247,902,951 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,153 | py | import random, time, os
import speech_recognition as sr
import Speak
speak = Speak.speak_OS
def flip_coin():
    """Flip a fair coin and return "Heads" or "Tails"."""
    # randint(0, 1): 1 maps to "Heads", 0 to "Tails" (same draw as before).
    outcomes = ("Tails", "Heads")
    return outcomes[random.randint(0, 1)]
def roll_die():
    """Simulate one roll of a fair six-sided die.

    Returns:
        str: the face value, '1' through '6'.
    """
    # randint is inclusive on both ends, so each face is equally likely;
    # this replaces the original 12-line if/elif chain.
    return str(random.randint(1, 6))
def count(query, OS):
    """Produce a counting sequence described in natural language.

    query: text such as "count to 10 from 2 by 2"; if it lacks the word
           'to', the user is re-prompted by voice until recognition succeeds.
    OS:    platform token forwarded to speak() (see Speak.speak_OS).

    Returns the sequence as a single string (items separated by "\\; "),
    "Try something smaller" when the target exceeds 100, or
    "Please try again" on any parse failure.
    """
    if 'to' not in query:
        # Voice fallback: beep, listen, and retry until Google's recognizer
        # returns text (any failure - timeout, network error, no speech -
        # lands in the bare except and loops again).
        r = sr.Recognizer()
        with sr.Microphone() as source:
            while True:
                try:
                    speak('how would you like to count?', OS)
                    os.system("echo -ne '\007'")  # audible prompt beep
                    audio = r.listen(source, timeout = 4)
                    os.system("echo -ne '\007'")
                    query = r.recognize_google(audio).lower()
                    valid = True
                except:
                    valid = False
                if valid:
                    break
    try:
        # Defaults: count 1..1 stepping by 1 unless the query says otherwise.
        # t/f/b flag that the *next* word is the to/from/by operand;
        # tf/ff/bf remember whether each keyword appeared at all.
        to = 1
        t = False
        tf = False
        frm = 1
        f = False
        ff = False
        by = 1
        b= False
        bf = False
        words = query.split()
        for word in words:
            if word == "to":
                t = True
                tf = True
            elif t == True:
                t = False
                to = int(word)
            if word == "from":
                f = True
                ff = True
            elif f == True:
                f = False
                frm = int(word)
            # "x" accepted alongside "by" - presumably because speech
            # recognition mishears "by"; NOTE(review): confirm intent.
            if word == "by" or word == "x":
                b = True
                bf = True
            elif b == True:
                b = False
                by = int(word)
        # With an explicit step but no start, begin at the step value
        # (e.g. "count to 10 by 2" -> "2\; 4\; 6\; 8\; 10").
        if bf and not ff:
            frm = by
        if to > 100:
            return "Try something smaller"
        sequence = str(frm)
        if frm+by <= to:
            for i in range(frm+by, to+1, by):
                # NOTE: "\;" is not a recognized escape, so the backslash is
                # kept literally in the output separator.
                sequence += "\; {}".format(i)
        return sequence
    except:
        # Any parse failure (e.g. non-numeric operand after a keyword).
        return "Please try again"
"csirota97@gmail.com"
] | csirota97@gmail.com |
ba6a6412d281aa00758d3cb2bb772f7a19c698b9 | bb6af3ddbc49e9165cb70f7ff7567a774eaa8be0 | /pythonBasics/ClassImp.py | 4997439c93a60e7c29b28f356df4b85c116e6b70 | [] | no_license | deekshasingh2k8/GitDemo | 06b5d3a3257f375d4ddb127d7465dd0f9a78996a | 02f3086c3715a41e36d8d7b6295d6ffa39137241 | refs/heads/master | 2022-12-20T22:57:27.115717 | 2020-10-05T07:33:09 | 2020-10-05T07:33:09 | 274,167,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | from pythonBasics.OopsDemo import Calculator
class ChildImpl(Calculator):
    """Calculator subclass used to demonstrate inheritance."""
    # Class attribute shared by all instances.
    num2 = 200
    def __init__(self):
        # Explicit base-class init with two operands; presumably sets
        # self.num on Calculator - confirm in pythonBasics.OopsDemo.
        Calculator.__init__(self, 2, 10)
    def getCompleteData(self):
        # Combines the class attribute with state/behavior inherited from
        # Calculator (self.num and Summation() are defined there).
        return self.num2 + self.num + self.Summation()
# Module-level demo: runs on import as well as when executed directly.
obj = ChildImpl()
print(obj.getCompleteData())
| [
"deeksha.singh2k8@gmail.com"
] | deeksha.singh2k8@gmail.com |
af26c4fdc29a339c96ebead1de8a73b4d0b81f6c | 37260ebce52ae392c70a8f5621d31fefe1c78cc3 | /page_replacement.py | a435e90c6bf90ce1e6c3501cd49516660f398525 | [] | no_license | ej-castillo/page-replacement-policies | bebd948ad5271818be9ad71443f3f3b42a9e46e7 | b7acb6fe3fcb9cd5ccff2133422439649b61f99d | refs/heads/master | 2020-08-15T22:32:49.966390 | 2019-10-15T23:48:58 | 2019-10-15T23:48:58 | 215,416,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | from collections import deque
def FIFO_cache_miss(demands, frames):
cache = deque()
demand_list = demands.strip().split()
misses = 0
for demand in demand_list:
if demand not in cache:
misses += 1
print("Misses: {}, Old cache: {} New Cache: ".format(misses, cache), end="")
if len(cache) == frames:
cache.popleft()
cache.append(demand)
print(cache)
if __name__ == "__main__":
demands = "0 1 2 3 0 1 4 0 1 2 3 4"
FIFO_cache_miss(demands, 3)
FIFO_cache_miss(demands, 4)
| [
"eugene.j.castillo@gmail.com"
] | eugene.j.castillo@gmail.com |
8ca92b81a95e5bcb8d2307a39de88e5f4615a50b | 3117426993b164f7cac9da52e8952d233ff1b040 | /habitTracker/core/migrations/0002_auto_20200306_1727.py | 398dbcaaabc3c59df0f2bcbc9f53b0745c9c9f1d | [] | no_license | momentum-cohort-2020-01/habit-tracker-team-black-angus | ff324606be11262a91e99eab057d0c3d41911923 | ef1ea6fa07225841453d07abb863422017c289e2 | refs/heads/master | 2021-02-16T00:45:44.689048 | 2020-03-08T20:26:41 | 2020-03-08T20:26:41 | 244,945,118 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 726 | py | # Generated by Django 3.0.4 on 2020-03-06 17:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: move the user foreign key from Log onto Habit."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]

    operations = [
        # Drop the old Log.user column.
        migrations.RemoveField(
            model_name='log',
            name='user',
        ),
        # Add Habit.user; default=1 backfills existing rows and is then
        # discarded (preserve_default=False), so new rows must set it.
        migrations.AddField(
            model_name='habit',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='logs', to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| [
"justdmeyers@gmail.com"
] | justdmeyers@gmail.com |
da91ff9072b8b48ba611c5a766fa64a4e4bbdced | 47b7a1a07f75667dc8a36b5f71b9aa8b8289e8f2 | /model/Bayes/bayes_father/util/gather.py | fdb58554e2dbb345733382a94188b5af1fb9d02a | [] | no_license | javelir/SearchEngine | dc142cffd822c7aafbdf2b2e107c0cf34cf98c0b | 69ed9cdde4f29fb873730fc5ea29cfb4c574ea05 | refs/heads/master | 2020-07-25T19:01:56.985217 | 2016-05-27T09:29:49 | 2016-05-27T09:29:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,977 | py | #coding=utf-8
from __future__ import division
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
file_title = open('../data/percent_title.txt', 'r')
file_material = open('../data/percent_material.txt', 'r')
file_type = open('../data/percent_type.txt', 'r')
file_gather = open('./data/gather.txt', 'w+')
title_num = 185228
material_num = 393715
type_num = 720037
laplace_title = 1 / ( title_num * 2 )
laplace_material = 1 / ( material_num * 2 )
laplace_type = 1 / ( type_num * 2 )
dictory_title = {}
dictory_material = {}
dictory_type = {}
for line in file_title.readlines():
line = line.strip('\n')
result = line.split(':')
dictory_title[result[0]] = result[1]
file_title.close()
for line in file_material.readlines():
line = line.strip('\n')
result = line.split(':')
dictory_material[result[0]] = result[1]
file_material.close()
for line in file_type.readlines():
line = line.strip('\n')
result = line.split(':')
dictory_type[result[0]] = result[1]
file_type.close()
# Merge the per-category maps into one: token -> [P(title), P(material),
# P(type)], seeding missing categories with the Laplace-smoothed defaults
# computed above. (Python 2 code: dict.has_key was removed in Python 3.)
dictory = {}
for key in dictory_title:
    percent = []
    if dictory.has_key( key ):
        pass
    else :
        percent.append(dictory_title[key])
        percent.append( str(laplace_material) )
        percent.append( str(laplace_type) )
        dictory[key] = percent
for key in dictory_material:
percent = []
if dictory.has_key(key):
percent = dictory[key]
percent[1] = dictory_material[key]
else:
percent.append( str(laplace_title) )
percent.append(dictory_material[key])
percent.append( str(laplace_type) )
dictory[key] = percent
for key in dictory_type:
percent = []
if dictory.has_key( key ):
percent = dictory[key]
percent[2] = dictory_type[key]
else:
percent.append( str(laplace_title) )
percent.append( str(laplace_material) )
percent.append(dictory_type[key])
dictory[key] = percent
for key in dictory:
file_gather.write(key + ':')
for item in dictory[key]:
file_gather.write( item + ' ' )
file_gather.write('\n')
file_gather.close()
| [
"studywiller@gmail.com"
] | studywiller@gmail.com |
783f62b3ff7d1617eef1ba2182c8a0be799f0863 | 99af612b3e92f20c018facc966e70ccac18f7e4a | /agile_project/wsgi.py | 1c46d61578ad680759f8d75614e508ab43efe6b1 | [] | no_license | mina-rf/agile_project | 652733b51820bc27fcc64f6f784a721be1b9cb2a | 9c80b911eaa148990ee758d3f14dab2bc86bd642 | refs/heads/master | 2021-01-11T11:38:56.023928 | 2016-12-19T13:09:02 | 2016-12-19T13:09:02 | 76,863,165 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | """
WSGI config for agile_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before the app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "agile_project.settings")

# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"rafiei.mina73@gmail.com"
] | rafiei.mina73@gmail.com |
688a1dd11a1d63c04e305b8ca5997bb45fca6165 | 1b4a754519a6b15425c95348face0510f8a22224 | /scripts/inpy | 09233a291fbcabe4730f330fae9c28cf811d3b38 | [] | no_license | gafton/scripts | 78a47b5df072617cb4546d32029c84509273de94 | 8aec2029fc13722d1828db2e6d63e5b582b1b300 | refs/heads/master | 2021-01-15T21:50:05.730557 | 2011-05-17T09:42:30 | 2011-05-17T09:42:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,136 | #!/usr/bin/python -i
# Enhance introspection at the python interactive prompt.
# This is a very simple alternative to ipython
# whose default settings I don't like.
# Notes:
# You can run it directly, or call it like:
# PYTHONSTARTUP=~/path/to/inpy python
# Changes:
# V0.1 09 Sep 2008 Initial release
# V0.2 30 Nov 2010
# http://github.com/pixelb/scripts/commits/master/scripts/inpy
import os
class _readline:
    """Set up GNU readline: tab completion plus a persistent history file.

    History is loaded when the singleton is created and written back when it
    is garbage-collected at interpreter exit.
    """
    # Per-user history file (~/.inpy_history).
    history=os.path.join(os.environ['HOME'],'.inpy_history')
    # Class-body imports run once, when the class object is created.
    import readline
    # turn on tab completion
    readline.parse_and_bind("tab: complete")
    import rlcompleter
    def __init__(self):
        try:
            self.readline.read_history_file(self.history)
        except:
            # NOTE(review): a try/except that only re-raises is a no-op;
            # presumably a leftover hook for ignoring a missing history file.
            raise
    def __del__(self):
        try:
            self.readline.write_history_file(self.history)
        except:
            raise
_rl=_readline()
import sys
# The following exits on Ctrl-C
def _std_exceptions(etype, value, tb):
    """Exception hook: exit quietly on Ctrl-C, defer to the default otherwise."""
    # Restore the stock hook so any further exceptions behave normally.
    sys.excepthook = sys.__excepthook__
    if not issubclass(etype, KeyboardInterrupt):
        sys.__excepthook__(etype, value, tb)
    else:
        sys.exit(0)
sys.excepthook=_std_exceptions
#try to import dire() and ls()
#See http://www.pixelbeat.org/libs/dir_patt.py
# Note if $PYTHONPATH is not set then you can
# import from arbitrary locations like:
# import sys,os
# sys.path.append(os.environ['HOME']+'/libs/')
try:
from dir_patt import *
except:
pass
#pprint.pprint() doesn't put an item on each line
#even if width is small? See also:
#http://code.activestate.com/recipes/327142/
#also from reddit:
# ppdict = lambda d:"\n".join(map("%s: %s".__mod__, d.items()))
def ppdict(d):
    """Pretty-print dict *d*, one sorted 'key: value' pair per line."""
    print '{'
    keys=d.keys()
    keys.sort()
    for k in keys:
        # Pad so the values line up in a 16-character-wide key column.
        # NOTE: keys whose repr exceeds 15 chars get no padding (the
        # multiplier goes negative and "" results).
        spacing=" " * (16-(len(repr(k))+1))
        print "%s:%s%s," % (repr(k),spacing,repr(d[k]))
    print '}'
if 0: # Show info on startup
sys.stderr.write("Python %s\n" % sys.version.split('\n')[0])
sys.stderr.write("Tab completion on. Available items: %s\n" %
sorted(filter(lambda n: not n.startswith('_'), locals())))
| [
"P@draigBrady.com"
] | P@draigBrady.com | |
68616c0dbcebfbf9c42b5566168c88f7aa8c9404 | 7c2e677d931a8eb7d7cffc6d54713411abbe83e4 | /AppBuilder9000/AppBuilder9000/NflApp/migrations/0001_initial.py | c6c706f8bd214fbbea2270eca679fe35fce7be36 | [] | no_license | r3bunker/Python_Live_Project | 19e367b3cf74c2279c287fcd3a8a44a27f24041a | d3e06150d7daea6326cc1a4155309d99e4ff6244 | refs/heads/main | 2023-06-12T23:01:50.440371 | 2021-06-16T20:21:03 | 2021-06-16T20:21:03 | 344,883,966 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 901 | py | # Generated by Django 2.2.5 on 2020-11-06 15:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the PlayerProfile table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PlayerProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('position', models.CharField(choices=[('TE', 'TE'), ('QB', 'QB'), ('OL', 'OL'), ('DB', 'DB'), ('LB', 'LB'), ('WR', 'WR'), ('DL', 'DL'), ('RB', 'RB')], max_length=2)),
                ('name', models.CharField(default='', max_length=60)),
                # NOTE(review): max_length is not a valid option for
                # PositiveIntegerField; this mirrors the model definition,
                # but the option should be confirmed/removed upstream.
                ('height', models.PositiveIntegerField(max_length=3)),
                ('weight', models.PositiveIntegerField(max_length=3)),
                ('team', models.CharField(default='', max_length=30)),
            ],
        ),
    ]
| [
"r3bunker@gmail.com"
] | r3bunker@gmail.com |
de5084adaaef1f10cc0d68aac93cbaf753c45978 | b9368dbe9c2f4e5fe439e2ce4ec3d3bdca7f76f1 | /python beginning 8.1.py | 9ccc38db6dd0937ab916b16f466225d98b9fbb14 | [] | no_license | IliaMikhailov/python_begining | 027342ffda12ac03fe8aba530ddac36a8290c876 | f3e12cbf8eb880ee70346ab2d63b63cb327527f6 | refs/heads/master | 2023-05-29T11:09:57.718007 | 2021-06-13T10:35:09 | 2021-06-13T10:35:09 | 360,642,412 | 0 | 0 | null | 2021-06-13T10:35:09 | 2021-04-22T18:18:45 | Python | UTF-8 | Python | false | false | 793 | py | class Date:
def __init__(self, date):
self.date = date
@classmethod
def reformate_date(cls, date):
new_list = date.split('-')
day = int(new_list[0])
month = int(new_list[1])
year = int(new_list[2])
return day, month, year
@staticmethod
def check_date(day, month, year):
if (day > 0 and day < 32) and (month > 0 and month < 13) and (year > 0 and year < 2022):
print('дата введена правильно')
else:
print('дата введена неправильно')
if __name__ == '__main__':
Date.check_date(31, 12, 2021)
print(Date.reformate_date('02-11-2020'))
first = Date('01-01-2020')
print(first.reformate_date('03-03-2020'))
| [
"noreply@github.com"
] | IliaMikhailov.noreply@github.com |
dec404ac01d62b54c6eb62d1a30d66e8391539e6 | 1d4d615fa3c68736266663baadff0d60c5728850 | /tobe/bot/games/types.py | 9a90617895a73b8a0a6605620b50582ac0063d5f | [] | no_license | ArtjomKotkov/Tobe | 2f4471f7825003ba6b5b124742aa704f590e6e1d | c52dc527b9d4ae83b2a4debca8f18fcc9f3f3080 | refs/heads/master | 2022-12-17T14:49:59.482850 | 2020-09-06T19:50:35 | 2020-09-06T19:50:35 | 292,708,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,098 | py | from ..types import BaseType
from ..base.types import PhotoSize, MessageEntity, Animation, User
class Game(BaseType):
    """This object represents a game. Use BotFather to create and edit games, their short names will act as unique identifiers.
    Parameters
    ----------
    title : String
        Title of the game
    description : String
        Description of the game
    photo : Array of PhotoSize
        Photo that will be displayed in the game message in chats.
    text : String, optional
        Brief description of the game or high scores included in the game message. Can be automatically edited to include current high scores for the game when the bot calls setGameScore, or manually edited using editMessageText. 0-4096 characters.
    text_entities : Array of MessageEntity, optional
        Special entities that appear in text, such as usernames, URLs, bot commands, etc.
    animation : Animation, optional
        Animation that will be displayed in the game message in chats. Upload via BotFather
    """
    def __init__(self, title,
                 description,
                 photo,
                 text=None,
                 text_entities=None,
                 animation=None):
        super().__init__()
        self.title = title
        self.description = description
        # parse(..., iterable=True) presumably wraps each element of a raw
        # list into the typed object - confirm against BaseType.parse.
        self.photo = PhotoSize.parse(photo, iterable=True)
        self.text = text
        self.text_entities = MessageEntity.parse(text_entities, iterable=True)
        # NOTE(review): iterable=True looks suspicious here - the docstring
        # says 'animation' is a single Animation, not an array; confirm.
        self.animation = Animation.parse(animation, iterable=True)
class GameHighScore(BaseType):
    """This object represents one row of the high scores table for a game.
    Parameters
    ----------
    position : Integer
        Position in high score table for the game
    user : User
        User
    score : Integer
        Score
    """
    def __init__(self, position,
                 user,
                 score):
        super().__init__()
        self.position = position
        # Wrap the raw user payload into a User object (see User.parse).
        self.user = User.parse(user)
        self.score = score
| [
"gogagun0@gmail.com"
] | gogagun0@gmail.com |
76e31ee753accb6937d8800000f3bbc5a28dabe6 | 8a9f0a0924706ded24ab4214aa42ab07f201e38b | /LeetCode_Python/Linked_List/Swap_Nodes_In_Pairs.py | 030136ef60b1879da3ce6eb6cdd836e2dfdd49ae | [] | no_license | gitzx/Data-Structure-Algorithm | 687162565729b12551cb660aa55a94f1d382014c | d6af7dfdc4d3d139fd939687a45dd36e327c914c | refs/heads/master | 2021-06-03T21:27:17.750464 | 2019-06-27T10:50:48 | 2019-06-27T10:50:48 | 14,443,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 690 | py | '''
Given a linked list, swap every two adjacent nodes and return its head.
For example,
Given 1->2->3->4, you should return the list as 2->1->4->3.
Your algorithm should use only constant space. You may not modify the values in the list, only nodes itself can be changed.
'''
'''
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
'''
class Solution(object):
def swapPairs(self, head):
if head == None or head.next == None:
return head
dummy = ListNode(0)
dummy.next = head
p = dummy
while p.next and p.next.next:
tmp = p.next.next
p.next.next = tmp.next
tmp.next = p.next
p.next = tmp
p = p.next.next
return dummy.next
| [
"emailzhx@126.com"
] | emailzhx@126.com |
04ae40874bbae6356e0a4b55a41728bad5cc3670 | 0b2a5df3447b7427b6da3705513de2b825792222 | /djangoproject/sinbike/sinbike_CX/migrations/0004_auto_20210522_1620.py | 292009a69bbd446aedd41842186b69d73cd55160 | [] | no_license | youtube-dm94/Sinbike | 7599af3c4673bcd95ea1aa56fdd68b4cfb0f8b5f | 69afcb86a9dcb30951bdc01f596c7e27a54b332b | refs/heads/main | 2023-07-12T11:59:52.491953 | 2021-08-14T08:30:42 | 2021-08-14T08:30:42 | 362,273,684 | 0 | 2 | null | 2021-08-03T16:17:13 | 2021-04-27T22:56:46 | Python | UTF-8 | Python | false | false | 567 | py | # Generated by Django 3.2 on 2021-05-22 08:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a nullable modify_date timestamp to Answer and Question."""

    dependencies = [
        ('sinbike_CX', '0003_answer_author'),
    ]

    operations = [
        # null/blank allowed so rows that were never edited stay empty.
        migrations.AddField(
            model_name='answer',
            name='modify_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='question',
            name='modify_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
| [
"63930715+youtube-dm94@users.noreply.github.com"
] | 63930715+youtube-dm94@users.noreply.github.com |
b8760e44c9b37c312a00f01f06b0f1d1992247d0 | 28b405b8a538187367e019e45dd7fff3c5f4f296 | /src/rocks-pylib/rocks/commands/set/host/interface/vlan/__init__.py | 6224fb0a1b94d913a4014f8e6961bc95b0bc6627 | [] | no_license | rocksclusters/core | 95c84cbe4d9f998eea123177e43b25fa0475c823 | 7fb7208aa4a532e64db83e04759d941be9b96d91 | refs/heads/master | 2023-04-08T16:30:45.931720 | 2023-03-23T17:18:54 | 2023-03-23T17:18:54 | 58,084,820 | 21 | 11 | null | 2019-08-22T21:17:23 | 2016-05-04T21:21:17 | Python | UTF-8 | Python | false | false | 5,142 | py | # $Id: __init__.py,v 1.10 2012/11/27 00:48:28 phil Exp $
#
# @Copyright@
#
# Rocks(r)
# www.rocksclusters.org
# version 6.2 (SideWinder)
# version 7.0 (Manzanita)
#
# Copyright (c) 2000 - 2017 The Regents of the University of California.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice unmodified and in its entirety, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. All advertising and press materials, printed or electronic, mentioning
# features or use of this software must display the following acknowledgement:
#
# "This product includes software developed by the Rocks(r)
# Cluster Group at the San Diego Supercomputer Center at the
# University of California, San Diego and its contributors."
#
# 4. Except as permitted for the purposes of acknowledgment in paragraph 3,
# neither the name or logo of this software nor the names of its
# authors may be used to endorse or promote products derived from this
# software without specific prior written permission. The name of the
# software includes the following terms, and any derivatives thereof:
# "Rocks", "Rocks Clusters", and "Avalanche Installer". For licensing of
# the associated name, interested parties should contact Technology
# Transfer & Intellectual Property Services, University of California,
# San Diego, 9500 Gilman Drive, Mail Code 0910, La Jolla, CA 92093-0910,
# Ph: (858) 534-5815, FAX: (858) 534-7345, E-MAIL:invent@ucsd.edu
#
# THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# @Copyright@
#
# $Log: __init__.py,v $
# Revision 1.10 2012/11/27 00:48:28 phil
# Copyright Storm for Emerald Boa
#
# Revision 1.9 2012/08/23 16:42:07 clem
# set host interface vlan and set host interface subnet did not accept properly
# MAC addresses for their iface input argument
#
# Revision 1.8 2012/05/06 05:48:35 phil
# Copyright Storm for Mamba
#
# Revision 1.7 2011/07/23 02:30:38 phil
# Viper Copyright
#
# Revision 1.6 2010/09/07 23:53:01 bruno
# star power for gb
#
# Revision 1.5 2009/07/28 17:52:20 bruno
# be consistent -- all references to 'vlanid' should be 'vlan'
#
# Revision 1.4 2009/05/01 19:07:03 mjk
# chimi con queso
#
# Revision 1.3 2009/01/08 01:20:57 bruno
# for anoop
#
# Revision 1.2 2008/10/18 00:55:57 mjk
# copyright 5.1
#
# Revision 1.1 2008/07/22 00:34:41 bruno
# first whack at vlan support
#
#
#
import rocks.commands
class Command(rocks.commands.set.host.command):
"""
Sets the VLAN ID for an interface on one of more hosts.
<arg type='string' name='host' repeat='1'>
One or more named hosts.
</arg>
<arg type='string' name='iface'>
Interface that should be updated. This may be a logical interface or
the mac address of the interface.
</arg>
<arg type='string' name='vlan'>
The VLAN ID that should be updated. This must be an integer and the
pair 'subnet/vlan' must be defined in the VLANs table.
</arg>
<param type='string' name='iface'>
Can be used in place of the iface argument.
</param>
<param type='string' name='vlan'>
Can be used in place of the vlan argument.
</param>
<example cmd='set host interface vlan compute-0-0-0 eth0 3'>
Sets compute-0-0-0's private interface to VLAN ID 3.
</example>
<example cmd='set host interface vlan compute-0-0-0 subnet=eth0 vlan=3
'>
Same as above.
</example>
<related>add host</related>
"""
	def run(self, params, args):
		"""Set (or clear) the VLAN ID on one interface of each named host."""
		# Peel 'iface' and 'vlan' off the positional argument list (the
		# base class presumably also honors the iface=/vlan= params, per
		# the command docstring); the remaining args are host names.
		(args, iface, vid) = self.fillPositionalArgs(
			('iface', 'vlan'))

		if not len(args):
			self.abort('must supply host')
		if not iface:
			self.abort('must supply iface')
		if not vid:
			self.abort('must supply vlan')
		else:
			try:
				vlanid = int(vid)
			except:
				self.abort('vlan "%s" must be an integer' %
					(vid))

		# A vlan of 0 clears the assignment (stored as NULL). The iface
		# argument may match either the logical device name or the MAC.
		# NOTE(review): values are spliced into the SQL via % formatting;
		# acceptable for a trusted admin CLI but not injection-safe.
		for host in self.getHostnames(args):
			self.db.execute("""update networks net, nodes n
				set net.vlanid = IF(%d = 0, NULL, %d)
				where (net.device = '%s' or net.mac='%s') and
				n.name = '%s' and net.node = n.id""" %
				(vlanid, vlanid, iface, iface, host))
| [
"ppapadopoulos@ucsd.edu"
] | ppapadopoulos@ucsd.edu |
7cde89c583ba50f4aa380cf58fbdc5cf4eab4913 | 96783580b4710e11215172fc2dc6007956f87d8f | /sharing_fixtures/1/conftest.py | 1050544ca35f9a26a34ea202842e643991b001f7 | [] | no_license | stephenfrench9/pytest-examples | e22f44e350ec626547ba24d97d586c2df72388ee | 980af458c1813c4f80e8c5e13c63910a52c9f1b8 | refs/heads/master | 2023-08-14T03:12:38.962181 | 2021-10-11T02:08:58 | 2021-10-11T02:08:58 | 371,434,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 75 | py | import pytest
@pytest.fixture(scope = 'module')
def list0():
return [] | [
"stephen.french@invitae.com"
] | stephen.french@invitae.com |
9896ed4a15946204d46a0faecec93ee19b1562de | 15373eaa353e8aece47a26741b7fb27795268bf6 | /easy/674_longest_continuous_increasing_subsequence.py | ef6d6b79c989164a5d0abafb804820ca0af2c060 | [] | no_license | esddse/leetcode | e1a9bacf04c68a8d642a1e53c90e6c2dda2c1980 | 0ceccdb262149f7916cb30fa5f3dae93aef9e9cd | refs/heads/master | 2021-06-08T19:15:14.346584 | 2020-01-09T01:41:23 | 2020-01-09T01:41:23 | 109,675,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | class Solution:
def findLengthOfLCIS(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if not nums:
return 0
N = len(nums)
dp = [1] * N
max_len = 1
for i in range(1, N):
if nums[i] > nums[i-1]:
dp[i] = dp[i-1] + 1
max_len = max(max_len, dp[i])
return max_len | [
"tjz427@sina.cn"
] | tjz427@sina.cn |
c78fca675d5676273ac2feefb58558b427a6339b | 74e53273dc5aa71293a385512b3d239971099738 | /Data_structures_and_Algorithms/linked_list/odd_even_linked_list.py | 23a4c71690de5d036acb1edf0b4d3ec4ea4b1b76 | [] | no_license | BJV-git/Data_structures_and_Algorithms | 3b240bf699e7091453f3a1459b06da1af050c415 | 393c504b2bb17b19e76f6d9d9cce948b4c12dbb2 | refs/heads/master | 2020-04-23T22:32:22.525542 | 2019-02-19T16:09:51 | 2019-02-19T16:09:51 | 171,504,949 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 418 | py | # just about the positional ordering
# just can go by the next.next and set the odd last to even head
def odd_even(head):
    """Regroup a linked list so all odd-position nodes come first,
    followed by all even-position nodes, preserving relative order.

    Operates in place in O(1) extra space; returns the (unchanged) head.
    """
    if head is None or head.next is None:
        return head
    odd_tail = head
    even_head = head.next
    even_tail = even_head
    # Walk two pointers; each step detaches one node from the even chain
    # and appends it to the odd chain.
    while even_tail is not None and even_tail.next is not None:
        odd_tail.next = even_tail.next
        odd_tail = odd_tail.next
        even_tail.next = odd_tail.next
        even_tail = even_tail.next
    # Stitch the even chain onto the end of the odd chain.
    odd_tail.next = even_head
    return head
"noreply@github.com"
] | BJV-git.noreply@github.com |
6bd25e5c767e722a7e05735eb55585d8e58dddfb | c67b58dcf1b4ee7f95e825fae85831d4acac3fdf | /tests_api.py | 39e8a05931b82e3a9204a217da5081adb07f9956 | [
"MIT"
] | permissive | PragmaticCoder/django_api_tests | 85f9a238b3b63e9fe8e64197a6477bc4fe0b1899 | f5e4667a3f82eff7cfb8df8d67597b6879c1a445 | refs/heads/master | 2021-05-18T13:18:12.914853 | 2020-03-30T09:33:46 | 2020-03-30T09:33:46 | 251,259,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,098 | py | from unittest.mock import patch
import requests
from django.test import TestCase
from django.urls import reverse
from model_mommy import mommy
from rest_framework import status
from backend.posts.models import Post, Page
from backend.posts.tests import FACEBOOK_PAGE_ID, FACEBOOK_PAGE_NAME, MESSAGE, TEST_URL
class TestCreatePosts(TestCase):
    """API tests for the versioned post-creation endpoint.

    NOTE: ``@patch`` decorators are applied bottom-up, so the decorator
    closest to the method supplies the FIRST mock argument.  The mock
    parameter names below follow that order; the previous version had
    them swapped, so each name pointed at the wrong mock object.

    Fixed defects:
    * swapped mock parameter names (see above);
    * ``test_returns_400_when_page_zapier_url_not_set`` was defined twice,
      so the first definition was shadowed and never ran -- the second
      (which mocks a missing page image) is renamed accordingly;
    * ``test_unauthorized_access_returns_404`` actually asserts 401 and is
      renamed to match;
    * deprecated ``assertEquals`` replaced with ``assertEqual``.
    """

    def setUp(self):
        # Endpoint under test plus an authenticated API user and a valid payload.
        self.URL_ENDPOINT = reverse('api-posts:create', kwargs={'version': 1})
        self.user = mommy.make('GrowthUser', is_api_user=True, _fill_optional=True)
        self.user_profile = mommy.make('UserProfile', user=self.user, _fill_optional=True)
        self.json = {
            'token': self.user.login_token,
            'page_id': FACEBOOK_PAGE_ID,
            'page_name': FACEBOOK_PAGE_NAME,
            'message': MESSAGE,
            'zapier_url': TEST_URL,
        }

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_success(self, mock_get_page_image, mock_create_facebook_post):
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue(mock_create_facebook_post.called)
        self.assertTrue(mock_get_page_image.called)
        self.assertTrue(mock_create_facebook_post.return_value, requests.Response)
        self.assertTrue(mock_get_page_image.return_value, requests.Response)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_success_on_existing_page(self, mock_get_page_image, mock_create_facebook_post):
        page = mommy.make('Page', _fill_optional=True)
        self.json['page_id'] = page.facebook_id
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Post.objects.count(), 1)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_creates_new_post(self, mock_get_page_image, mock_create_facebook_post):
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Page.objects.count(), 1)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_creates_new_page(self, mock_get_page_image, mock_create_facebook_post):
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Page.objects.count(), 1)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_success_when_page_zapier_url_set(self, mock_get_page_image, mock_create_facebook_post):
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Page.objects.count(), 1)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_updates_page_zapier_url(self, mock_get_page_image, mock_create_facebook_post):
        self.json['zapier_url'] = 'new-url.com'
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(Page.objects.first().zapier_url, self.json['zapier_url'])

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=TEST_URL)
    def test_returns_400_when_page_zapier_url_not_set(self, mock_get_page_image, mock_create_facebook_post):
        del self.json['zapier_url']
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    @patch('backend.posts.api.create_facebook_post', spec=requests.Response)
    @patch('backend.posts.api.get_page_image', return_value=None)
    def test_returns_400_when_page_image_not_found(self, mock_get_page_image, mock_create_facebook_post):
        # Renamed from a duplicate of the test above: this variant mocks a
        # missing page image (get_page_image -> None).
        del self.json['zapier_url']
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_missing_param(self):
        del self.json['page_id']
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)

    def test_unauthorized_access_returns_401(self):
        # Renamed: the old name said 404 but the assertion is (and was) 401.
        del self.json['token']
        response = self.client.post(self.URL_ENDPOINT, self.json)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
"noreply@github.com"
] | PragmaticCoder.noreply@github.com |
c2481ea28e5b5929e2a260f3be981bd194a7d641 | 3787605175759227bd22cae316e71e7a63a63a6f | /pythonex/ex3.py | 6958bd8227c9705a07e05bfc4452bbe70d987f8b | [] | no_license | YiMiWang810/python | 5ae5125e369bb02385d3262345920f81a3672ae2 | f98c2d90ca1d60dd88e314b54a188d9f9a257a12 | refs/heads/master | 2022-02-21T17:02:01.019152 | 2019-10-06T13:46:51 | 2019-10-06T13:46:51 | 124,026,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | for i in range(1,5):
for j in range(1,5):
for k in range(1,5):
if(i!=k)and(i!=j)and(j!=k):
print(i,j,k) | [
"wangbingjie@qq.com"
] | wangbingjie@qq.com |
f422ec4d99886f7b93be2b0fba9b49a04225bdcf | 8106fc53fd1d15a5e62bd1b422f0dbe50c91b5fd | /a9/treenode.py | 7c9a9ffdbed0102403d957a81778061238157bb2 | [] | no_license | germ/cmp145 | 4060c7124e6d350173848b434a2fa4100cc228dc | 083f7e3761fe4f17c9f61de9ac42cf717d03cb67 | refs/heads/master | 2022-12-08T09:15:33.171941 | 2020-09-03T08:52:40 | 2020-09-03T08:52:40 | 278,132,304 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,157 | py | # CMPT 145 Course material
# Copyright (c) 2017-2020 Michael C Horsch
# All rights reserved.
#
# This document contains resources for homework assigned to students of
# CMPT 145 and shall not be distributed without permission. Posting this
# file to a public or private website, or providing this file to a person
# not registered in CMPT 145, constitutes Academic Misconduct, according
# to the University of Saskatchewan Policy on Academic Misconduct.
#
# Synopsis:
# Defines the tree node ADT
#
# A treenode is a simple container with three pieces of information
# data: the contained information
# left: a reference to another treenode or None
# right: a reference to another treenode or None
class treenode(object):
    """A binary-tree node: a payload plus left and right child links.

    Attributes:
        data:  the value stored at this node
        left:  the left child treenode, or None
        right: the right child treenode, or None
    """

    def __init__(self, data, left=None, right=None):
        """Initialize the node with its data and optional children.

        Pre-conditions:
            data: any value to store in the node
            left / right: treenode instances or None (the default)
        """
        self.data = data
        self.left = left
        self.right = right
"jeremythegeek@gmail.com"
] | jeremythegeek@gmail.com |
80099c5b8ad73bfd5279f2df830b692a9f03d7c9 | 97cfa663a3c90404094cca0fca0936e3edc206cf | /bin/pildriver.py | f5adc136d85e320db568722798c34ac71a809fa7 | [] | no_license | ArchAiA/ecommerce | 267d56d59db0eeaf3bf1a691c82bc744cc55b706 | 0e35dd3ac25e4c41e196233dd035563ca337291b | refs/heads/master | 2020-05-20T04:54:29.685586 | 2015-01-27T02:50:59 | 2015-01-27T02:50:59 | 28,702,724 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,622 | py | #!/home/david/projects/ecommerce/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver:
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack = [item] + self.stack
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
top = self.stack[0]
self.stack = self.stack[1:]
return top
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self, 'format'): # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.stack = [dup] + self.stack
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1> [<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower> <image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
filter = eval("ImageFilter." + self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(filter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
"""usage: extrema
Push minimum and maximum pixel values of the top image.
"""
extrema = self.do_pop().extrema()
self.push(extrema[1])
self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset> <image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
"""usage: color <image:pic1>
Enhance color in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Color(image)
self.push(enhancer.enhance(factor))
def do_contrast(self):
"""usage: contrast <image:pic1>
Enhance contrast in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Contrast(image)
self.push(enhancer.enhance(factor))
def do_brightness(self):
"""usage: brightness <image:pic1>
Enhance brightness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Brightness(image)
self.push(enhancer.enhance(factor))
def do_sharpness(self):
"""usage: sharpness <image:pic1>
Enhance sharpness in the top image.
"""
from PIL import ImageEnhance
factor = float(self.do_pop())
image = self.do_pop()
enhancer = ImageEnhance.Sharpness(image)
self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
"Interpret a list of PILDriver commands."
list.reverse()
while len(list) > 0:
self.push(list[0])
list = list[1:]
if self.verbose:
print("Stack: " + repr(self.stack))
top = self.top()
if not isinstance(top, str):
continue
funcname = "do_" + top
if not hasattr(self, funcname):
continue
else:
self.do_pop()
func = getattr(self, funcname)
func()
if __name__ == '__main__':
import sys
try:
import readline
except ImportError:
pass # not available on all platforms
# If we see command-line arguments, interpret them as a stack state
# and execute. Otherwise go interactive.
driver = PILDriver()
if len(sys.argv[1:]) > 0:
driver.execute(sys.argv[1:])
else:
print("PILDriver says hello.")
while True:
try:
if sys.version_info[0] >= 3:
line = input('pildriver> ')
else:
line = raw_input('pildriver> ')
except EOFError:
print("\nPILDriver says goodbye.")
break
driver.execute(line.split())
print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
| [
"xarchaiax@gmail.com"
] | xarchaiax@gmail.com |
10c08ee01135eb368278772c43326a847c319818 | 30d1b89b67d48efdacce5bceeee2c734bee2b509 | /manual_translation/build/mavros/catkin_generated/pkg.develspace.context.pc.py | 9355e7e6b2a3491d05ae969e2d0b01ad1bcdb6e5 | [] | no_license | ParthGaneriwala/uppaal2ros | db4a6b20c78e423511e565477a2461942c2adceb | f88b2b860b0b970b61110a323d0397352785c9e2 | refs/heads/main | 2023-02-20T19:36:22.406515 | 2021-01-28T18:58:44 | 2021-01-28T18:58:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 915 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/adi/ardu_ws/src/mavros/mavros/include;/usr/include;/usr/include/eigen3".split(';') if "/home/adi/ardu_ws/src/mavros/mavros/include;/usr/include;/usr/include/eigen3" != "" else []
PROJECT_CATKIN_DEPENDS = "diagnostic_msgs;diagnostic_updater;eigen_conversions;geographic_msgs;geometry_msgs;libmavconn;mavros_msgs;message_runtime;nav_msgs;pluginlib;roscpp;sensor_msgs;std_msgs;tf2_ros;trajectory_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lmavros;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libGeographic.so".split(';') if "-lmavros;/usr/lib/x86_64-linux-gnu/libboost_system.so;/usr/lib/x86_64-linux-gnu/libGeographic.so" != "" else []
PROJECT_NAME = "mavros"
PROJECT_SPACE_DIR = "/home/adi/ardu_ws/devel/.private/mavros"
PROJECT_VERSION = "1.4.0"
| [
"adithyatp@yahoo.com"
] | adithyatp@yahoo.com |
4989b14c294d2a2ac4e3e1730a18779c4bec132b | 2eaf2fee5dce43aa2afebe9f92fef2eb90c50acc | /utils/formatter.py | 4aaaa2ec1e23c3fc3bc2c07994681d552028f6b5 | [] | no_license | xiaobo4853464/gen_interfaces_by_python_pb | ada96c13b8e42e9e41733298b4ca512fc3d0fcfd | d2d3658abaf1c10f8e52de43cc7eb8138fd022f7 | refs/heads/master | 2023-07-11T12:46:59.454303 | 2021-08-25T05:59:45 | 2021-08-25T05:59:45 | 399,413,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | def to_underline(text):
lst = []
for index, char in enumerate(text):
if char.isupper() and index != 0:
lst.append("_")
lst.append(char)
return "".join(lst).lower()
| [
"xiaobo@bilibili.com"
] | xiaobo@bilibili.com |
a90188d57d5de7480ddf16ca04b2ed4479b6d35c | a104169468c4d8218e0b2e872c8d29ebcb619cb0 | /factorial recursion.py | 6ce0bb5f44a5acbb861eac045f2512e20ba06d0e | [] | no_license | jimmyjiang914/Python-Coding-Challenges-and-Projects | 0972ca41284a3b9dd4852249b486ff3363458eb4 | aeb5496127b34aee8da130ae19c95852b73b84df | refs/heads/master | 2020-04-15T01:33:42.867671 | 2019-03-05T01:39:07 | 2019-03-05T01:39:07 | 164,281,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 132 | py | def factorial(num):
if num <= 0:
return 1
fact = num * int(factorial(num - 1))
return fact
print(factorial(10)) | [
"jimmyjiang914@gmail.com"
] | jimmyjiang914@gmail.com |
29eb3fd7ab1c3b7429174b8f207792f27f12a82e | 67829789237e262733ac8e04035a136bb1a28574 | /Diplom/venv/Scriptses/Socket_server.py | e079431e0d64bddd60bb38ba63847ed42308c706 | [] | no_license | ATOM-Games/SystemModule | fe964554518314501f504a1cb02d5d277e5369f4 | 0dfc01c3f69765a54b2f2013d5054b13a3eed9e6 | refs/heads/master | 2023-05-13T09:00:39.284312 | 2021-06-10T13:48:01 | 2021-06-10T13:48:01 | 363,838,762 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | import socket, cv2, pickle, struct
#def createSocket(c_ip, c_pt):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_name = socket.gethostname()
host_ip = socket.gethostbyname(host_name)
#host_ip = c_ip
port = 5001
#port = c_pt
socket_address = (host_ip, port)
server_socket.bind(socket_address)
server_socket.listen(5)
print("Listen : socket address ", socket_address)
while True:
client_socket,addr = server_socket.accept()
print("GOT CONNECTION : ", addr)
if client_socket:
vid = cv2.VideoCapture(0)
while (vid.isOpened()):
img,frame = vid.read()
a = pickle.dumps(frame)
message = struct.pack("Q", len(a)) + a | [
"silatrotila0atom@gmail.com"
] | silatrotila0atom@gmail.com |
ffcbc42c2ce96ab967b9116f34e8812bdb1d4389 | 9661a19548c9886beb4965f8b404fc61f0f6831e | /murcs/solution_Load.py | f7659fd800072d2153eb4597a3453a8cde30f272 | [] | no_license | dirkhpe/bv | d3ee2f43ac1cc0b14b38b40417adbd96335db818 | 7725ebc01b3b981897f018a5e81bfd8a62dea11d | refs/heads/master | 2022-01-31T00:30:46.056907 | 2019-06-05T08:30:05 | 2019-06-05T08:30:05 | 107,697,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | """
This script will load a solution file.
"""
import argparse
import logging
import pandas
from lib import my_env
from lib import murcsrest
if __name__ == "__main__":
    # Configure command line arguments
    parser = argparse.ArgumentParser(
        description="Load a Server file into Murcs"
    )
    parser.add_argument('-f', '--filename', type=str, required=True,
                        help='Please provide the solution file to load.')
    args = parser.parse_args()
    # Initialize environment/config and the Murcs REST client.
    cfg = my_env.init_env("bellavista", __file__)
    r = murcsrest.MurcsRest(cfg)
    logging.info("Arguments: {a}".format(a=args))
    # Read the file
    df = pandas.read_excel(args.filename)
    my_loop = my_env.LoopInfo("Solutions", 20)
    # One REST call per spreadsheet row; LoopInfo reports progress every 20 rows.
    for row in df.iterrows():
        my_loop.info_loop()
        # Get excel row in dict format
        xl = row[1].to_dict()
        solId = xl["solutionId"]
        # clientId is dropped -- presumably not part of the REST payload
        # (TODO confirm against the murcsrest API).
        xl.pop("clientId")
        payload = dict(
            solId=solId
        )
        # Skip NaN cells so empty optional columns are omitted from the payload.
        for k in xl:
            if pandas.notnull(xl[k]):
                payload[k] = xl[k]
        r.add_sol(solId, payload)
    my_loop.end_loop()
"dirk.vermeylen@skynet.be"
] | dirk.vermeylen@skynet.be |
c50a065909fa75ed776634d4d0a5b310ec4664b3 | d9187f7b4f2f43c554b7bca8ffb4b70693d42027 | /gcal/config/desktop.py | 958ba9197a1e6b5034d067e7d62a4f288fdb7a80 | [
"MIT"
] | permissive | sahil19896/test | 373de8cf00240542c6102d4b8d24848eb4ad5c1e | 363312af9777c2429130d2e0f0277a63554ef7ed | refs/heads/master | 2021-05-05T05:25:47.474564 | 2018-01-24T04:13:20 | 2018-01-24T04:13:20 | 82,763,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the desktop icon definition for the Gcal Sync module."""
	gcal_icon = {
		"module_name": "Gcal Sync",
		"color": "grey",
		"icon": "fa fa-calendar",
		"type": "module",
		"label": _("Gcal Sync")
	}
	return [gcal_icon]
| [
"sahil19896@gmail.com"
] | sahil19896@gmail.com |
271ccaf487e3b9e2b41a247f37bd6b851cace20f | 728af4b851fec43517d0754fddbb8b54f8c69f5b | /lag/locations/migrations/0002_auto__add_placetype__chg_field_lair_created__add_field_place_placetype.py | 007177829ecb19bed1846accf7535dabcd891d74 | [] | no_license | davidmiller/lag | ef389fff435536fc7fa3405244575dc8409208e2 | 33a57ea5ad83965822761ad55914c48c3e486d4d | refs/heads/master | 2016-09-11T03:22:05.501100 | 2011-04-29T16:11:10 | 2011-04-29T16:11:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,317 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create PlaceType, add new Place counters and
        FK, and widen the `created` columns from Date to DateTime."""
        # Adding model 'PlaceType'
        db.create_table('locations_placetype', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=200)),
        ))
        db.send_create_signal('locations', ['PlaceType'])
        # Changing field 'Lair.created'
        db.alter_column('locations_lair', 'created', self.gf('django.db.models.fields.DateTimeField')())
        # Adding field 'Place.placetype'
        db.add_column('locations_place', 'placetype', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['locations.PlaceType'], null=True, blank=True), keep_default=False)
        # Adding field 'Place.visits'
        db.add_column('locations_place', 'visits', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'Place.unique_visitors'
        db.add_column('locations_place', 'unique_visitors', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Adding field 'Place.items_found'
        db.add_column('locations_place', 'items_found', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
        # Changing field 'Place.created'
        db.alter_column('locations_place', 'created', self.gf('django.db.models.fields.DateTimeField')())
def backwards(self, orm):
# Deleting model 'PlaceType'
db.delete_table('locations_placetype')
# Changing field 'Lair.created'
db.alter_column('locations_lair', 'created', self.gf('django.db.models.fields.DateField')())
# Deleting field 'Place.placetype'
db.delete_column('locations_place', 'placetype_id')
# Deleting field 'Place.visits'
db.delete_column('locations_place', 'visits')
# Deleting field 'Place.unique_visitors'
db.delete_column('locations_place', 'unique_visitors')
# Deleting field 'Place.items_found'
db.delete_column('locations_place', 'items_found')
# Changing field 'Place.created'
db.alter_column('locations_place', 'created', self.gf('django.db.models.fields.DateField')())
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'locations.lair': {
'Meta': {'object_name': 'Lair'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lair_createdby'", 'null': 'True', 'to': "orm['players.Player']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Region']", 'null': 'True', 'blank': 'True'})
},
'locations.place': {
'Meta': {'object_name': 'Place'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.Player']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'items_found': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'placetype': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['locations.PlaceType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {}),
'region': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Region']", 'null': 'True', 'blank': 'True'}),
'unique_visitors': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'visits': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'locations.placetype': {
'Meta': {'object_name': 'PlaceType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'locations.region': {
'Meta': {'object_name': 'Region'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {}),
'lon': ('django.db.models.fields.FloatField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'locations.visit': {
'Meta': {'object_name': 'Visit'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_visited': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True'}),
'place': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['locations.Place']"}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['players.Player']"}),
'visits': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'players.player': {
'Meta': {'object_name': 'Player'},
'firstname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'has_lair': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lairs': ('django.db.models.fields.related.ManyToManyField', [], {'default': 'None', 'to': "orm['locations.Lair']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'profile_pic': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'short_bio': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['locations']
| [
"david@rasputin.(none)"
] | david@rasputin.(none) |
7b3118281a9061e8454ba7aa096b0e07c96dd9f7 | 72d95f3b8c4de3e76c1125b2e6573539cadbe66a | /robot/logging.py | bf0f763b3df4200826a487da9e41562cb5cc3fba | [
"MIT"
] | permissive | skyzhishui/wukong-robot | 4b2c4255fffa61705b735fed57fd0c90c4d40d7c | 8395cd4030d340459edd7862b186a6f9395925ff | refs/heads/master | 2020-12-23T03:37:03.320598 | 2020-01-29T16:03:04 | 2020-01-29T16:03:04 | 237,020,512 | 2 | 0 | MIT | 2020-01-29T15:48:47 | 2020-01-29T15:48:46 | null | UTF-8 | Python | false | false | 2,197 | py | import logging
import os
from robot import constants
from logging.handlers import RotatingFileHandler
PAGE = 4096  # chunk size (bytes) read from the end of the file per step in tail()
# Re-export the standard logging levels so callers can write
# robot.logging.DEBUG etc. without importing the stdlib module.
DEBUG = logging.DEBUG
INFO = logging.INFO
WARNING = logging.WARNING
ERROR = logging.ERROR
def tail(filepath, n=10):
    """Return the last *n* lines of the file at *filepath* as one string.

    Equivalent to the shell command ``tail -n``.  The file is read in
    binary mode and decoded as UTF-8, so line endings are preserved
    (the last line keeps its newline only if the file ends with one).

    :param filepath: path of the file to read
    :param n: number of trailing lines to return (default 10)
    :returns: the last ``n`` lines joined into a single str
    """
    # A bounded deque keeps only the most recent n lines while streaming
    # the file once.  This replaces the previous page-seeking loop, which
    # could return a truncated first line (or split a multibyte UTF-8
    # character) when a read chunk started in the middle of a line.
    # The log file is capped at ~1 MB by RotatingFileHandler, so a full
    # sequential read is cheap.
    from collections import deque
    with open(filepath, 'rb') as f:
        last_lines = deque(f, maxlen=n)
    return ''.join(line.decode('utf-8') for line in last_lines)
def getLogger(name):
    """
    Same purpose as the standard ``logging.getLogger(name)``.

    Additionally attaches a :class:`RotatingFileHandler` writing to
    ``<TEMP_PATH>/wukong.log`` (1 MB per file, 5 backups).

    :param name: logger name, usually the caller's ``__name__``
    :returns: logger
    """
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(filename)s - %(funcName)s - line %(lineno)s - %(levelname)s - %(message)s')
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    # BUG FIX: logging.getLogger() returns the same object for the same
    # name, so calling this function twice used to attach a second
    # FileHandler and duplicate every record in wukong.log.  Only attach
    # the handler the first time.
    if not logger.handlers:
        # FileHandler
        file_handler = RotatingFileHandler(os.path.join(constants.TEMP_PATH, 'wukong.log'), maxBytes=1024*1024,backupCount=5)
        file_handler.setLevel(level=logging.DEBUG)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
    return logger
def readLog(lines=200):
    """
    Return the newest *lines* lines of the wukong log.

    :param lines: maximum number of lines to return
    :returns: the trailing log text, or '' when no log file exists yet
    """
    log_path = os.path.join(constants.TEMP_PATH, 'wukong.log')
    return tail(log_path, lines) if os.path.exists(log_path) else ''
| [
"m@hahack.com"
] | m@hahack.com |
8d91c7ad348d6353cd3e36d304f23edccb0e3233 | 645548c3e613ee163a2b573475f4b3c47ac1d129 | /Day26/listComprehension.py | fc5215b181e4903966a6332095b0437d2b218fea | [] | no_license | TrellixVulnTeam/Python-Development_PSHU | e61efea1119d3f81f814d5608d7fba3f0325989d | 3cdd783f9cdc08711a3c5f6d81cb40cb6b8ec0c1 | refs/heads/master | 2023-03-20T03:51:48.000715 | 2022-10-31T15:27:38 | 2022-10-31T15:27:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | print('hi')
new_list = [n+1 for n in range(1,5)]
new_list = [n*2 for n in range(1,5)]
names = ['alex', 'beth', 'caroline', 'dave', 'elanor', 'freddii']
names
['alex', 'beth', 'caroline', 'dave', 'elanor', 'freddie']
upper_names = [name.upper() for name in names if len(name)>5]
upper_names
['CAROLINE', 'ELANOR', 'FREDDIE']
numbers = [1,1,2,3,5,8,13,21,34,55]
result = [num for num in numbers if num % 2 == 0]
print(result) | [
"spurti20oct@gmail.com"
] | spurti20oct@gmail.com |
0c5c28f1f073e1781612b05f04198eed724d49cc | 7c756e1949c930be6c70ddef7900610f78af17df | /node_modules/rpio/build/config.gypi | cefab2d02e5ef3d87269c368959c4d82c2bc45c4 | [
"ISC",
"GPL-2.0-only",
"GPL-1.0-or-later",
"MIT"
] | permissive | machigattan/Wortschatz | b328df080e520eb5b693704fd718ad3a336bd2c5 | 171ba76bebdfd5feaa41e3aa0d07d73988c72c9a | refs/heads/main | 2023-06-19T10:15:39.292594 | 2021-07-14T21:40:02 | 2021-07-14T21:40:02 | 382,640,528 | 0 | 0 | MIT | 2021-07-13T14:52:54 | 2021-07-03T14:52:09 | null | UTF-8 | Python | false | false | 5,744 | gypi | # Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"arm_float_abi": "hard",
"arm_fpu": "vfpv3",
"arm_thumb": 0,
"arm_version": "7",
"asan": 0,
"build_v8_with_gn": "false",
"coverage": "false",
"dcheck_always_on": 0,
"debug_nghttp2": "false",
"debug_node": "false",
"enable_lto": "false",
"enable_pgo_generate": "false",
"enable_pgo_use": "false",
"error_on_warn": "false",
"force_dynamic_crt": 0,
"gas_version": "2.28",
"host_arch": "ia32",
"icu_data_in": "../../deps/icu-tmp/icudt69l.dat",
"icu_endianness": "l",
"icu_gyp_path": "tools/icu/icu-generic.gyp",
"icu_path": "deps/icu-small",
"icu_small": "false",
"icu_ver_major": "69",
"is_debug": 0,
"llvm_version": "0.0",
"napi_build_version": "8",
"node_byteorder": "little",
"node_debug_lib": "false",
"node_enable_d8": "false",
"node_install_npm": "true",
"node_module_version": 83,
"node_no_browser_globals": "false",
"node_prefix": "/",
"node_release_urlbase": "https://nodejs.org/download/release/",
"node_section_ordering_info": "",
"node_shared": "false",
"node_shared_brotli": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_nghttp2": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_target_type": "executable",
"node_use_bundled_v8": "true",
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_node_code_cache": "false",
"node_use_node_snapshot": "false",
"node_use_openssl": "true",
"node_use_v8_platform": "true",
"node_with_ltcg": "false",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_is_fips": "false",
"ossfuzz": "false",
"shlib_suffix": "so.83",
"target_arch": "arm",
"v8_enable_31bit_smis_on_64bit_arch": 0,
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_enable_lite_mode": 0,
"v8_enable_object_print": 1,
"v8_enable_pointer_compression": 0,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 1,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_siphash": 1,
"want_separate_host_toolset": 1,
"nodedir": "/home/pi/.cache/node-gyp/14.17.2",
"standalone_static_library": 1,
"cache_lock_stale": "60000",
"ham_it_up": "",
"legacy_bundling": "",
"sign_git_tag": "",
"user_agent": "npm/6.14.13 node/v14.17.2 linux arm",
"always_auth": "",
"bin_links": "true",
"key": "",
"allow_same_version": "",
"description": "true",
"fetch_retries": "2",
"heading": "npm",
"if_present": "",
"init_version": "1.0.0",
"user": "",
"prefer_online": "",
"force": "",
"only": "",
"read_only": "",
"cache_min": "10",
"init_license": "ISC",
"editor": "vi",
"rollback": "true",
"tag_version_prefix": "v",
"cache_max": "Infinity",
"timing": "",
"userconfig": "/home/pi/.npmrc",
"engine_strict": "",
"init_author_name": "",
"init_author_url": "",
"preid": "",
"tmp": "/tmp",
"depth": "Infinity",
"package_lock_only": "",
"save_dev": "",
"usage": "",
"metrics_registry": "https://registry.npmjs.org/",
"otp": "",
"package_lock": "true",
"progress": "true",
"https_proxy": "",
"save_prod": "",
"audit": "true",
"cidr": "",
"onload_script": "",
"sso_type": "oauth",
"rebuild_bundle": "true",
"save_bundle": "",
"shell": "/bin/bash",
"dry_run": "",
"format_package_lock": "true",
"prefix": "/usr",
"scope": "",
"browser": "",
"cache_lock_wait": "10000",
"ignore_prepublish": "",
"registry": "https://registry.npmjs.org/",
"save_optional": "",
"searchopts": "",
"versions": "",
"cache": "/home/pi/.npm",
"send_metrics": "",
"global_style": "",
"ignore_scripts": "",
"version": "",
"local_address": "",
"viewer": "man",
"node_gyp": "/usr/lib/node_modules/npm/node_modules/node-gyp/bin/node-gyp.js",
"audit_level": "low",
"prefer_offline": "",
"color": "true",
"sign_git_commit": "",
"fetch_retry_mintimeout": "10000",
"maxsockets": "50",
"offline": "",
"sso_poll_frequency": "500",
"umask": "0022",
"fund": "true",
"fetch_retry_maxtimeout": "60000",
"logs_max": "10",
"message": "%s",
"ca": "",
"cert": "",
"global": "",
"link": "",
"access": "",
"also": "",
"save": "true",
"unicode": "true",
"before": "",
"long": "",
"production": "",
"searchlimit": "20",
"unsafe_perm": "true",
"update_notifier": "true",
"auth_type": "legacy",
"node_version": "14.17.2",
"tag": "latest",
"git_tag_version": "true",
"commit_hooks": "true",
"script_shell": "",
"shrinkwrap": "true",
"fetch_retry_factor": "10",
"save_exact": "",
"strict_ssl": "true",
"dev": "",
"globalconfig": "/usr/etc/npmrc",
"init_module": "/home/pi/.npm-init.js",
"parseable": "",
"globalignorefile": "/usr/etc/npmignore",
"cache_lock_retries": "10",
"searchstaleness": "900",
"node_options": "",
"save_prefix": "^",
"scripts_prepend_node_path": "warn-only",
"group": "1000",
"init_author_email": "",
"searchexclude": "",
"git": "git",
"optional": "true",
"json": ""
}
}
| [
"58940060+machigattan@users.noreply.github.com"
] | 58940060+machigattan@users.noreply.github.com |
8181821781cbf9aadfd268ec9cb137c7902ca927 | 9d4471d2c22059a19982d66b18cb2de7517943e7 | /ccxt_unmerged/async_support/foblgate.py | 6fb8fe0fedd15446f91d6fe53a24c86a3cad941a | [] | no_license | pok1800/ccxt-unmerged | 89277da0f11051e9fdf3bca1f07368ee88163c23 | abb4e6947eb75557c09a8a9822ea64459f86b2e7 | refs/heads/master | 2023-07-14T11:02:57.420711 | 2021-08-24T20:12:18 | 2021-08-24T20:12:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,821 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import base64
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InvalidOrder
class foblgate(Exchange):
    """ccxt adapter for FOBLGATE, a South Korean cryptocurrency exchange.

    Wraps the public market-data endpoints and the private,
    HMAC-SHA512-signed trading endpoints of ``api2.foblgate.com``.
    Private calls require ``apiKey``, ``secret`` and ``uid``.
    """
    def describe(self):
        """Return the static exchange metadata merged over the base class."""
        return self.deep_extend(super(foblgate, self).describe(), {
            'id': 'foblgate',
            'name': 'FOBLGATE',
            'countries': ['KR'],  # South Korea
            'rateLimit': 500,
            'has': {
                'CORS': True,
                'createOrder': True,
                'cancelOrder': True,
                'createMarketOrder': True,
                'fetchTicker': False,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
                'fetchOrder': True,
                'fetchTrades': True,
                'fetchMyTrades': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/69025125/89286704-a5495200-d68d-11ea-8486-fe3fa693e4a6.jpg',
                'api': {
                    'public': 'https://api2.foblgate.com',
                    'private': 'https://api2.foblgate.com',
                },
                'www': 'https://www.foblgate.com',
                'doc': 'https://api-document.foblgate.com',
                'fees': 'https://www.foblgate.com/fees',
            },
            'api': {
                'public': {
                    'post': [
                        'ccxt/marketList',
                        'ccxt/orderBook',
                        'ccxt/trades',
                    ],
                },
                'private': {
                    'post': [
                        'ccxt/balance',
                        'ccxt/myTrades',
                        'ccxt/createOrder',
                        'ccxt/cancelOrder',
                        'ccxt/orderDetail',
                        'ccxt/openOrders',
                        'ccxt/closedOrders',
                    ],
                },
            },
            'requiredCredentials': {
                'uid': True,
            },
            'exceptions': {
                '400': BadRequest,
                '401': AuthenticationError,
                '403': AuthenticationError,
                '500': ExchangeError,
            },
        })
    async def fetch_markets(self, params={}):
        """Fetch the list of markets; the endpoint already returns
        ccxt-shaped market structures, so no parsing is needed."""
        response = await self.publicPostCcxtMarketList(params)
        marketList = self.safe_value(response, 'marketList')
        # {
        #     'ETH/BTC': {
        #       limits: {amount: [Object], price: [Object], cost: [Object]},
        #       precision: {amount: 8, price: 8},
        #       tierBased: False,
        #       percentage: True,
        #       taker: 0.03,
        #       maker: 0.03,
        #       symbol: 'ETH/BTC',
        #       active: True,
        #       baseId: 'ETH',
        #       quoteId: 'BTC',
        #       quote: 'BTC',
        #       id: 'ETH-BTC',
        #       base: 'ETH',
        #       info: {market: 'ETH/BTC', coinName: 'ETH', coinNameKo: '이더리움'}
        #     }
        # }
        return marketList
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """Fetch the order book for *symbol*; *limit* caps the depth."""
        await self.load_markets()
        request = {
            'pairName': symbol,
        }
        if limit is not None:
            request['count'] = limit
        response = await self.publicPostCcxtOrderBook(self.extend(request, params))
        # {
        #     bids: [
        #       [303100, 11.68805904],
        #       [303000, 0.61282982],
        #       [302900, 0.59681086]
        #     ],
        #     asks: [
        #       [303700, 0.99953148],
        #       [303800, 0.66825562],
        #       [303900, 1.47346607],
        #     ],
        #     timestamp: None,
        #     datetime: None,
        #     nonce: None
        # }
        return self.parse_order_book(response, symbol, None, 'bids', 'asks', 'price', 'amount')
    def parse_trade(self, trade, market=None):
        """Normalize a raw trade (public or private format) into the
        unified ccxt trade structure."""
        #
        # fetchTrades(public)
        #
        #     {
        #         "transaction_date":"2020-04-23 22:21:46",
        #         "type":"ask",
        #         "units_traded":"0.0125",
        #         "price":"8667000",
        #         "total":"108337"
        #     }
        #
        # fetchOrder(private)
        #
        #     {
        #         "transaction_date": "1572497603902030",
        #         "price": "8601000",
        #         "units": "0.005",
        #         "fee_currency": "KRW",
        #         "fee": "107.51",
        #         "total": "43005"
        #     }
        #
        # a workaround for their bug in date format, hours are not 0-padded
        timestamp = None
        transactionDatetime = self.safe_string(trade, 'transaction_date')
        if transactionDatetime is not None:
            parts = transactionDatetime.split(' ')
            numParts = len(parts)
            if numParts > 1:
                # "YYYY-MM-DD H:MM:SS" form -- zero-pad the hour if needed
                transactionDate = parts[0]
                transactionTime = parts[1]
                if len(transactionTime) < 8:
                    transactionTime = '0' + transactionTime
                timestamp = self.parse8601(transactionDate + ' ' + transactionTime)
            else:
                # microsecond-epoch form -- scale down to milliseconds
                timestamp = self.safe_integer_product(trade, 'transaction_date', 0.001)
        if timestamp is not None:
            timestamp -= 9 * 3600000  # they report UTC + 9 hours, server in Korean timezone
        type = None
        side = self.safe_string(trade, 'type')
        side = 'sell' if (side == 'ask') else 'buy'
        id = self.safe_string(trade, 'cont_no')
        symbol = None
        if market is not None:
            symbol = market['symbol']
        price = self.safe_float(trade, 'price')
        amount = self.safe_float(trade, 'units_traded')
        cost = self.safe_float(trade, 'total')
        if cost is None:
            if amount is not None:
                if price is not None:
                    cost = price * amount
        fee = None
        feeCost = self.safe_float(trade, 'fee')
        if feeCost is not None:
            feeCurrencyId = self.safe_string(trade, 'fee_currency')
            feeCurrencyCode = self.common_currency_code(feeCurrencyId)
            fee = {
                'cost': feeCost,
                'currency': feeCurrencyCode,
            }
        return {
            'id': id,
            'info': trade,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'order': None,
            'type': type,
            'side': side,
            'takerOrMaker': None,
            'price': price,
            'amount': amount,
            'cost': cost,
            'fee': fee,
        }
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """Fetch recent public trades for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'pairName': symbol,
            'since': since,
            'cnt': limit,
        }
        response = await self.publicPostCcxtTrades(self.extend(request, params))
        data = self.safe_value(response, 'data', [])
        return self.parse_trades(data, market, since, limit)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the authenticated account's trade history for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'pairName': symbol,
            'cnt': limit,
            'since': since,
        }
        response = await self.privatePostCcxtMyTrades(self.extend(request, params))
        data = self.safe_value(response, 'data', [])
        return self.parse_trades(data, market, since, limit)
    async def fetch_balance(self, params={}):
        """Fetch the account balance (already ccxt-shaped by the API)."""
        await self.load_markets()
        response = await self.privatePostCcxtBalance(params)
        # {
        #     BTC: {total: 0, used: 0, free: 0},
        #     ETH: {total: 0, used: 0, free: 0},
        #     info: {}
        # }
        return self.parse_balance(response)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place a limit order; market orders are rejected (unsupported).

        *side* 'buy'/'sell' is translated to the exchange's 'bid'/'ask'.
        """
        if type == 'market':
            raise InvalidOrder(self.id + ' createOrder type = market, currently not supported.')
        action = None
        if side == 'buy':
            action = 'bid'
        elif side == 'sell':
            action = 'ask'
        else:
            raise InvalidOrder(self.id + ' createOrder allows buy or sell side only!')
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'pairName': market['symbol'],
            'type': type,
            'action': action,
            'amount': self.amount_to_precision(symbol, amount),
            'price': self.price_to_precision(symbol, price),
        }
        response = await self.privatePostCcxtCreateOrder(self.extend(request, params))
        # {
        #     info: {data: '2008042'},
        #     id: '2008042',
        #     symbol: 'BTC/KRW',
        #     type: 'limit',
        #     side: 'buy',
        #     amount: 0.1,
        #     price: 9000000
        # }
        return response
    async def cancel_order(self, id, symbol=None, params={}):
        """Cancel the order with the given exchange order id."""
        await self.load_markets()
        request = {
            'ordNo': id,
        }
        response = await self.privatePostCcxtCancelOrder(self.extend(request, params))
        # {status: '0'}
        return response
    def parse_order(self, order, market=None):
        """Normalize a raw order into the unified ccxt order structure,
        parsing any embedded trades along the way."""
        id = self.safe_string(order, 'id')
        timestamp = self.safe_value(order, 'timestamp')
        lastTradeTimestamp = self.safe_value(order, 'lastTradeTimestamp')
        symbol = self.safe_string(order, 'symbol')
        type = self.safe_string(order, 'type')
        side = self.safe_string(order, 'side')
        price = self.safe_float(order, 'price')
        amount = self.safe_float(order, 'amount')
        cost = self.safe_float(order, 'cost')
        average = self.safe_float(order, 'average')
        filled = self.safe_float(order, 'filled')
        remaining = self.safe_float(order, 'remaining')
        status = self.safe_string(order, 'status')
        fee = self.safe_value(order, 'fee')
        trades = self.safe_value(order, 'trades', [])
        trades = self.parse_trades(trades, market, None, None, {
            'order': id,
            'type': type,
        })
        return {
            'info': order,
            'id': id,
            'clientOrderId': None,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': lastTradeTimestamp,
            'symbol': symbol,
            'type': type,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': cost,
            'average': average,
            'filled': filled,
            'remaining': remaining,
            'status': status,
            'fee': fee,
            'trades': trades,
        }
    async def fetch_order(self, id, symbol=None, params={}):
        """Fetch a single order by its exchange order id."""
        await self.load_markets()
        request = {
            'ordNo': id,
        }
        response = await self.privatePostCcxtOrderDetail(self.extend(request, params))
        order = self.safe_value(response, 'order')
        return self.parse_order(order)
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's currently open orders for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'pairName': market['symbol'],
            'since': since,
            'cnt': limit,
        }
        response = await self.privatePostCcxtOpenOrders(self.extend(request, params))
        orderList = self.safe_value(response, 'orderList', [])
        return self.parse_orders(orderList, market, since, limit)
    async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch the account's closed (filled/cancelled) orders for *symbol*."""
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'pairName': market['symbol'],
            'since': since,
            'cnt': limit,
        }
        response = await self.privatePostCcxtClosedOrders(self.extend(request, params))
        orderList = self.safe_value(response, 'orderList', [])
        return self.parse_orders(orderList, market, since, limit)
    def nonce(self):
        """Monotonically increasing nonce (milliseconds since epoch)."""
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the signed request for an endpoint.

        Private POSTs are authenticated with HMAC-SHA512 over the
        url-encoded {apiKey, mbId, nonce} + query, base64-encoded and
        sent in the Api-Sign header alongside Api-Key/Api-Uid/Api-Nonce.
        """
        endpoint = '/' + self.implode_params(path, params)
        url = self.urls['api'][api] + endpoint
        query = self.omit(params, self.extract_params(path))
        if method != 'POST':
            if query:
                url += '?' + self.urlencode(query)
        else:
            if api == 'private':
                self.check_required_credentials()
                body = self.urlencode(query)
                nonce = str(self.nonce())
                auth = self.urlencode(self.extend({
                    'apiKey': self.apiKey,
                    'mbId': self.uid,
                    'nonce': nonce,
                }, query))
                signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha512)
                signature64 = self.decode(base64.b64encode(self.encode(signature)))
                headers = {
                    'Accept': 'application/json',
                    'Content-Type': 'application/x-www-form-urlencoded',
                    'Api-Key': self.apiKey,
                    'Api-Uid': self.uid,
                    'Api-Sign': str(signature64),
                    'Api-Nonce': nonce,
                }
            else:
                body = self.urlencode(query)
                headers = {
                    'Accept': 'application/json',
                    'Content-Type': 'application/x-www-form-urlencoded',
                }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Map the API's 'code' field to ccxt exceptions; code '0' is success."""
        if response is None:
            return  # fallback to default error handler
        code = self.safe_value(response, 'code')
        if code is not None:
            if code == '0':
                return
            feedback = self.id + ' ' + body
            self.throw_exactly_matched_exception(self.exceptions, code, feedback)
            raise ExchangeError(feedback)  # unknown message
| [
"binares@protonmail.com"
] | binares@protonmail.com |
c532a87f0b34bea899eef6b16ac2751878b160e7 | d0fcb45c6016aa5965ecf864545de20f5d513543 | /UserAuthentication_Project/UserAuthentication_Project/urls.py | 91273e7049c228b5fb25f5a6ec0656d0135178b6 | [] | no_license | shawongithub/user_authentication | aef72eb8bcef42d84b6a363843c41f4f7c084c7e | e05420ea2c7f010bc519a32fb9eadb280be53a3f | refs/heads/master | 2021-03-20T18:41:43.959033 | 2020-03-14T06:36:23 | 2020-03-14T06:36:23 | 247,224,968 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | """UserAuthentication_Project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
# URL routes for the project: the Django admin site plus the Login_app
# application's routes mounted at the site root.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('',include('Login_app.urls')),
]
| [
"shawonislam121009@gmail.com"
] | shawonislam121009@gmail.com |
ec37836dacabe37eaae57719618fa1eba075bd76 | 65c20548a00651d66fe84b68df6a48147e0f2bdf | /get_transformed_data_with_decay.py | d2f16fa14ecd4a7312abaaccc84a761669dd076b | [
"MIT"
] | permissive | BoyuanWangMust/EEC | c4110e46ef179f18ba2aa378507727ca53604f73 | ffb65e6701f5316b69c1ef3c3c130f00b73a18da | refs/heads/main | 2023-04-16T21:56:26.445157 | 2021-05-02T05:30:49 | 2021-05-02T05:30:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | import os
import sys
import pickle
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
import numpy
import torch
from torch.utils.data import Dataset
class getTransformedData(Dataset):
    """Torch ``Dataset`` over in-memory images and labels, with optional
    per-sample ages, for incremental-learning experiments.

    If a ``transform`` callable is supplied it is applied to each image
    (cast to ``uint8`` first) on every access.
    """
    def __init__(self, images, labels, transform=None, ages=None, seed=1):
        # Materialize everything as numpy arrays up front; `seed` is
        # accepted for API compatibility but currently unused.
        self.train_images = np.array(images)
        self.labels = np.array(labels)
        self.transform = transform
        self.ages = None if ages is None else np.array(ages)

    def __len__(self):
        """Number of samples (one label per sample)."""
        return len(self.labels)

    def __getitem__(self, index):
        """Return ``(image, label)`` or ``(image, label, age)`` for *index*."""
        sample = self.train_images[index]
        target = self.labels[index]
        if self.transform:
            sample = self.transform(np.uint8(sample))
        if self.ages is None:
            return sample, target
        return sample, target, self.ages[index]
| [
"noreply@github.com"
] | BoyuanWangMust.noreply@github.com |
39279e97122d85a82bc5e67ef6f54acfac624980 | a923ed38cae5bf9443fd10829ce84623f8cec52f | /SpecificRiskEstimation.py | 74ff1d952673017b5441860913726914a731b4d2 | [] | no_license | drnighthan/MFM | 740073c51e9805ddfc1186915a8c6ea7da635b9c | 6d80a744c9b4b6011b803f156fbb79e6e7776252 | refs/heads/master | 2021-09-10T21:14:56.802129 | 2018-04-02T07:42:03 | 2018-04-02T07:42:03 | 126,920,423 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,465 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 22 11:24:15 2018
@author: Han
"""
import pandas as pd
import os
import numpy as np
def SpecificRisk(df,factorret,time,stock,window,indcode = '2012',method = 'EWMA',halfday=60):
    """Estimate each stock's specific (idiosyncratic) return variance.

    Residuals are computed as realized monthly returns minus the return
    fitted from the factor exposures in *df* and the factor returns in
    *factorret*; residual variance is then taken over a rolling *window*,
    optionally with exponentially decaying (EWMA) weights.

    :param df: factor-exposure panel -- a DataFrame or a CSV path
    :param factorret: factor-return panel -- a DataFrame or a CSV path
    :param time: name of the date column (e.g. 'Trddt')
    :param stock: name of the stock-id column (e.g. 'Stkcd')
    :param window: rolling window length (periods) for the variance
    :param indcode: industry scheme: '2001', '2012' or 'LargeIndustry'
    :param method: 'EWMA' for half-life weighting, else plain rolling var
    :param halfday: EWMA half-life in periods
    :returns: DataFrame with columns [stock, time, 'SpecificVar']
    """
    # Accept either a pre-loaded DataFrame or a CSV path for both inputs.
    if type(df) == str:
        df = pd.read_csv(df,parse_dates=[str(time)])
        del df['Unnamed: 0']
    elif type(df) == pd.DataFrame:
        df[str(time)] = pd.to_datetime(df[str(time)])
    if type(factorret) == str:
        factorret = pd.read_csv(factorret,parse_dates=[str(time)])
        del factorret['Unnamed: 0']
    elif type(factorret) == pd.DataFrame:
        factorret[str(time)] = pd.to_datetime(factorret[str(time)])
    industrypath = os.path.join(os.path.abspath('.'),'InPutData','company_info_1990_2017','TRD_Co.csv')
    industry = pd.read_csv(industrypath,encoding = 'gbk')
    if indcode == '2001':
        industryname = 'Nindcd'
    elif indcode == '2012':
        industryname = 'Nnindcd'
    elif indcode == 'LargeIndustry':
        industryname = 'Indcd'
    else:
        # Previously an unknown code fell through to a NameError below.
        raise ValueError("indcode must be '2001', '2012' or 'LargeIndustry'")
    # One-hot encode industry membership and attach it to the exposures.
    temp = pd.get_dummies(industry[industryname])
    industry = pd.concat([industry[['Stkcd']],temp],axis=1)
    # BUG FIX: rename() without columns= relabels the *index*, leaving the
    # 'Stkcd' column name untouched (a silent no-op for non-default stock
    # names); rename the column explicitly.
    industry = industry.rename(columns={'Stkcd':str(stock)})
    df = pd.merge(df,industry,on=[str(stock)])
    factorlist = list(factorret.columns)
    try:
        factorlist.remove(str(time))
        factorlist.remove('const')
        factorlist.remove('num')
    except:
        pass
    data = pd.merge(df,factorret,on=[str(time)])
    # Fitted return = sum over factors of exposure (_x) * factor return (_y)
    # plus the regression intercept.
    templist = [data[i+'_x']*data[i+'_y'] for i in factorlist]
    data['fitted'] = np.sum(templist,axis=0)
    data['fitted'] = data['fitted'] + data['const']
    retdatapath = os.path.join(os.path.abspath('.'),'InPutData','monthly_return_2005-2017')
    retdata = pd.DataFrame()
    for i in os.listdir(retdatapath):
        temp = os.path.join(retdatapath,i,'TRD_Mnth.csv')
        temp = pd.read_csv(temp,parse_dates = ['Trdmnt'])
        retdata = pd.concat([retdata,temp],axis=0)
    # Drop market types 2 and 8 -- presumably non A-share boards; confirm
    # against the CSMAR code book.
    retdata = retdata[retdata['Markettype']!= 2]
    retdata = retdata[retdata['Markettype']!= 8]
    retdata = retdata[['Stkcd','Trdmnt','Mretnd']]
    retdata.columns = [str(stock),str(time),'Mretnd']
    # BUG FIX: deduplicate on the *renamed* columns; the hardcoded
    # ['Stkcd','Trddt'] only worked for the default column names.
    retdata = retdata.drop_duplicates([str(stock),str(time)])
    data = pd.merge(data,retdata,on=[str(time),str(stock)])
    data['residual'] = data['Mretnd'] - data['fitted']
    data = data[[str(stock),str(time),'residual']]
    def weightcreate(x,num):
        # Exponential decay weights (newest observation weighted most).
        weight = (0.5)**(x)
        weightlist = [(weight**(num-i))**2 for i in range(num)]
        return weightlist
    ewmaweight = weightcreate(1/halfday,window)
    def SpecificVarCal(df,window,method):
        # Rolling variance of the residual series for a single stock.
        df = df.sort_values(str(time))
        if method == 'EWMA':
            df['SpecificVar'] = df['residual'].rolling(window).apply(lambda x : np.var(x*ewmaweight))
        else:
            df['SpecificVar'] = df['residual'].rolling(window).var()
        del df[str(stock)]
        return df
    data = data.groupby(str(stock)).apply(SpecificVarCal,window,method).reset_index()
    data = data.dropna()
    del data['level_1']
    data = data[[str(stock),str(time),'SpecificVar']]
    return(data)
return(data)
if __name__ == '__main__' :
    # Preprocessed factor exposures and the regression-estimated factor returns.
    df = os.path.join(os.path.abspath('.'),'Data','Factor_Preprocessing.csv')
    factorret = os.path.join(os.path.abspath('.'),'Data','Factor_Return_Regression.csv')
    # 12-month rolling window on the 'Trddt'/'Stkcd' panel; drop incomplete rows.
    test = SpecificRisk(df,factorret,'Trddt','Stkcd',12).dropna()
    test.to_csv(os.path.join(os.path.abspath('.'),'Data','SpecificRisk.csv'))
| [
"awsaws98@163.com"
] | awsaws98@163.com |
ffc8618fc8a2d869759cb4bd9356f6df5ecafba8 | 14efc9ec42d4fe9beecad1cff66bba77e68b8b74 | /vinod/Scripts/easy_install-3.7-script.py | 7c2e1061a952cf3de73b1ff4da86706252c40275 | [] | no_license | vinodkumar96/ModelForms_ClassBased | b515386554e6241af749742319d46d317755d48f | c1435b7145ac9eb9dff45618ad4c3446667cdc5a | refs/heads/master | 2020-06-14T19:15:55.312078 | 2019-07-03T17:38:07 | 2019-07-03T17:38:07 | 195,100,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 482 | py | #!C:\PyCharm\PycharmProjects\new_projects\ModelForms_ClassBased\vinod\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
# Pin the exact setuptools distribution this wrapper was generated against.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip the "-script.py(w)"/".exe" suffix so argv[0] matches the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
    )
| [
"vinodkumaryv96@gmail.com"
] | vinodkumaryv96@gmail.com |
e884df3f64553ec1d649d28e8ef4317ea6af6c01 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /organizations_write_f/delegated-administrator_deregister.py | 92bfd542b0cbd45339078af1faf66f98e2e5cf32 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/describe-instances.html
if __name__ == '__main__':
    """
    list-delegated-administrators : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/list-delegated-administrators.html
    register-delegated-administrator : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/organizations/register-delegated-administrator.html
    """
    # Prompt for the required parameters and invoke the matching AWS CLI call.
    write_parameter("organizations", "deregister-delegated-administrator")
"hcseo77@gmail.com"
] | hcseo77@gmail.com |
cef5c6bcf329c32d42ac70d1fddaa4b1504853c0 | b6a99d9a8ec53558c0bf90268efafbb26771cfca | /Blog/migrations/0016_alter_ml_img.py | 6f2a5cf9e462ebf1665d5e7e37b82730ff1c379c | [] | no_license | Akanksha2403/pb | a8837362a63de2bcb689913699805b102dd76545 | 7c1aa1cb54f58c00a29a15838aba8127b51ed97e | refs/heads/main | 2023-08-06T04:03:39.596242 | 2021-10-11T05:51:13 | 2021-10-11T05:51:13 | 395,682,254 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | # Generated by Django 3.2.6 on 2021-08-12 05:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: redefine Ml.img as an ImageField uploading to 'media'
    with an empty-string default (do not edit by hand)."""

    dependencies = [
        ('Blog', '0015_alter_ml_img'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ml',
            name='img',
            field=models.ImageField(default='', upload_to='media'),
        ),
    ]
| [
"lit2020026@iiitl.ac.in"
] | lit2020026@iiitl.ac.in |
79f0b49bb22f453694f1077838883504b66f2b6a | 1503b5e3150951d5ed8294501212c6c3fab06663 | /tests/test_dependencies/test_disable.py | 54512d37a4b3ac2b8c8605df277c35e23e789456 | [
"MIT"
] | permissive | dshepelev15/fastapi-crudrouter | 714ffa11721734c5318a124e17c96234931f731f | 5e38d0273c8992ea27f60e507800f2e3760973d6 | refs/heads/master | 2023-07-15T04:04:18.290985 | 2021-08-30T01:50:54 | 2021-08-30T01:50:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | from fastapi import Depends, HTTPException
from fastapi.security import OAuth2PasswordBearer
import pytest
from fastapi_crudrouter.core import CRUDGenerator
from tests.implementations import implementations
from tests.conftest import yield_test_client
# Routers under test, plus the bearer header some endpoints would normally need.
URLS = ["/potato", "/carrot"]
AUTH = {"Authorization": "Bearer my_token"}
# One "<route>_route" keyword per generated CRUD route; all set False to disable.
KEY_WORDS = {f"{r}_route" for r in CRUDGenerator.get_routes()}
DISABLE_KWARGS = {k: False for k in KEY_WORDS}
@pytest.fixture(params=implementations, scope="class")
def client(request):
    """Yield one test client per backend implementation, with every CRUD route disabled."""
    impl, dsn = request.param
    app, router, settings = impl(db_uri=dsn)
    # Mount one router per model config, passing <route>_route=False for all routes.
    [app.include_router(router(**s, **DISABLE_KWARGS)) for s in settings]
    yield from yield_test_client(app, impl)
@pytest.mark.parametrize("url", URLS)
def test_route_disable(client, url):
    """Every collection and item endpoint must 404 once all routes are disabled."""
    # Collection endpoints: two reads followed by a create.
    for call in (client.get, client.get, client.post):
        assert call(url).status_code == 404
    # Item endpoints: read/update/delete across a sample of ids.
    for item_id in [-1, 1, 0, 14]:
        item_path = f"{url}/{item_id}"
        for call in (client.get, client.put, client.delete):
            assert call(item_path).status_code == 404
| [
"cadamrun@gmail.com"
] | cadamrun@gmail.com |
7b72d2a794746c8ae3ce5d3b0062853fb3515f96 | 02f3fcbd495c2282694ad0612a66640b4731582f | /helloworld/helloworld.py | 610c9184d9177bfd6ddccf2c45d3cd1da7bb4e73 | [
"MIT"
] | permissive | plter/LearnKivy2021 | 8f39f99bf3123d23230e047e853dfeba983329f8 | 04bf9c19c707528f4be8ca666192363aae95fb9f | refs/heads/main | 2023-06-04T04:03:41.829403 | 2021-06-24T04:06:31 | 2021-06-24T04:06:31 | 379,203,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 208 | py | import os
# Force the PIL image provider before any other kivy module is imported.
os.environ['KIVY_IMAGE'] = "pil"
from kivy.app import App
from kivy.core.window import Window

# Fix the application window at 400x300 pixels.
Window.size = (400, 300)

class MyApp(App):
    # No overrides: App's defaults give an empty window (loading my.kv if present).
    pass

if __name__ == '__main__':
    MyApp().run()
| [
"xtiqin@163.com"
] | xtiqin@163.com |
b0f81d7ba26446092e938f717a6aea4f3573868b | 2f4b56ef73bd5e22962be42799938f3ef26ea9a0 | /bookflixapp/views.py | 28a6b0f81b8f3c75102b2b1c01d1be33007c7542 | [] | no_license | pedro-chiappani/ing2_bookflix | 2d058634830302ef0fd0a250a0f688d4d485ce96 | 1cc252f39de24d95829d1333cebc52174132fc10 | refs/heads/master | 2022-10-29T22:06:51.724555 | 2020-06-17T21:47:34 | 2020-06-17T21:47:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,959 | py | from django.shortcuts import render, redirect
from bookflixapp.models import Libro, Novedad, Capitulo, Perfil, Usuario, UsuarioCust
from datetime import timedelta
from django.utils import timezone
from django.http import request as rq
from django.contrib.auth import logout as do_logout
from django.contrib.auth import authenticate
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import hashers
from django.contrib.auth import login as do_login
from .forms import RegistrationForm, CreateProfileForm
from .filters import LibroFilter
# from .forms import FormularioAgregarLibro
# Create your views here.
"""
def agregar_libro(request):
if request.method == 'POST':
formularioLibro = FormularioAgregarLibro(request.POST)
if formularioLibro.is_valid():
titulo_libro = formularioLibro.cleaned_data['titulo_campo']
nropaginas_libro = formularioLibro.cleaned_data['nropaginas_campo']
nrocapitulos_libro = formularioLibro.cleaned_data['nrocapitulos_campo']
isbn_libro = formularioLibro.cleaned_data['isbn_campo']
autor_libro = formularioLibro.cleaned_data['autor_campo']
editorial_libro = formularioLibro.cleaned_data['editorial_campo']
genero_libro = formularioLibro.cleaned_data['genero_campo']
agnoedicion_libro = formularioLibro.cleaned_data['agnoedicion_campo']
nuevo_libro = Libro(titulo=titulo_libro, nropaginas=nropaginas_libro, nrocapitulos=nrocapitulos_libro, isbn=isbn_libro, autor=autor_libro, editorial=editorial_libro, agnoedicion=agnoedicion_libro)
nuevo_libro.save()
nuevo_libro.genero.add(*genero_libro)
return render(request, "agregar_libro.html", {'formularioLibro': formularioLibro})
else:
formularioLibro = FormularioAgregarLibro()
return render(request, "agregar_libro.html", {'formularioLibro': formularioLibro})
"""
def ver_libros(request):
    """List all books, filtered by any query-string criteria via LibroFilter."""
    filtro = LibroFilter(request.GET, queryset=Libro.objects.all())
    return render(request, "ver_libros.html", {"filter": filtro})
def ver_capitulos(request, pk):
    """Show the chapters of book *pk*, or fall back to the index page when
    the book has no uploaded chapters."""
    # pk comes from the URLconf; keep only this book's chapters
    # (objects.all() would return every chapter of every book).
    capitulos = Capitulo.objects.filter(libro__id=pk)
    if len(capitulos) > 0:  # temporary patch for books with no chapters
        titulo = capitulos[0].libro
        return render(request, "ver_capitulos.html", {"capitulos": capitulos, "titulo": titulo})
    else:
        return render(request, "index.html")  # no chapters uploaded: send to index
def index(request):
    """Render the home page with the news items (Novedad) from the last 7 days."""
    # Anything created on or after this cutoff counts as recent.
    # (Removed dead locals f/delt/aux that were computed but never used.)
    cutoff = timezone.now() - timedelta(days=7)
    novedades = Novedad.objects.filter(creacion__gte=cutoff)
    return render(request, "index.html", {"novedades": novedades})
def register(request):
    """Handle account sign-up: on valid POST create the User, its Usuario
    wrapper and a default Perfil, log the user in and redirect home;
    otherwise (re-)render the registration form."""
    # Empty registration form for the initial GET render / fallback.
    form = RegistrationForm()
    if request.method == "POST":
        # Bind the submitted data to the form
        form = RegistrationForm(data=request.POST)
        # If the form is valid...
        if form.is_valid():
            # Create the new user account (the e-mail doubles as the username)
            username = form.cleaned_data["email"]
            realpassword = hashers.make_password(password=form.cleaned_data["password1"])
            first_name = form.cleaned_data["first_name"]
            last_name = form.cleaned_data["last_name"]
            tarjeta = form.cleaned_data["tarjeta"]
            fecha = form.cleaned_data["fecha_de_nacimiento"]
            u = User(username=username, first_name=first_name, last_name=last_name, password=realpassword, email=username)
            u.save()
            user = Usuario(user=u, fecha_de_nacimiento=fecha, tarjeta=tarjeta)
            # If the user object was created correctly
            if user is not None:
                # Persist the wrapper, create a default profile, log in manually
                user.save()
                p = Perfil(usuario=user, username=u.first_name)
                p.save()
                do_login(request, u)
                # ...and redirect to the home page
                return redirect('/')
    # Strip the default help text from the password fields
    #form.fields['username'].help_text = None
    form.fields['password1'].help_text = None
    form.fields['password2'].help_text = None
    # Fall through: render the (possibly invalid) form again
    return render(request, "register.html", {'form': form})
def login(request):
    """Authenticate with email/password from the POST body; superusers go to
    /admin, regular users to the home page, failed logins to /register."""
    # Empty authentication form for the initial GET render
    form = AuthenticationForm()
    if request.method == "POST":
        # Bind the submitted data to the form
        form = AuthenticationForm(data=request.POST)
        # Form validation is currently bypassed -- credentials are read directly.
        #if form.is_valid():
        # Pull the raw credentials from the POST body
        username = request.POST["email"]
        password = request.POST["pass"]
        # Check the credentials against the auth backend
        user = authenticate(username=username, password=password)
        # If a user with that name and password exists
        if user is not None:
            # Log the user in manually
            do_login(request, user)
            if user.is_superuser:
                return redirect("/admin") # or your url name
            # Otherwise redirect to the home page
            else:
                return redirect('/')
            #return render(request, "index.html")
        else:
            return redirect('/register')
        #else:
            #return redirect('/register')
    # Reached on GET: render the login form
    return render(request, "login.html", {'form': form})
def logout(request):
    """End the current session and return to the home page."""
    # Terminate the session
    do_logout(request)
    # Redirect to the home page
    return redirect('/')
def createprofile(request):
    """Create a new Perfil for the logged-in user from the submitted form,
    then redirect home; on GET just show the empty form."""
    if request.method == "POST":
        form = CreateProfileForm(data=request.POST)
        if form.is_valid():
            profilename = form.cleaned_data["profilename"]
            user = request.user
            usuario = Usuario.objects.get(user=user)
            profile = Perfil(usuario=usuario, username=profilename)
            profile.save()
            # NOTE(review): always true after construction -- save() would have
            # raised on failure, so this guard is effectively a no-op.
            if profile is not None:
                return redirect("/")
    else:
        form = CreateProfileForm()
    return render(request, "crear_perfil.html", {'form': form})
def verperfil(request):
    """Display the currently selected profile (GET), or switch the selected
    profile to the one named in the POST body and display it (POST)."""
    if request.method == "GET":
        user = request.user
        # (Removed dead `anon = User(AnonymousUser)` -- it built a bogus User
        # with the AnonymousUser *class* as positional arg and was never used.)
        if user.username != "":
            # Authenticated: show the profile currently flagged as selected.
            usuario = Usuario.objects.filter(user=user)
            perfil = Perfil.objects.filter(usuario=usuario[0], selected=True)
            return render(request, 'perfil.html', {"perfil": perfil[0]})
        else:
            # Anonymous visitor: render the page without profile data.
            return render(request, 'perfil.html')
    else:
        if request.method == "POST":
            name = request.POST["nombre"]
            user = request.user
            usuario = Usuario.objects.get(user=user)
            # Deselect the old profile, then mark the requested one selected.
            perfil_sel = Perfil.objects.filter(selected=True, usuario=usuario)
            perfil = Perfil.objects.filter(username=name, usuario=usuario)
            p = perfil_sel[0]
            p.selected = False
            p.save()
            p2 = perfil[0]
            p2.selected = True
            p2.save()
            return render(request, 'perfil.html', {"perfil": perfil[0]})
def selecperfil(request):
    """GET: list all profiles of the logged-in user so one can be chosen.
    POST: just render the profile page (switching happens in verperfil)."""
    if request.method == "GET":
        user = request.user
        usuario = Usuario.objects.filter(user=user)
        perfiles = Perfil.objects.filter(usuario=usuario[0])
        return render(request, 'selec_perfil.html', {"perfiles": perfiles})
    if request.method == "POST":
        return render(request, 'perfil.html')
| [
"49994882+Deth11@users.noreply.github.com"
] | 49994882+Deth11@users.noreply.github.com |
70d1be3f7a31b63359aadd3d48a8a66387b66115 | c5afcbdf8e251f2e0845ab72fc26aee296325ad6 | /src/piazza/migrations/0004_auto_20210415_2112.py | 453d7e8534e4f50090754d5eb44d72b226c4aa8a | [] | no_license | katefaz/Piazza | f2382bf4a7eb9f6e98099e9137277082509aa0a8 | c5fded5bd3edcb1e53a2a5fd628a45e4ca9f069a | refs/heads/master | 2023-04-04T10:29:41.875296 | 2021-04-18T15:31:49 | 2021-04-18T15:31:49 | 358,870,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 855 | py | # Generated by Django 3.0.2 on 2021-04-15 21:12
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated: drop the message counter fields and reset the
    message_expires default. NOTE: the default below is a timestamp frozen
    at generation time (2021-04-15), as makemigrations captured it."""

    dependencies = [
        ('piazza', '0003_auto_20210414_1531'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='message',
            name='message_n_comments',
        ),
        migrations.RemoveField(
            model_name='message',
            name='message_n_dislikes',
        ),
        migrations.RemoveField(
            model_name='message',
            name='message_n_likes',
        ),
        migrations.AlterField(
            model_name='message',
            name='message_expires',
            field=models.DateTimeField(default=datetime.datetime(2021, 4, 15, 22, 27, 40, 283986, tzinfo=utc)),
        ),
    ]
| [
"57639049+katefaz@users.noreply.github.com"
] | 57639049+katefaz@users.noreply.github.com |
550f9ff046c94238fcba3a900c71d6df7d5b48c1 | f62621131071e20d57d009a197cd0d0d5307d282 | /common.py | 2850d0763571f798abd838e828f1e27d7048db4c | [] | no_license | markryanandrews/M5-Forecasting-Accuracy | a9243f95c38a31358c7b57dbe0858381bcf271aa | 9f849b02154f13c9059d999b5106f4af230765ad | refs/heads/master | 2022-04-21T00:43:01.465820 | 2020-04-16T21:02:48 | 2020-04-16T21:02:48 | 255,490,342 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,280 | py | import datetime as dt
import functools

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import statsmodels.graphics.tsaplots as smt
### DATA CLEANING / MANIPULATING FUNCTIONS ###
def log_frame(f):
    """Decorator for DataFrame pipeline steps: logs the wrapped function's
    name, elapsed wall-clock time, and the shape of the frame it returns.

    BUGFIX: added functools.wraps so decorated steps keep their __name__
    and docstring (previously they all appeared as 'wrapper').
    """
    @functools.wraps(f)
    def wrapper(dataf, *args, **kwargs):
        tic = dt.datetime.now()
        result = f(dataf, *args, **kwargs)
        toc = dt.datetime.now()
        print(f"{f.__name__} took {toc - tic}, shape = {result.shape}")
        return result
    return wrapper
@log_frame
def start_pipeline(dataf):
    """Return a defensive copy so later pipeline steps never mutate the caller's frame."""
    return dataf.copy()
@log_frame
def clean_sell_price(dataf):
    """Downcast 'store_id' to a pandas categorical to save memory."""
    dataf['store_id'] = dataf['store_id'].astype('category')
    return dataf
@log_frame
def clean_calendar(dataf):
    """Parse 'date', derive calendar parts from it, cast the event columns to
    categoricals, and index the frame by date."""
    #Date/Time Types
    dataf['date'] = pd.to_datetime(dataf['date'])
    dataf['weekday'] = dataf['date'].dt.day_name()
    # NOTE(review): overwrites the dataset's original 'wday' encoding with
    # pandas' Monday=0 convention -- confirm downstream code expects this.
    dataf['wday'] = dataf['date'].dt.dayofweek
    dataf['month'] = dataf['date'].dt.month
    dataf['year'] = dataf['date'].dt.year
    #Event Types
    dataf['event_name_1'] = dataf['event_name_1'].astype('category')
    dataf['event_type_1'] = dataf['event_type_1'].astype('category')
    dataf['event_name_2'] = dataf['event_name_2'].astype('category')
    dataf['event_type_2'] = dataf['event_type_2'].astype('category')
    dataf.set_index('date' , inplace = True)
    return dataf
@log_frame
def clean_sales(dataf):
    """Downcast 'dept_id' to a pandas categorical to save memory."""
    dataf['dept_id'] = dataf['dept_id'].astype('category')
    return dataf
@log_frame
def filter_item(item_name, sales_df, calendar_df):
    """
    Function: Combines Walmart Item Sales with calendar information.
    Attributes:
        item_name (string): Unique item id (matched against sales_df['id'])
        sales_df (pandas.DataFrame): wide frame with one 'd_*' column per day
        calendar_df (pandas.DataFrame): calendar keyed by the 'd' column
    Returns:
        dataf (pandas.DataFrame): one row per day of this item's sales,
        merged with that day's calendar attributes ('d' column dropped).
    """
    # get sales data from sales (d-columns)
    d_cols = [c for c in sales_df.columns if 'd_' in c] # sales data columns
    dataf = sales_df.loc[sales_df['id'] == item_name].set_index('id')[d_cols].T
    # merge sales with calendar data
    dataf = pd.merge(left = dataf , right = calendar_df, left_index = True, right_on = 'd')
    return dataf.drop(columns = ['d'])
@log_frame
def item_sales(item_name, sales_df, calendar_df):
    """Return only the daily sales column for *item_name* (first column of the merged frame)."""
    item_sale_df = filter_item(item_name, sales_df, calendar_df)
    return item_sale_df.iloc[:,:1]
### TIME SERIES FUNCTIONS ###
def tsplot(y , title, lags = None, diff = 0, figsize = (16,8)):
    '''
    Examine patterns of a series with a 2x2 panel:
    Time Series plot, Histogram plot, ACF and PACF plots.
    *diff* (0-5) applies that order of differencing before plotting.
    Returns the four axes (ts, hist, acf, pacf).
    NOTE(review): an out-of-range diff returns an error *string* instead of
    raising -- callers unpacking four axes would then fail; consider ValueError.
    '''
    if diff > 5:
        return "Error: diff too high, Please choose [0-5]"
    if diff > 0:
        # Difference and drop the first `diff` NaN rows it produces.
        y = y.diff(diff)[diff:]
    fig, ((ts_ax, hist_ax), (acf_ax, pacf_ax)) = plt.subplots(2, 2, figsize=figsize)
    #fig.suptitle('Time Series Analysis', fontsize =16)
    y.plot(ax = ts_ax)
    ts_ax.set_title(title, fontsize = 14, fontweight = 'bold')
    y.plot(ax = hist_ax, kind = 'hist', bins = 25)
    hist_ax.set_title(title, fontsize = 14, fontweight = 'bold')
    smt.plot_acf(y, lags = lags, ax = acf_ax)
    smt.plot_pacf(y, lags = lags, ax = pacf_ax)
    # Start both correlation plots slightly left of lag 0 for readability.
    [ax.set_xlim(-.5) for ax in [acf_ax, pacf_ax]]
    sns.despine()
    plt.tight_layout()
    return ts_ax, hist_ax, acf_ax, pacf_ax
| [
"markryanandrews794@gmail.com"
] | markryanandrews794@gmail.com |
1eca7ec2e36e7cae8c2c4ef9e80e9f1268d6c6e8 | 7f9396be71cdf243930d77638f59aa76c135c9c8 | /virtual/bin/sqlformat | 451f4a53a61bc6a3280c8e6bb71755bdd4a53720 | [] | no_license | CollinsMuiruri/Collins | accb894d620104e49de6660127a6a0074cf7f57e | 9d94d528d94821983681fa8d1e5801f16878b464 | refs/heads/main | 2023-01-23T01:14:07.844954 | 2020-12-09T01:10:38 | 2020-12-09T01:10:38 | 316,529,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | #!/home/collins/Documents/me/Collins/Collins/virtual/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Normalize argv[0] (strip "-script.py"/".exe") so usage text shows the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"wanyekicollins@gmail.com"
] | wanyekicollins@gmail.com | |
bf8263ecfb91ac5e21f4224cec0c3ec162cc75f0 | 99b4e110fb350647f5bd5d9d362e9d9697a6ab53 | /Detect faces.py | ed5c5ac869ff2b8736b3bdac1c989fc86dc3e621 | [] | no_license | kartikey-kawadkar24/Face-Mask-Detection | bdad243c253d3e2787d75f9956b3270885ba41e0 | 78eb826ce11fd5546ba98d22fa34c0e2d1e92965 | refs/heads/main | 2023-03-26T11:05:57.182222 | 2021-03-28T12:30:19 | 2021-03-28T12:30:19 | 352,315,236 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | import cv2
import os
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
import numpy as np
# Trained mask/no-mask classifier and OpenCV's frontal-face Haar cascade.
model = load_model("model-100.h5")
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# Default webcam.
video_capture = cv2.VideoCapture(0)
while True:
    # Capture frame-by-frame
    ret, frame = video_capture.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect face rectangles on the grayscale frame.
    faces = faceCascade.detectMultiScale(
        gray,
        scaleFactor = 1.1,
        minNeighbors = 5,
        minSize = (30, 30),
        flags = cv2.CASCADE_SCALE_IMAGE)
    faces_list = []
    preds = []
    for (x,y,w,h) in faces:
        # Crop the face, convert BGR->RGB and preprocess for MobileNetV2.
        face_frame = frame[y: y + h, x: x + w]
        face_frame = cv2.cvtColor(face_frame, cv2.COLOR_BGR2RGB)
        face_frame = cv2.resize(face_frame, (150, 150))
        face_frame = img_to_array(face_frame)
        face_frame = np.expand_dims(face_frame, axis=0)
        face_frame = preprocess_input(face_frame)
        faces_list.append(face_frame)
        # NOTE(review): predict is re-run on the growing list each iteration;
        # only the loop below's last `pred` drives the label for this face.
        if len(faces_list) > 0:
            preds = model.predict(faces_list)
        for pred in preds:
            (mask, withoutMask) = pred
        # Green box for "Mask", red for "No Mask", with the confidence percent.
        label = "Mask" if mask > withoutMask else "No Mask"
        color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
        label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
        cv2.putText(frame, label, (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h),color, 2)
    # Display the resulting frame
    cv2.imshow('Video', frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
cv2.destroyAllWindows()
| [
"noreply@github.com"
] | kartikey-kawadkar24.noreply@github.com |
fd7ec1acaa87be46590ee357a35c5e28c119d264 | 284af16d42b2e5a4fb2c7dd57b4f8b3a0a9cdfc0 | /manage.py | cd6df88503e24085a54203753042254862bebbbd | [] | no_license | exlevitas/book-market | 7317e31a3d3dc8f0d27666f1854f2f0b99b96301 | 500984d122f01588c865f48c7e502d4fed710e01 | refs/heads/master | 2023-08-04T22:26:33.957804 | 2021-09-20T09:29:49 | 2021-09-20T09:29:49 | 408,382,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module before dispatching.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'site0911.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)

if __name__ == '__main__':
    main()
| [
"mr.daniil7701@gmail.com"
] | mr.daniil7701@gmail.com |
dfe50521c8beadbcedb0a1ea16159e0c00737c40 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03945/s610658846.py | ffb8861904f17b983b639980e4783fc5358be706 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | import collections
def main():
    """Read a string and print how many times consecutive characters differ."""
    S = input()
    # Each position where a character differs from its successor is one change.
    changes = sum(1 for left, right in zip(S, S[1:]) if left != right)
    # Empty input yields -1, matching the original running-counter logic.
    print(changes if S else -1)
if __name__ == "__main__":
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
2e84570f93a0264cfb0e3aa6bb56de65f63c4736 | 2c493ae898600850415440967caed82142d02175 | /algorithms/EMGCN/embedding_model.py | a784ec0457699f68368cfe30e14a35d4cbf5f8d2 | [] | no_license | Remorax/EMGCN | e93f78c1880de1029f9003f22f8f241bbb8582b5 | d0ca2f4cb563af6c8bad8d15029e83880194f76a | refs/heads/master | 2023-04-25T15:42:31.904308 | 2021-05-13T08:20:58 | 2021-05-13T08:20:58 | 366,070,066 | 0 | 0 | null | 2021-05-13T08:21:14 | 2021-05-10T14:25:08 | Python | UTF-8 | Python | false | false | 4,615 | py | import torch
import torch.nn as nn
import numpy as np
from algorithms.EMGCN.utils import init_weight, get_act_function
# DONE
class GCN(nn.Module):
    """
    A single GCN propagation block: out = act(A_hat @ (X W)), W bias-free.
    """
    def __init__(self, activate_function, input_dim, output_dim):
        """
        activate_function: activation name (e.g. Tanh); None -> purely linear layer
        input_dim: input features dimensions
        output_dim: output features dimensions
        """
        super(GCN, self).__init__()
        # Resolve the activation by name; keep None to skip activation entirely.
        self.activate_function = (
            get_act_function(activate_function) if activate_function is not None else None
        )
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.linear = nn.Linear(input_dim, output_dim, bias=False)
        init_weight(self.modules(), "tanh")

    def forward(self, A_hat, input):
        """
        :params A_hat: adjacency matrix for this GCN layer
        :params input: input matrix for this GCN layer
        """
        # Linear transform first, then propagate along the (normalized) adjacency.
        propagated = torch.matmul(A_hat, self.linear(input))
        if self.activate_function is None:
            # Final layer of the stack is built without an activation.
            return propagated
        return self.activate_function(propagated)
class EM_GCN(nn.Module):
    """
    Training a multilayer GCN model over a source and a target graph,
    returning the per-layer embeddings (including the input features).
    """
    def __init__(self, activate_function, num_GCN_blocks, output_dim, \
        num_source_nodes, num_target_nodes, source_feats=None, target_feats=None, direct=True):
        """
        :params activate_function: Name of activation function
        :params num_GCN_blocks: Number of GCN layers of model
        :params output_dim: The number of dimensions of output
        :params num_source_nodes: Number of nodes in source graph
        :params num_target_nodes: Number of nodes in target graph
        :params source_feats: Source Initialized Features
        :params target_feats: Target Initialized Features
        :params direct: Whether to run model in direct mode
        """
        super(EM_GCN, self).__init__()
        self.num_GCN_blocks = num_GCN_blocks
        self.direct = direct
        self.source_feats = source_feats
        self.target_feats = target_feats
        # NOTE(review): assumes source_feats is not None and that source and
        # target features share the same dimensionality -- confirm at call sites.
        input_dim = self.source_feats.shape[1]
        self.activate_function = get_act_function(activate_function)
        self.GCNs = []
        for i in range(num_GCN_blocks):
            # Last layer is like GCN-align
            if i == num_GCN_blocks - 1:
                # NOTE(review): "" is not None, so GCN still calls
                # get_act_function("") -- confirm utils maps "" to identity.
                self.GCNs.append(GCN("", input_dim, output_dim))
            else:
                self.GCNs.append(GCN(activate_function, input_dim, output_dim))
            input_dim = self.GCNs[-1].output_dim
        self.GCNs = nn.ModuleList(self.GCNs)
        init_weight(self.modules(), activate_function)
    def forward_direct(self, A_hat, emb_input):
        """
        :params A_hat: adjacency matrix for this GCN layer
        :params emb_input: emb of the previous layer of initial embedding
        """
        outputs = [emb_input]
        GCN_input_i1 = emb_input
        GCN_input_i2 = emb_input
        for i in range(self.num_GCN_blocks):
            # Propagate along A_hat and its transpose (out/in edge directions),
            # record the concatenation, but feed the un-concatenated halves forward.
            GCN_output_i1 = self.GCNs[i](A_hat, GCN_input_i1)
            GCN_output_i2 = self.GCNs[i](A_hat.t(), GCN_input_i2)
            GCN_output_i = torch.cat((GCN_output_i1, GCN_output_i2), dim=1)
            outputs.append(GCN_output_i)
            GCN_input_i1 = GCN_output_i1
            GCN_input_i2 = GCN_output_i2
        return outputs
    def forward_undirect(self, A_hat, emb_input):
        """
        :params A_hat: adjacency matrix for this GCN layer
        :params emb_input: emb of the previous layer of initial embedding
        """
        outputs = [emb_input]
        for i in range(self.num_GCN_blocks):
            GCN_output = self.GCNs[i](A_hat, emb_input)
            outputs.append(GCN_output)
            emb_input = GCN_output
        return outputs
    def forward(self, A_hat, net='s'):
        """
        Do the forward
        :params A_hat: The sparse Normalized Laplacian Matrix
        :params net: Whether forwarding graph is source ('s') or target graph
        """
        if net == 's':
            input = self.source_feats
        else:
            input = self.target_feats
        # Clone so downstream in-place ops never touch the stored features.
        emb_input = input.clone()
        if self.direct:
            return self.forward_direct(A_hat, emb_input)
        else:
            return self.forward_undirect(A_hat, emb_input)
| [
"vinhbachkhoait@gmail.com"
] | vinhbachkhoait@gmail.com |
d88d6b6157eb18f5eef399c9422c02efd9cb5029 | dc88afd23b53311833de00a110617adcb23b3f79 | /untitled5.py | 91c9a51f570413d98550c9b7e7ce595d8b5aab68 | [] | no_license | 948787878787/Repo-Python-Minecraft | 363c218d5e3f483cb140ebead57016b8b0591d71 | 861ecada56d8cc7c928d1d49df47db89c1b19bec | refs/heads/master | 2022-11-22T20:00:18.012419 | 2020-07-30T08:51:37 | 2020-07-30T08:51:37 | 283,066,243 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 29 11:48:02 2020
@author: appedu
"""
from mcpi.minecraft import Minecraft
# Connect to the running Minecraft instance and read the player's tile position.
mc = Minecraft.create()
x,y,z = mc.player.getTilePos()
for i in range(20):
mc.setBlock(x,y-1,z+i,1) | [
"noreply@github.com"
] | 948787878787.noreply@github.com |
e3c3c1ccf9b0b871ffe941a51793f25edb39bb2a | ebffa1ee650b295ca8a6870d2b4f1203b2a1a644 | /newbies/bin/sqlformat | 7a83930ae92c94594b4b20fab36eb5910f2722d2 | [] | no_license | jod35/django-for-newbies | 1b59ca87c6584cd8eedf03315eef51ed1d87451f | bbb4da8b049ecbe9aeb6ae76962bec5948e5d2ac | refs/heads/master | 2021-09-25T16:40:12.729187 | 2020-02-13T17:28:23 | 2020-02-13T17:28:23 | 236,735,383 | 1 | 0 | null | 2021-09-22T18:34:30 | 2020-01-28T12:56:25 | Python | UTF-8 | Python | false | false | 248 | #!/home/jona/django-for-newbies/newbies/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Normalize argv[0] (strip "-script.py"/".exe") so usage text shows the console name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jona@localhost.localdomain"
] | jona@localhost.localdomain | |
f16de75dc6cac6980c3cda0f3bb90c13005a18b0 | 9fd0c7ec969c2bf5b4d47fadc15b12cc72f48acc | /Day16Challenge.py | 1b727386a40c8702ed9005b43ad129191f7528f6 | [] | no_license | kelvinng213/PythonDailyChallenge | b137f57d30d417dc4f2870f77213f894509cf854 | f640ef48f27d089224f2295e885f4a404cac3f27 | refs/heads/master | 2023-01-29T17:34:06.212321 | 2020-12-07T16:23:49 | 2020-12-07T16:23:49 | 290,100,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | # Given an integer n,
# find all the integers that is the multiple of 3 from 0 to n.
# Return the sum of all these integers.
n = 15
lst = []
# Collect every multiple of 3 from 0 up to and including n.
for i in range(0,n+1,3):
    lst.append(i)
# Print the multiples without the leading 0, then their total (0 adds nothing).
print(lst[1:])
print(sum(lst))
# Example:
# Input:
# 10
# Multiples of 3 from 0 to 10:
# 3, 6, 9
# Return sum of these integers:
# 18 | [
"ngkelvin9@gmail.com"
] | ngkelvin9@gmail.com |
65c41b9102c94060f3a9e2fe38ffbef27730a345 | b8d52bdd79327a46f2629131af17f166c8bdbd45 | /swagger_server/test/test_default_controller.py | 8922b3d5e1ae9d1f857dfbf9db0598ee038b9525 | [] | no_license | casperschmit/DevOps_assigment2 | bfd7b65e2c24177546dfca6dc439ebdc1265582a | 02651d5ed11b36f6824dcb0a74dce75c52e99da1 | refs/heads/master | 2023-03-04T06:44:17.763851 | 2021-02-11T13:37:04 | 2021-02-11T13:37:04 | 337,705,443 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,604 | py | # coding: utf-8
from __future__ import absolute_import
from flask import json
from swagger_server.models.student import Student # noqa: E501
from swagger_server.test import BaseTestCase
import names
class TestDefaultController(BaseTestCase):
"""DefaultController integration test stubs"""
def test_add_student(self):
"""Test case for add_student
Add a new student
"""
body = Student()
body.first_name = names.get_first_name()
body.last_name = names.get_last_name()
body.grades = {'math': 8, 'history': 9}
response = self.client.open(
'/service-api/student',
method='POST',
data=json.dumps(body),
content_type='application/json')
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
self.assertTrue(response.is_json)
self.assertIsInstance(response.json, int)
def test_delete_student(self):
"""Test case for delete_student
"""
body = Student()
body.first_name = names.get_first_name()
body.last_name = names.get_last_name()
body.grades = {'math': 8, 'history': 9}
response = self.client.open(
'/service-api/student',
method='POST',
data=json.dumps(body),
content_type='application/json')
student_id = (response.json)
# response = self.client.open(
# '/service-api/student/{student_id}'.format(student_id=student_id),
# method='DELETE')
# self.assert200(response,
# 'Response body is : ' + response.data.decode('utf-8'))
#
# response = self.client.open(
# '/service-api/student/{student_id}'.format(student_id=-1),
# method='DELETE')
# self.assert404(response,
# 'Response body is : ' + response.data.decode('utf-8'))
def test_get_student_by_id(self):
"""Test case for get_student_by_id
Find student by ID
"""
body = Student()
body.first_name = names.get_first_name()
body.last_name = names.get_last_name()
body.grades = {'math': 8, 'history': 9}
response = self.client.open(
'/service-api/student',
method='POST',
data=json.dumps(body),
content_type='application/json')
student_id = (response.json)
query_string = [('math', 9)]
response = self.client.open(
'/service-api/student/{student_id}'.format(student_id=student_id),
method='GET',
query_string=query_string)
self.assert200(response,
'Response body is : ' + response.data.decode('utf-8'))
self.assertTrue(response.is_json)
self.assertIsInstance(response.json, dict)
    def test_get_student_by_lastname(self):
        """Test case for get_student_by_lastname

        Find student by lastname
        """
        # NOTE(review): the whole body is commented out — this test is a
        # placeholder and currently asserts nothing.
        # # query_string = [('last_name', 'last_name_example')]
        # body = Student()
        # # body.first_name = names.get_first_name()
        # body.last_name = names.get_last_name()
        # body.grades = {'math': 8, 'history': 9}
        # response = self.client.open(
        #     '/service-api/student/',
        #     method='GET',
        #     query_string=body)
        # self.assert200(response,
        #                'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
import unittest
unittest.main()
| [
"c.p.schmit@me.com"
] | c.p.schmit@me.com |
3ec9ab0a1d25969db972acc7d7b463138ec36599 | b5d04cd0fa731bf9ddfe1db02bf5907ca9da428d | /commands/bar.py | 8e7a17a7766bf5bf919b277b21f0d2765fd87c39 | [] | no_license | jagijagijag1/typer-subcommand-sample | 374f22a228f55708dd41ecf04619db3fb7283932 | 5d88ea98761fd48973504de93209a7f3778530db | refs/heads/main | 2023-02-25T11:30:42.641493 | 2021-01-27T11:42:31 | 2021-01-27T11:42:31 | 333,280,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | import typer
# Typer sub-application for the "bar" command; tab-completion is disabled.
barapp = typer.Typer(add_completion=False)


@barapp.command()
def main(arg: str):
    """Echo the given argument prefixed with the command name."""
    typer.echo(f"This is bar command: {arg}")
if __name__ == "__main__":
barapp()
| [
"29348109+jagijagijag1@users.noreply.github.com"
] | 29348109+jagijagijag1@users.noreply.github.com |
34e6273c795886292ed63eed8929c2166aa013c3 | 69f4a7bcfaae0c6fbf83d561e1ae3851e4347e97 | /bin/make_csv.py | 495a21783bb918ed1f2b4a909d66be385118115a | [
"MIT"
] | permissive | flyeven/nosfinanceslocales_scraper | 016743bf5ed11fd0c5c0f735a4c5e4aa2b1a4900 | 8d694263a1ee6fe989138dbf7ee478812f298f00 | refs/heads/master | 2021-01-16T20:46:09.606392 | 2015-11-29T14:02:39 | 2015-11-29T14:02:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,790 | py | # -*- coding: utf-8 -*-
import os
import sys
import json
import codecs
from unicodecsv import DictWriter
PROJ_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(PROJ_DIR)
from localfinance.utils import get_all_variables_by_locality
MAPPING_FILES = os.listdir('data/mapping')
def make_csv():
    """Build one aggregated CSV per administrative zone type.

    For each zone type, collects the variable labels from the scraped data
    dictionaries, writes a header row mapping variable codes to their
    French labels, then appends every scraped JSON-lines record.
    Python 2 only (bare ``print`` statements, unicodecsv).
    """
    data_files = os.listdir('scraped_data')
    fieldnames_by_locality = get_all_variables_by_locality()
    for zone_type in ['city', 'department', 'epci', 'region']:
        print "Make %s csv..." % zone_type
        # Only the scraped files belonging to this zone type.
        locality_data_files = [data_file for data_file in data_files if zone_type in data_file]
        # Fixed metadata columns, mapped to human-readable French labels.
        variables_mapping = {
            'name': u'nom',
            'year': u'année',
            'zone_type': u'type de zone administrative',
            'population': u'population',
            'insee_code': u'cog (code officiel géographique)',
            'url': u'url'
        }
        fieldnames = ['year', 'zone_type', 'name', 'population', 'insee_code', 'url'] \
            + sorted(fieldnames_by_locality[zone_type].keys())
        variables_mapping.update(fieldnames_by_locality[zone_type])
        # EPCI records additionally carry a SIREN identifier.
        if zone_type == 'epci':
            fieldnames.append('siren')
        # NOTE(review): `output`/`input` shadow Python builtins; harmless
        # here but worth renaming if the function grows.
        with open(os.path.join('nosdonnees', zone_type + '_all.csv'), 'w') as output:
            csv_output = DictWriter(output, fieldnames=fieldnames, encoding='utf-8')
            # First data row holds the label mapping, acting as a header.
            csv_output.writerow(variables_mapping)
            for locality_data_file in locality_data_files:
                with codecs.open(os.path.join('scraped_data', locality_data_file), encoding='utf-8') as input:
                    # One JSON document per line; only its "data" payload is kept.
                    for line in input:
                        data = json.loads(line, encoding='utf-8')['data']
                        csv_output.writerow(data)
make_csv() | [
"francois.massot@gmail.com"
] | francois.massot@gmail.com |
15bd6f29ed5617bbd897682594f52dd64bb410e2 | 6fa554aff1f9507eca282ebd1352fb6689ad4842 | /부록02/helloworld-gae/main.py | 99283f2a5469666030a99944aa94a37db682866f | [] | no_license | wikibook/flask | 88260843b56c3bde2f811515d34b3561e9bcd612 | b3c9a4e4c2a88ffb4ada3c0aabe781b590016762 | refs/heads/master | 2022-12-12T21:27:26.202369 | 2021-08-10T05:35:42 | 2021-08-10T05:35:42 | 15,384,305 | 79 | 85 | null | 2022-12-09T05:35:03 | 2013-12-23T00:08:26 | Python | UTF-8 | Python | false | false | 129 | py | # -*- coding: utf-8 -*-
from google.appengine.ext.webapp.util import run_wsgi_app
from helloworld import app
run_wsgi_app(app)
| [
"dylee@wikibook.co.kr"
] | dylee@wikibook.co.kr |
af09a62a78ef5c56df8535e4e0c7ee9b704b9f7d | 2004c9907c47dfabe2be88be543a0b3e4d07931e | /Docs_Geometry_Working_With_NumPy.py | c0ef3c29ed7dcc410277702c359b2d1311846879 | [
"MIT"
] | permissive | hero/Tutorial_Open3D | 17cd28bad0fa89346455ec1af26d8aae5b03684c | 58d11e5673783855bae267373919355d5085fc24 | refs/heads/main | 2023-04-16T16:30:55.290624 | 2021-04-14T22:23:15 | 2021-04-14T22:23:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | import open3d as o3d
import numpy as np
# http://www.open3d.org/docs/release/tutorial/geometry/working_with_numpy.html
########################################################################################################################
# 1. Working with NumPy
########################################################################################################################
# Generate some neat n times 3 matrix using a variant of sync function
x = np.linspace(-3, 3, 401)
mesh_x, mesh_y = np.meshgrid(x, x)
z = np.sinc((np.power(mesh_x, 2) + np.power(mesh_y, 2)))
# Normalise z into [0, 1] so it can serve as a height channel.
z_norm = (z - z.min()) / (z.max() - z.min())
# Flatten the grid into an (N, 3) array of XYZ points.
xyz = np.zeros((np.size(mesh_x), 3))
xyz[:, 0] = np.reshape(mesh_x, -1)
xyz[:, 1] = np.reshape(mesh_y, -1)
xyz[:, 2] = np.reshape(z_norm, -1)
print('xyz')
print(xyz)

########################################################################################################################
# 2. From NumPy to open3d.PointCloud
########################################################################################################################
# Pass xyz to Open3D.o3d.geometry.PointCloud and visualize
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(xyz)
o3d.io.write_point_cloud("sync.ply", pcd)

########################################################################################################################
# 3. From open3d.PointCloud to NumPy
########################################################################################################################
# Load saved point cloud and visualize it
pcd_load = o3d.io.read_point_cloud("sync.ply")
# Convert Open3D.o3d.geometry.PointCloud to numpy array
xyz_load = np.asarray(pcd_load.points)
print('xyz_load')
print(xyz_load)
# NOTE: draw_geometries opens a blocking window; the script ends when it closes.
o3d.visualization.draw_geometries([pcd_load], width=800, height=600)
| [
"zhy29563@hotmail.com"
] | zhy29563@hotmail.com |
ca2d324cb381f116518530e8cab2217007960b88 | 5324f3f18d405085844499067d7594ac478a9841 | /catalog/views.py | 7f8535cf22f8377fab31f745c7a9bd7ed00e2f9a | [] | no_license | stenus/django-food-nutrients | a4c7194a4058898e1c54a1683d66c66bbf52cb47 | 95362aff7708386584c8118c65790039c4bab61b | refs/heads/master | 2023-02-24T09:17:20.296566 | 2021-01-29T07:38:24 | 2021-01-29T07:38:24 | 330,675,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | from django.shortcuts import render
from django.views import generic
from catalog.models import Food
def index(request):
    """Render the site home page.

    Counts all Food records and passes the count to the index.html
    template as ``num_foods``.
    """
    food_count = Food.objects.all().count()
    return render(request, 'index.html', context={'num_foods': food_count})
class FoodListView(generic.ListView):
    """Paged list of all Food records (100 per page).

    Template name and context variable follow Django ListView defaults.
    """
    paginate_by = 100
    model = Food
class FoodDetailView(generic.DetailView):
    """Detail page for a single Food record, using DetailView defaults."""
    model = Food
def about(request):
    """Render the about page with the total number of Food records."""
    food_count = Food.objects.all().count()
    return render(request, 'about.html', context={'num_foods': food_count})
| [
"markin3617@gmail.com"
] | markin3617@gmail.com |
05eab202018c6a9bdafb95aee596355962caa256 | 7fd9c743e3fdbf41da3b550bffcae70e7615ebd5 | /Job/admin.py | ad015452d00c1a417bf6bb1283532cccbbc49784 | [] | no_license | kshitiiij/Portfolio-Blog | 355f3fef4396fe569367207f4ef1e2fd4d307e27 | 87480a0cfa715c13335385dde188644075ff1c4a | refs/heads/master | 2022-08-01T06:31:02.263796 | 2020-05-18T17:16:53 | 2020-05-18T17:16:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.contrib import admin
from .models import Job
# Expose the Job model in the Django admin site.
admin.site.register(Job)
| [
"Kshitij-10@users.noreply.github.com"
] | Kshitij-10@users.noreply.github.com |
cd78723b80066930b40ea5063d8849ad8d003dfe | b789784b2e3c9938f4434d5ab70f169a70f6dbcf | /catkin_ws/build/leap_motion/catkin_generated/generate_cached_setup.py | bfe3428e267e404a19ee81ba518d482b8179e443 | [] | no_license | ut-ims-robotics-sandbox/kustu_k_sandbox | 472e27f9e2ce9f0a0e6215c91859296960838b9f | 1bde79fc9e61dc691e694e916e909b1c1c4c6d70 | refs/heads/master | 2022-12-23T03:02:27.018517 | 2020-10-01T19:08:28 | 2020-10-01T19:08:28 | 299,899,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,319 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
# NOTE(review): this file lives under catkin_generated/ — presumably
# auto-generated by catkin; confirm before hand-editing.
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
    sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
    from catkin.environment_cache import generate_environment_script
except ImportError:
    # search for catkin package in all workspaces and prepend to path
    for workspace in '/home/kustu/catkin_ws/devel;/opt/ros/melodic'.split(';'):
        python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
        if os.path.isdir(os.path.join(python_path, 'catkin')):
            sys.path.insert(0, python_path)
            break
    from catkin.environment_cache import generate_environment_script

code = generate_environment_script('/home/kustu/catkin_ws/devel/.private/leap_motion/env.sh')

output_filename = '/home/kustu/catkin_ws/build/leap_motion/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
    # print('Generate script for cached setup "%s"' % output_filename)
    f.write('\n'.join(code))

# Mark the generated environment script executable for its owner.
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
| [
"kustu11@hot.ee"
] | kustu11@hot.ee |
3dfffc0c816b2afd57cd7b4250788052eb8bb807 | a6719f4815ff41d3a1f09e9a63a64c4582d03702 | /function_and_scope/func_call.py | 224a49fd81ba7ad18b41c7b075f32fa82a7f7447 | [
"MIT"
] | permissive | thanh-vt/python-basic-programming | 8136007b8435dae6339ae33015fe536e21b19d1d | 5fe817986fbef2649b4b03955f07b59d2a2035d8 | refs/heads/main | 2023-01-30T12:57:36.819687 | 2020-12-13T17:27:05 | 2020-12-13T17:27:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | from func_declare import *
# Demo driver: exercises each function defined in func_declare
# (star-imported above), one language feature per call.
# normal function
hello_func()
# argument
intro_func('Thanh')
# multiple arguments
intro_func_2('Thanh', 'Tat', 'Vu')
# unknown number of arguments
fruit_intro_func('banana', 'orange', 'watermelon')
# key word arguments
child_intro_func(child1='Emil', child2='Tobias', child3='Linus')
# arbitrary keyword arguments
child_intro_func_2(fname="Tobias", lname='Refsnes')
# default value of argument
country_intro_func()
# list of arguments:
fruits = {'apple', 'banana', 'cherry'}
fruit_list_func(fruits)
# return value:
x = multiply_by_5_func(2)
print(x)
# empty body
empty_func()
# recursion
y = factorial_func(5)
print(y)
| [
"thanhvt@vissoft.vn"
] | thanhvt@vissoft.vn |
88c9a1349f58ff582cf27895cc9a2ff93e45c5c1 | dbb3e6ffc1dca4c3707b5010a565fc06a7dda259 | /flow_shop_scheduling/labolatorium2/critial_path.py | d99b24de82fa916a957e454faa242a3d3cdfd3f8 | [] | no_license | goorkamateusz/SPD-collage-course | 63162fd493f32ffc381bdf8193a15575665337cb | 191b203143db8800759dedcb90f7b352e6e457fd | refs/heads/master | 2023-06-05T08:18:50.256862 | 2021-06-28T21:01:19 | 2021-06-28T21:01:19 | 344,195,256 | 1 | 0 | null | 2021-06-16T16:22:39 | 2021-03-03T16:43:43 | Python | UTF-8 | Python | false | false | 1,363 | py | from labolatorium1.calculable_lib import TaskAssigned
from typing import List
class CritialPath:
    """Builds and lazily evaluates the critical path of a schedule.

    Tasks are indexed by their finish time; the cached path is recomputed
    only after new tasks have been added (tracked by ``_modified``).
    NOTE(review): the class name looks like a typo for "CriticalPath", but
    renaming would break callers, so it is kept.
    """

    def __init__(self):
        # finish time -> list of TaskAssigned instances ending at that time
        self._dictionary = {}
        # cached critical path, ordered from time 0 to the makespan
        self._critical_path = []
        # True when tasks were added since the last _calculate()
        self._modified = True

    def add(self, task: TaskAssigned) -> None:
        """Register a scheduled task and invalidate the cached path."""
        if task.finish in self._dictionary:
            self._dictionary[task.finish].append(task)
        else:
            self._dictionary[task.finish] = [task]
        self._modified = True

    def get_path(self) -> List[TaskAssigned]:
        """Return the critical path, recomputing it if tasks were added."""
        if self._modified:
            self._calculate()
        return self._critical_path

    def _calculate(self) -> None:
        # Walk backwards from the last-finishing task down to time 0,
        # prepending each predecessor.
        # NOTE(review): _critical_path is not cleared first — calling
        # add()/get_path() again prepends a second path. Confirm intended.
        task = self._get_last_task()
        self._critical_path.insert(0, task)
        while task.start > 0:
            task = self._get_task_before(task)
            self._critical_path.insert(0, task)
        self._modified = False

    def _get_last_task(self) -> TaskAssigned:
        # The makespan is the largest finish time seen; ties resolved by
        # taking the first task recorded at that time.
        finish_time = max(self._dictionary.keys())
        return self._dictionary[finish_time][0]

    def _get_task_before(self, task: TaskAssigned) -> TaskAssigned:
        # Predecessor = a task finishing exactly when `task` starts:
        # preferably one that compares equal (presumably TaskAssigned.__eq__
        # matches the same job — TODO confirm), otherwise one on the same
        # machine.
        for t in self._dictionary[task.start]:
            if t == task:
                return t
        for t in self._dictionary[task.start]:
            if t.machine_id == task.machine_id:
                return t
        # Original Polish comment: "Mysle ze to jest nie mozliwe"
        # = "I think this is impossible".
        raise Exception("Mysle ze to jest nie mozliwe")
| [
"maatiug@gmail.com"
] | maatiug@gmail.com |
9d70090237b66427734a22225c66cb8ed89bb423 | de92261317c6f900ad7bbb0bf427e87e793130f5 | /add_two_numbers.py | 78c6ad87a267e37b26979cc128257513b3c82e0b | [] | no_license | din00tavares/LeetCode | 97bd2f27d190ef80a798cb79d81e8c3914662730 | eb140e38c74fb9ad44b287ca311e093d977ee66d | refs/heads/main | 2023-06-13T00:30:23.629144 | 2021-06-28T00:52:29 | 2021-06-28T00:52:29 | 371,339,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
def __repr__(self):
return "ListNode(val=" + str(self.val) + ", next=" + str(self.next) + ")"
class Solution:
def list_to_ListNode(self,l):
if type(l) is not list or len(l) == 0:
return None
return ListNode(val = l[0],next= self.list_to_ListNode(self,l[1:]) if len(l)> 1 else None)
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
def check_list(l):
r = list()
while True:
if type(l.val) is not int or l.val< 0 or l.val> 9:
raise Exception(ValueError,'''Values must be integers between 0 and 9.
Error on value {value} at index {index}'''.format(value= l.val, index= i))
else:
r.append(str(l.val))
if l.next is None:
break
else:
l = l.next
return int(''.join(reversed(r)))
return self.list_to_ListNode(self,list(reversed([int(c) for c in str(check_list(l1) + check_list(l2))])))
# Smoke test: 342 + 465 = 807, stored as reversed digit lists -> [7, 0, 8].
# NOTE(review): the class itself is passed as `self` (unbound call style).
l1 = Solution.list_to_ListNode(Solution,[2,4,3])
l2 = Solution.list_to_ListNode(Solution,[5,6,4])
n = Solution.addTwoNumbers(Solution,l1,l2)
print(n) | [
"noreply@github.com"
] | din00tavares.noreply@github.com |
8d23d4618859696f54b62339235368d1eff128b9 | d65c3bdbb6d87e6a9de90b38eb5301e5bc61fc3f | /app/api/errors.py | 05ded22174c8a0f5ffe8afc273c57a83c7a65e4c | [] | no_license | pshenglh/share | 850d1af63f06a2be9a3b9164816670bff12c8284 | 8953a9c9682781e168028cc35e23a486363cf168 | refs/heads/master | 2021-01-24T08:16:39.820901 | 2017-06-14T14:00:17 | 2017-06-14T14:00:17 | 93,375,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | from flask import jsonify
from . import api
from app.exceptions import ValidationError
def forbidden(message):
response = jsonify({'error': 'forbidden', 'message': message})
response.status_code = 403
return response
def unauthorized(message):
response = jsonify({'error': 'unauthorized', 'message': message})
response.status_code = 401
return response
def bad_request(message):
response = jsonify({'error': 'bad request', 'message': message})
response.status_code = 400
return response
@api.errorhandler(ValidationError)
def validation_error(e):
return bad_request(e.args[0]) | [
"psheng@outlook.com"
] | psheng@outlook.com |
bc702a3dd6adcee1acade2c0b2d9058a23e51543 | 2e440f944f17835f048eb444100271e42daf37cf | /python2.7libs/PyC3D/vector3.py | 75d612281b1056f6082bb74e7cc1326be1b04508 | [] | no_license | all-in-one-of/C3DtoHoudini | e4eb8d480074bcb920217b2d8017a0f512f331bf | 155c62e7d1ddc19957495d0d4e87cc0c079b6c1d | refs/heads/master | 2021-01-04T13:47:11.477418 | 2014-10-09T22:43:30 | 2014-10-09T22:43:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,453 | py | from math import *
from util import format_number
class Vector3(object):
    """A mutable 3D vector of floats.

    Supports arithmetic with other vectors, 3-sequences and scalars, plus
    the usual geometric operations (length, normalise, dot, cross, ...).
    Works on both Python 2 and Python 3: the legacy ``__div__`` /
    ``__nonzero__`` hooks are kept and aliased to ``__truediv__`` /
    ``__bool__``.
    """

    __slots__ = ('_v',)

    def __init__(self, *args):
        """Creates a Vector3 from 3 numeric values or a list-like object
        containing at least 3 values. No arguments result in a null vector.
        """
        if len(args) == 3:
            # list() is required on Python 3, where map() is lazy.
            self._v = list(map(float, args[:3]))
            return

        if not args:
            self._v = [0., 0., 0.]
        elif len(args) == 1:
            self._v = list(map(float, args[0][:3]))
        else:
            raise ValueError("Vector3.__init__ takes 0, 1 or 3 parameters")

    @classmethod
    def from_points(cls, p1, p2):
        """Creates a Vector3 pointing from p1 to p2."""
        v = cls.__new__(cls)
        ax, ay, az = p1
        bx, by, bz = p2
        v._v = [bx-ax, by-ay, bz-az]
        return v

    @classmethod
    def from_floats(cls, x, y, z):
        """Creates a Vector3 from individual float values.
        Warning: There is no checking for efficiency here: x, y, z _must_ be
        floats.
        """
        v = cls.__new__(cls)
        v._v = [x, y, z]
        return v

    @classmethod
    def from_iter(cls, iterable):
        """Creates a Vector3 from an iterable containing at least 3 values."""
        it = iter(iterable)
        v = cls.__new__(cls)
        # fix: next(it), not it.next() — the latter is Python 2 only.
        v._v = [float(next(it)), float(next(it)), float(next(it))]
        return v

    def copy(self):
        """Returns a copy of this vector."""
        v = self.__new__(self.__class__)
        v._v = self._v[:]
        return v

    __copy__ = copy

    def _get_x(self):
        return self._v[0]
    def _set_x(self, x):
        assert isinstance(x, float), "Must be a float"
        self._v[0] = x
    x = property(_get_x, _set_x, None, "x component.")

    def _get_y(self):
        return self._v[1]
    def _set_y(self, y):
        assert isinstance(y, float), "Must be a float"
        self._v[1] = y
    y = property(_get_y, _set_y, None, "y component.")

    def _get_z(self):
        return self._v[2]
    def _set_z(self, z):
        assert isinstance(z, float), "Must be a float"
        self._v[2] = z
    z = property(_get_z, _set_z, None, "z component.")

    def _get_length(self):
        x, y, z = self._v
        return sqrt(x*x + y*y + z*z)

    def _set_length(self, length):
        # Scale the vector to the requested length; a zero vector stays zero.
        v = self._v
        try:
            x, y, z = v
            l = length / sqrt(x*x + y*y + z*z)
        except ZeroDivisionError:
            v[0] = 0.
            v[1] = 0.
            v[2] = 0.
            return self
        v[0] = x*l
        v[1] = y*l
        v[2] = z*l

    length = property(_get_length, _set_length, None, "Length of the vector")

    def unit(self):
        """Returns a unit vector."""
        x, y, z = self._v
        l = sqrt(x*x + y*y + z*z)
        return self.from_floats(x/l, y/l, z/l)

    def set(self, x, y, z):
        """Sets the components of this vector.
        x -- x component
        y -- y component
        z -- z component
        """
        assert ( isinstance(x, float) and
                 isinstance(y, float) and
                 isinstance(z, float) ), "x, y, z must be floats"
        v = self._v
        v[0] = x
        v[1] = y
        v[2] = z
        return self

    def __str__(self):
        x, y, z = self._v
        return "(%s, %s, %s)" % (format_number(x),
                                 format_number(y),
                                 format_number(z))

    def __repr__(self):
        x, y, z = self._v
        return "Vector3(%s, %s, %s)" % (x, y, z)

    def __len__(self):
        return 3

    def __iter__(self):
        """Iterates the components in x, y, z order."""
        return iter(self._v[:])

    def __getitem__(self, index):
        """Retrieves a component, given its index.

        index -- 0, 1 or 2 for x, y or z
        """
        try:
            return self._v[index]
        except IndexError:
            # fix: call form — `raise E, msg` is a syntax error on Python 3.
            raise IndexError("There are 3 values in this object, index should be 0, 1 or 2!")

    def __setitem__(self, index, value):
        """Sets a component, given its index.

        index -- 0, 1 or 2 for x, y or z
        value -- New (float) value of component
        """
        assert isinstance(value, float), "Must be a float"
        try:
            self._v[index] = value
        except IndexError:
            raise IndexError("There are 3 values in this object, index should be 0, 1 or 2!")

    def __eq__(self, rhs):
        """Test for equality

        rhs -- Vector or sequence of 3 values
        """
        x, y, z = self._v
        xx, yy, zz = rhs
        return x==xx and y==yy and z==zz

    def __ne__(self, rhs):
        """Test of inequality

        rhs -- Vector or sequenece of 3 values
        """
        x, y, z = self._v
        xx, yy, zz = rhs
        return x!=xx or y!=yy or z!=zz

    def __hash__(self):
        return hash(tuple(self._v))

    def __add__(self, rhs):
        """Returns the result of adding a vector (or collection of 3 numbers)
        from this vector.

        rhs -- Vector or sequence of 3 values
        """
        x, y, z = self._v
        ox, oy, oz = rhs
        return self.from_floats(x+ox, y+oy, z+oz)

    def __iadd__(self, rhs):
        """Adds another vector (or a collection of 3 numbers) to this vector.

        rhs -- Vector or sequence of 3 values
        """
        ox, oy, oz = rhs
        v = self._v
        v[0] += ox
        v[1] += oy
        v[2] += oz
        return self

    def __radd__(self, lhs):
        """Adds vector to this vector (right version)

        lhs -- Left hand side vector or sequence
        """
        x, y, z = self._v
        ox, oy, oz = lhs
        return self.from_floats(x+ox, y+oy, z+oz)

    def __sub__(self, rhs):
        """Returns the result of subtracting a vector (or collection of
        3 numbers) from this vector.

        rhs -- 3 values
        """
        x, y, z = self._v
        ox, oy, oz = rhs
        return self.from_floats(x-ox, y-oy, z-oz)

    def __isub__(self, rhs):
        """Subtracts another vector (or a collection of 3 numbers) from this
        vector, in place.

        rhs -- Vector or sequence of 3 values
        """
        # fix: was misspelled `_isub__`, so `-=` silently fell back to
        # __sub__ and returned a new object instead of mutating in place.
        ox, oy, oz = rhs
        v = self._v
        v[0] -= ox
        v[1] -= oy
        v[2] -= oz
        return self

    def __rsub__(self, lhs):
        """Subtracts a vector (right version)

        lhs -- Left hand side vector or sequence
        """
        x, y, z = self._v
        ox, oy, oz = lhs
        return self.from_floats(ox-x, oy-y, oz-z)

    def scalar_mul(self, scalar):
        """Multiplies each component by a scalar, in place."""
        v = self._v
        v[0] *= scalar
        v[1] *= scalar
        v[2] *= scalar

    def vector_mul(self, vector):
        """Multiplies componentwise by another vector, in place."""
        x, y, z = vector
        v = self._v
        v[0] *= x
        v[1] *= y
        v[2] *= z

    def get_scalar_mul(self, scalar):
        """Returns a new vector with each component multiplied by scalar."""
        # fix: read self._v — the original unpacked nonexistent self.scalar.
        x, y, z = self._v
        return self.from_floats(x*scalar, y*scalar, z*scalar)

    def get_vector_mul(self, vector):
        """Returns the componentwise product with another vector."""
        x, y, z = self._v
        xx, yy, zz = vector
        return self.from_floats(x * xx, y * yy, z * zz)

    def __mul__(self, rhs):
        """Return the result of multiplying this vector by another vector, or
        a scalar (single number).

        rhs -- Vector, sequence or single value.
        """
        x, y, z = self._v
        if hasattr(rhs, "__getitem__"):
            ox, oy, oz = rhs
            return self.from_floats(x*ox, y*oy, z*oz)
        else:
            return self.from_floats(x*rhs, y*rhs, z*rhs)

    def __imul__(self, rhs):
        """Multiply this vector by another vector, or a scalar
        (single number).

        rhs -- Vector, sequence or single value.
        """
        v = self._v
        if hasattr(rhs, "__getitem__"):
            ox, oy, oz = rhs
            v[0] *= ox
            v[1] *= oy
            v[2] *= oz
        else:
            v[0] *= rhs
            v[1] *= rhs
            v[2] *= rhs
        return self

    def __rmul__(self, lhs):
        x, y, z = self._v
        if hasattr(lhs, "__getitem__"):
            ox, oy, oz = lhs
            return self.from_floats(x*ox, y*oy, z*oz)
        else:
            return self.from_floats(x*lhs, y*lhs, z*lhs)

    def __div__(self, rhs):
        """Return the result of dividing this vector by another vector, or a scalar (single number)."""
        x, y, z = self._v
        if hasattr(rhs, "__getitem__"):
            ox, oy, oz = rhs
            return self.from_floats(x/ox, y/oy, z/oz)
        else:
            return self.from_floats(x/rhs, y/rhs, z/rhs)

    # Python 3 uses __truediv__ for the / operator.
    __truediv__ = __div__

    def __idiv__(self, rhs):
        """Divide this vector by another vector, or a scalar (single number), in place."""
        # fix: the original referenced undefined names ox/oy/oz (NameError).
        v = self._v
        if hasattr(rhs, "__getitem__"):
            ox, oy, oz = rhs
            v[0] /= ox
            v[1] /= oy
            v[2] /= oz
        else:
            v[0] /= rhs
            v[1] /= rhs
            v[2] /= rhs
        return self

    # Python 3 uses __itruediv__ for the /= operator.
    __itruediv__ = __idiv__

    def __rdiv__(self, lhs):
        x, y, z = self._v
        if hasattr(lhs, "__getitem__"):
            ox, oy, oz = lhs
            return self.from_floats(ox/x, oy/y, oz/z)
        else:
            return self.from_floats(lhs/x, lhs/y, lhs/z)

    __rtruediv__ = __rdiv__

    def scalar_div(self, scalar):
        """Divides each component by a scalar, in place."""
        v = self._v
        v[0] /= scalar
        v[1] /= scalar
        v[2] /= scalar

    def vector_div(self, vector):
        """Divides componentwise by another vector, in place."""
        x, y, z = vector
        v = self._v
        v[0] /= x
        v[1] /= y
        v[2] /= z

    def get_scalar_div(self, scalar):
        """Returns a new vector with each component divided by scalar."""
        # fix: read self._v — the original unpacked nonexistent self.scalar.
        x, y, z = self._v
        return self.from_floats(x / scalar, y / scalar, z / scalar)

    def get_vector_div(self, vector):
        """Returns the componentwise quotient with another vector."""
        x, y, z = self._v
        xx, yy, zz = vector
        return self.from_floats(x / xx, y / yy, z / zz)

    def __neg__(self):
        """Returns the negation of this vector (a vector pointing in the opposite direction.
        eg v1 = Vector(1,2,3)
        print -v1
        >>> (-1,-2,-3)
        """
        x, y, z = self._v
        return self.from_floats(-x, -y, -z)

    def __pos__(self):
        return self.copy()

    def __nonzero__(self):
        # NOTE: historical truthiness — false if ANY component is zero.
        # bool() wrapper added because Python 3's __bool__ must return bool.
        x, y, z = self._v
        return bool(x and y and z)

    __bool__ = __nonzero__

    def __call__(self, keys):
        """Returns a tuple of the values in a vector

        keys -- An iterable containing the keys (x, y or z)
        eg v = Vector3(1.0, 2.0, 3.0)
        v('zyx') -> (3.0, 2.0, 1.0)
        """
        ord_x = ord('x')
        v = self._v
        return tuple( v[ord(c)-ord_x] for c in keys )

    def as_tuple(self):
        """Returns a tuple of the x, y, z components. A little quicker than
        tuple(vector)."""
        return tuple(self._v)

    def scale(self, scale):
        """Scales the vector by another vector or a scalar. Same as the
        *= operator.

        scale -- Value to scale the vector by
        """
        # fix: the original body referenced undefined `rhs` (NameError).
        v = self._v
        if hasattr(scale, "__getitem__"):
            ox, oy, oz = scale
            v[0] *= ox
            v[1] *= oy
            v[2] *= oz
        else:
            v[0] *= scale
            v[1] *= scale
            v[2] *= scale
        return self

    def get_length(self):
        """Calculates the length of the vector."""
        x, y, z = self._v
        return sqrt(x*x + y*y + z*z)
    get_magnitude = get_length

    def set_length(self, new_length):
        """Sets the length of the vector. (Normalises it then scales it)

        new_length -- The new length of the vector.
        """
        v = self._v
        try:
            x, y, z = v
            l = new_length / sqrt(x*x + y*y + z*z)
        except ZeroDivisionError:
            v[0] = 0.0
            v[1] = 0.0
            v[2] = 0.0
            return self
        v[0] = x*l
        v[1] = y*l
        v[2] = z*l
        return self

    def get_distance_to(self, p):
        """Returns the distance of this vector to a point.

        p -- A position as a vector, or collection of 3 values.
        """
        ax, ay, az = self._v
        bx, by, bz = p
        dx = ax-bx
        dy = ay-by
        dz = az-bz
        return sqrt( dx*dx + dy*dy + dz*dz )

    def get_distance_to_squared(self, p):
        """Returns the squared distance of this vector to a point.

        p -- A position as a vector, or collection of 3 values.
        """
        ax, ay, az = self._v
        bx, by, bz = p
        dx = ax-bx
        dy = ay-by
        dz = az-bz
        return dx*dx + dy*dy + dz*dz

    def normalise(self):
        """Scales the vector to be length 1."""
        v = self._v
        x, y, z = v
        l = sqrt(x*x + y*y + z*z)
        try:
            v[0] /= l
            v[1] /= l
            v[2] /= l
        except ZeroDivisionError:
            v[0] = 0.0
            v[1] = 0.0
            v[2] = 0.0
        return self
    normalize = normalise

    def get_normalised(self):
        """Returns a normalised copy of this vector."""
        x, y, z = self._v
        l = sqrt(x*x + y*y + z*z)
        return self.from_floats(x/l, y/l, z/l)
    get_normalized = get_normalised

    def in_sphere(self, sphere):
        """Returns true if this vector (treated as a position) is contained in
        the given sphere.
        """
        # Relies on the module-level distance3d helper defined below.
        return distance3d(sphere.position, self) <= sphere.radius

    def dot(self, other):
        """Returns the dot product of this vector with another.

        other -- A vector or tuple
        """
        x, y, z = self._v
        ox, oy, oz = other
        return x*ox + y*oy + z*oz

    def cross(self, other):
        """Returns the cross product of this vector with another.

        other -- A vector or tuple
        """
        x, y, z = self._v
        bx, by, bz = other
        return self.from_floats( y*bz - by*z,
                                 z*bx - bz*x,
                                 x*by - bx*y )

    def cross_tuple(self, other):
        """Returns the cross product of this vector with another, as a tuple.
        This avoids the Vector3 construction if you don't need it.

        other -- A vector or tuple
        """
        x, y, z = self._v
        bx, by, bz = other
        return ( y*bz - by*z,
                 z*bx - bz*x,
                 x*by - bx*y )
def distance3d_squared(p1, p2):
    """Return the squared Euclidean distance between 3D points p1 and p2."""
    ax, ay, az = p1
    bx, by, bz = p2
    dx, dy, dz = ax - bx, ay - by, az - bz
    return dx * dx + dy * dy + dz * dz
def distance3d(p1, p2):
    """Return the Euclidean distance between 3D points p1 and p2."""
    ax, ay, az = p1
    bx, by, bz = p2
    dx, dy, dz = ax - bx, ay - by, az - bz
    return sqrt(dx * dx + dy * dy + dz * dz)
def centre_point3d(points):
    # Arithmetic mean of a sequence of 3D points, returned as a Vector3.
    # Relies on Vector3.__radd__ (sum() starts at 0) and vector / int
    # division; on Python 3 this needs Vector3 to define __truediv__.
    return sum( Vector3(p) for p in points ) / len(points)
if __name__ == "__main__":
v1 = Vector3(2.2323, 3.43242, 1.)
print 3*v1
print (2, 4, 6)*v1
print (1, 2, 3)+v1
print v1('xxxyyyzzz')
print v1[2]
print v1.z
v1[2]=5.
print v1
v2= Vector3(1.2, 5, 10)
print v2
v1 += v2
print v1.get_length()
print repr(v1)
print v1[1]
p1 = Vector3(1,2,3)
print p1
print repr(p1)
for v in p1:
print v
#print p1[6]
ptest = Vector3( [1,2,3] )
print ptest
z = Vector3()
print z
file("test.txt", "w").write( "\n".join(str(float(n)) for n in range(20)) )
f = file("test.txt")
v1 = Vector3.from_iter( f )
v2 = Vector3.from_iter( f )
v3 = Vector3.from_iter( f )
print v1, v2, v3
print "--"
print v1
print v1 + (10,20,30)
print v1('xz')
print -v1
#print tuple(ptest)
#p1.set( (4, 5, 6) )
#print p1
print Vector3(10,10,30)+v1
| [
"mikedatsik@gmail.com"
] | mikedatsik@gmail.com |
611c1256e6469b7ceef7838061cd304f5e2f09ec | dcebc9eba57f252874a336b4b396c1dea328e850 | /py/dd_match_fields.py | 0f91450376fbfa3b376a9c2a4b765fef32abf897 | [
"Apache-2.0"
] | permissive | bcgov/diputils | d7408ceb7d02c1583bba75e515cb3f93e2e07a09 | caf510c81f7f43372d4a8e18f77eaa86cdede6a5 | refs/heads/master | 2022-05-15T01:02:13.289995 | 2022-05-08T22:02:54 | 2022-05-08T22:02:54 | 231,476,522 | 5 | 1 | Apache-2.0 | 2022-05-08T22:02:55 | 2020-01-02T23:31:46 | Python | UTF-8 | Python | false | false | 3,121 | py | # Copyright 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#20190214 dd_match.py: match dat file with data dictionary (csv2)
import os
import sys
'''
grep -n DE.HLTH_PROD_LABEL ./dd/*.csv2
data_dictionary_pharmanet-january-1-1996-onwards.xlsx_dsp_rpt.A
dd_fields.exe data_dictionary_pharmanet-january-1-1996-onwards.xlsx_dsp_rpt.A
'''
labels_for_file = {}
files = os.popen("ls -1 ~/dd/*.csv2").read().strip().split('\n')
for i in range(0, len(files)):
files[i] = files[i].strip()
f = open(os.popen('cygpath -d ' + files[i]).read().strip())
lines = f.read().strip().split("\n")
w = lines[0].strip().split(',')
if w[0].lower() == 'start':
# print w
labels = []
for line in lines:
line = line.strip()
w = line.split(',')
labels.append(w[3].lower())
labels_for_file[files[i]] = labels # store the labels from this file, according to filename
# field names for extract (no data here):
lines = ["Ft_schlstud.A.dat STUDYID SPECIAL_NEED_CODE_THIS_COLL",
"DAD STUDYID DIAGX1 DSC ADDATE",
"MSP STUDYID SPEC ICD9 ICD9_1 ICD9_2 ICD9_3 ICD9_4 ICD9_5 SERVCODE servdate",
"DES_REP.A DE.STUDYID DE.HLTH_PROD_LABEL DE.DSPD_QTY DE.SRV_DATE",
"HLTH_REP.A HP.DIN_PIN HP.GEN_DRUG"]
o_f = open("extract_me.csv", "wb")
o_f.write('\n'.join(lines))
o_f.close()
# now attempt to match the labelsets from the file, with the above:
lines = open("extract_me.csv").read().strip().split('\n')
dd_matches = []
for i in range(0, len(lines)):
line = lines[i].strip().lower().split()
line = line[1:]
print ",".join(line)
max_score, max_f = 0, []
matched = []
for f in labels_for_file:
labels = labels_for_file[f]
score = 0
for label_to_match in line:
# if print "\t", label_to_match
if label_to_match in labels:
score += 1
if label_to_match not in matched:
matched.append(label_to_match)
# make sure to handle multiple matches for the same thing
if score == max_score:
max_f.append(f)
if score > max_score:
max_f = [f]
max_score = score
print "\n\t", max_score, "/", len(line), line, "\n\t-------> ", "MATCH" if max_score==len(line) -1 else ""
print "\tvarmatch", matched
for f in max_f:
print "\t\t", f
dd_matches.append(f.strip()) # list all dd we want to use to extract
f = open("dd_match_fields_selected_dd.txt", "wb")
f.write('\n'.join(dd_matches))
f.close()
| [
"richardson.ashlin@gmail.com"
] | richardson.ashlin@gmail.com |
2591fab72417dd22098a2d648b9a0eb3d6d035d2 | 0a40a0d63c8fce17f4a686e69073a4b18657b160 | /test/functional/feature_uacomment.py | 75b5a9c538e3ca66a92d254faac84d250742b090 | [
"MIT"
] | permissive | MotoAcidic/Cerebellum | 23f1b8bd4f2170c1ed930eafb3f2dfff07df1c24 | 6aec42007c5b59069048b27db5a8ea1a31ae4085 | refs/heads/main | 2023-05-13T06:31:23.481786 | 2021-06-09T15:28:28 | 2021-06-09T15:28:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,492 | py | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
from test_framework.test_framework import CerebellumTestFramework
from test_framework.util import assert_equal
class UacommentTest(CerebellumTestFramework):
    """Functional test for the -uacomment node option."""

    def set_test_params(self):
        # Single node on a clean chain is enough to inspect the user agent.
        self.num_nodes = 1
        self.setup_clean_chain = True

    def run_test(self):
        self.log.info("test multiple -uacomment")
        # The framework starts nodes with -uacomment=testnode<n>; the
        # comment section is the bracketed suffix of the subversion string.
        test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
        assert_equal(test_uacomment, "(testnode0)")

        # Additional comments are appended, separated by "; ".
        self.restart_node(0, ["-uacomment=foo"])
        foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
        assert_equal(foo_uacomment, "(testnode0; foo)")

        self.log.info("test -uacomment max length")
        self.stop_node(0)
        # An over-long comment must abort startup with this message.
        expected = "exceeds maximum length (256). Reduce the number or size of uacomments."
        self.assert_start_raises_init_error(0, ["-uacomment=" + 'a' * 256], expected)

        self.log.info("test -uacomment unsafe characters")
        for unsafe_char in ['/', ':', '(', ')']:
            # Delimiter characters are rejected to keep the UA parseable.
            expected = "User Agent comment (" + unsafe_char + ") contains unsafe characters"
            self.assert_start_raises_init_error(0, ["-uacomment=" + unsafe_char], expected)
# Entry point: run the functional test via the framework harness.
if __name__ == '__main__':
    UacommentTest().main()
| [
"travisfinch01@gmail.com"
] | travisfinch01@gmail.com |
f6efbe0d2135baeedb2748f5ec9057dab27256a2 | bb78aaf3f2e18da1d2e1f2d92d3e7ed37b054ee4 | /fuzzysearchapp/settings.py | e79356543abe8e027668178171c4c22f79d71cd7 | [] | no_license | Rijo13/fuzzysearchapp | 866bcbf2c5636fbbcd255313567e953a9674e76a | 2d0bbe57c0b8723049c04a07ec3388e4d134dc37 | refs/heads/master | 2020-04-06T10:16:54.931357 | 2018-11-16T18:29:13 | 2018-11-16T18:29:13 | 157,374,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,548 | py | """
Django settings for fuzzysearchapp project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# Absolute path of the project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control — rotate it and load
# from the environment before any production deployment.
SECRET_KEY = 'w*=5wqj0s)9+ol+!)2i#1n__60ir!24&q_ajahedf%1x@7)0js'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'fuzzysearchapp.urls'
# get template folders and add to TEMPLATES
project_templates = os.path.join(os.path.dirname(BASE_DIR), "fuzzysearchapp")
fuzzy_search_templates = os.path.join(os.path.dirname(project_templates), "fuzzysearch", "templates")
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            project_templates,
            # fuzzy_search_templates,
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'fuzzysearchapp.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
# path to the tsv file
# NOTE(review): machine-specific absolute Windows path — should be derived
# from BASE_DIR or an environment variable for portability.
TSV_FILE_LOCATION = 'E:\\____Rijo_bkp_Aug_2017\\DevelopmentNew\\djangoapps\\fuzzysearchapp\\word_search.tsv'
| [
"rijopjoseph13@gmail.com"
] | rijopjoseph13@gmail.com |
962654d69b6fccd4430ee25d6fb2b35603e978b2 | 9cc5f9f0f3233bbd795275a9ea679b8061351583 | /s3.py | 204c945916ef494219e958ef12cb893912fd65f5 | [] | no_license | barnettrob/boto3_s3_file_upload | 436c83b0bcd44a4c22bd3960b7f1c09fbcd9797e | 356b5346f4b640bb6e3604b0ee489cca1b733dd9 | refs/heads/master | 2020-04-10T02:17:13.209825 | 2018-12-11T21:29:03 | 2018-12-11T21:29:03 | 160,740,399 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,162 | py | from flask import Flask, flash, request, render_template, redirect
import boto3
import os
ALLOWED_EXTENSIONS = set(['pdf'])
app = Flask(__name__)
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in ALLOWED_EXTENSIONS
@app.route('/')
def index():
    """Render the upload landing page."""
    script_root = request.script_root
    return render_template('index.html', path=script_root)
@app.route('/upload', methods=['POST'])
def upload():
    """Receive a PDF from the upload form and store it in the S3 bucket.

    On success, respond with a page linking to the public file URL; on a
    missing, empty, or disallowed file, flash a message and redirect back
    to the form (or show the PDF-only notice).
    """
    index_page = request.url_root
    if 's3file' not in request.files:
        flash('Something went wrong. No file found')
        return redirect(request.url)
    if request.files['s3file'].filename == '':
        flash('No file selected')
        # Bug fix: execution previously fell through after flashing, so an
        # empty upload was rejected with the unrelated "only a PDF" page.
        # Redirect back to the form, mirroring the missing-file branch above.
        return redirect(request.url)
    if request.files['s3file'] and allowed_file(request.files['s3file'].filename):
        # AWS credentials come from the environment (e.g. loaded at startup
        # from a remote_env config in another S3 bucket via zappa_settings:
        #   "remote_env": "s3://my-other-bucket/super-secret-config.json"
        # ), never from the source tree.
        access_key = os.environ.get('access_key')
        secret_key = os.environ.get('secret_key')
        s3 = boto3.client(
            "s3",
            aws_access_key_id=access_key,
            aws_secret_access_key=secret_key
        )
        # Stream the uploaded file object straight to S3; the object is made
        # world-readable and keeps its original content type.
        s3.upload_fileobj(
            request.files['s3file'],
            'edb-cdn-downloads',
            'docs/' + request.files['s3file'].filename,
            ExtraArgs={
                "ACL": "public-read",
                "ContentType": request.files['s3file'].content_type
            }
        )
        file_url = 'https://get.enterprisedb.com/docs/' + request.files['s3file'].filename
        return '''<h3>File saved to s3 bucket</h3>
<p><a href="''' + file_url + '" target="_blank">' + file_url + '''</a></p>
<p><a href="''' + index_page + '">Upload another file</a></p>'
    else:
        return '''<h5>You can only upload a PDF.</h5>
<p><a href="''' + index_page + '">Upload another file</a></p>'
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| [
"robert.barnett@laptop210.ma.us"
] | robert.barnett@laptop210.ma.us |
6c9178f13779fa4d7a1c76768de12543cd42748f | be9d82f263466b397d354798fcf260ceee7f8419 | /scripts/docs.py | 02ecae1eeb67800e7d5b17c1c4238c78b8fb1b18 | [
"MIT",
"LicenseRef-scancode-generic-export-compliance"
] | permissive | pradeepbhadani/dffml | 0af135045a6d0bbde9b6f2d539839107bc0b5181 | 35bc31be462685efe78ede981dbef8fd5577882b | refs/heads/master | 2020-06-24T17:05:37.415285 | 2019-07-26T15:19:22 | 2019-07-26T15:19:22 | 199,024,888 | 0 | 0 | MIT | 2019-07-26T15:19:23 | 2019-07-26T13:50:26 | Python | UTF-8 | Python | false | false | 5,883 | py | # SPDX-License-Identifier: MIT
# Copyright (c) 2019 Intel Corporation
import os
import getpass
import inspect
import argparse
import pkg_resources
from typing import List
def traverse_get_config(target, *args):
    """Descend through nested ``{level: {"config": ...}}`` layers.

    Each name in *args* selects one level; the value returned is the
    ``"config"`` mapping of the last selected level, or *target* itself
    when no levels are given.
    """
    config = target
    for level in args:
        config = config[level]["config"]
    return config
TEMPLATE = """{name}
{underline}
*{maintenance}*
{help}"""
def data_type_string(data_type, nargs=None):
    """Return a human-readable name for *data_type* (e.g. ``str`` -> "String").

    When *nargs* is not None the type is presented as a list of that type,
    e.g. "List of strings". Types without a friendly name fall back to
    their ``__qualname__``.
    """
    if nargs is not None:
        element = data_type_string(data_type).lower()
        return "List of %ss" % (element,)
    friendly = {str: "String", int: "Integer", bool: "Boolean"}
    name = friendly.get(data_type)
    if name is not None:
        return name
    return data_type.__qualname__
def sanitize_default(default):
    """Stringify *default*, masking the current OS user name with "user"."""
    if isinstance(default, str):
        return default.replace(getpass.getuser(), "user")
    return str(default)
def build_args(config):
    """Render the reST "Args" section for one plugin *config* mapping.

    Each entry whose "arg" is not None becomes a bullet with its type and,
    when present, nested bullets for the default value and help text.
    Returns the section string, or False when nothing is documentable.
    """
    args = []
    for key, value in config.items():
        arg = value["arg"]
        if arg is None:
            # Entry is not exposed as a command-line argument; skip it.
            continue
        build = ""
        build += "- %s: %s\n" % (
            key,
            data_type_string(arg.get("type", str), arg.get("nargs", None)),
        )
        # A blank line separates the bullet from its nested sub-bullets.
        if "default" in arg or "help" in arg:
            build += "\n"
        if "default" in arg:
            # The default is sanitized so the current user name never leaks
            # into generated documentation.
            build += " - default: %s\n" % (sanitize_default(arg["default"]),)
        if "help" in arg:
            build += " - %s\n" % (arg["help"],)
        args.append(build.rstrip())
    if args:
        return "**Args**\n\n" + "\n\n".join(args)
    return False
def type_name(value):
    """Return the qualified name of a class, or *value* unchanged otherwise."""
    return value.__qualname__ if inspect.isclass(value) else value
def format_op_definitions(definitions):
    """Yield one reST bullet per operation definition in *definitions*.

    Each bullet shows the definition's name and primitive type; when the
    definition carries a spec, its signature parameters are listed as
    nested bullets (with defaults where declared).
    """
    for key, definition in definitions.items():
        item = "- %s: %s(type: %s)" % (
            key,
            definition.name,
            definition.primitive,
        )
        if definition.spec is not None:
            item += "\n\n"
            item += "\n".join(
                [
                    " - %s: %s%s"
                    % (
                        name,
                        type_name(param.annotation),
                        # Only annotate the default when one is declared.
                        "(default: %s)" % (param.default,)
                        if param.default is not inspect.Parameter.empty
                        else "",
                    )
                    for name, param in inspect.signature(
                        definition.spec
                    ).parameters.items()
                ]
            )
        yield item
def format_op(op):
    """Render an operation *op* as reST: stage, inputs, outputs, conditions.

    Sections without content are omitted; the remaining sections are joined
    with blank lines.
    """
    build = []
    build.append("**Stage: %s**\n\n" % (op.stage.value))
    if op.inputs:
        build.append(
            "**Inputs**\n\n" + "\n".join(format_op_definitions(op.inputs))
        )
    if op.outputs:
        build.append(
            "**Outputs**\n\n" + "\n".join(format_op_definitions(op.outputs))
        )
    if op.conditions:
        # Conditions have no spec, so they are listed without parameters.
        build.append(
            "**Conditions**\n\n"
            + "\n".join(
                [
                    "- %s: %s" % (definition.name, definition.primitive)
                    for definition in op.conditions
                ]
            )
        )
    return "\n\n".join(build)
def gen_docs(entrypoint: str, modules: List[str], maintenance: str = "Core"):
    """Generate reST documentation for every plugin under *entrypoint*.

    Only entry points whose top-level package is listed in *modules* are
    included. Returns a single reST string with one section per module.
    """
    per_module = {name: [] for name in modules}
    for i in pkg_resources.iter_entry_points(entrypoint):
        cls = i.load()
        if i.module_name.split(".")[0] not in modules:
            continue
        # Use the class docstring as the plugin description when present.
        doc = cls.__doc__
        if doc is None:
            doc = "No description"
        else:
            doc = inspect.cleandoc(doc)
        formatting = {
            "name": i.name,
            "underline": "~" * len(i.name),
            "maintenance": maintenance,
            "help": doc,
        }
        formatted = TEMPLATE.format(**formatting)
        # Operation plugins carry an `op` attribute describing their dataflow.
        if getattr(cls, "op", False):
            formatted += "\n\n" + format_op(cls.op)
        # Plugins expose their configurable arguments via cls.args({}).
        defaults = cls.args({})
        if defaults:
            config = traverse_get_config(defaults, *cls.add_orig_label())
            formatted += "\n\n" + build_args(config)
        per_module[i.module_name.split(".")[0]].append(formatted)
    return "\n\n".join(
        [
            name + "\n" + "-" * len(name) + "\n\n" + "\n\n".join(docs)
            for name, docs in per_module.items()
            if docs
        ]
    )
def main():
    """CLI entry point for the plugin documentation generator.

    With --entrypoint and --modules, print the generated docs to stdout.
    Otherwise read the --care file (one "entrypoint module module..." line
    per docs page) and write each page by appending generated docs to its
    template under scripts/docs/templates.
    """
    parser = argparse.ArgumentParser(description="Generate plugin docs")
    parser.add_argument("--entrypoint", help="Entrypoint to document")
    parser.add_argument("--modules", help="Modules to care about")
    parser.add_argument(
        "--maintenance",
        default="Core",
        help="Maintained as a part of DFFML or community managed",
    )
    parser.add_argument(
        "--care",
        default="scripts/docs/care",
        help="File with each line being: entrypoint package_name package_name...",
    )
    args = parser.parse_args()
    if getattr(args, "entrypoint", False) and getattr(args, "modules", False):
        print(gen_docs(args.entrypoint, args.modules, args.maintenance))
        return
    with open(args.care, "rb") as genspec:
        for line in genspec:
            entrypoint, modules = line.decode("utf-8").split(maxsplit=1)
            modules = modules.split()
            # Derive both the template filename and the output filename
            # from the entrypoint, e.g. dffml.model -> dffml_model.rst.
            template = entrypoint.replace(".", "_") + ".rst"
            output = os.path.join("docs", "plugins", template)
            template = os.path.join("scripts", "docs", "templates", template)
            with open(template, "rb") as template_fd, open(
                output, "wb"
            ) as output_fd:
                output_fd.write(
                    (
                        template_fd.read().decode("utf-8")
                        + gen_docs(entrypoint, modules)
                    ).encode("utf-8")
                )
main()
| [
"johnandersenpdx@gmail.com"
] | johnandersenpdx@gmail.com |
2d3581e6b3a97c12d269e1d981e38cf0b7d26f46 | e55cf6abda873e8a24dc5bd4d8aa96a8ca8b8504 | /Backup/examples/reserved-classes-of-identifiers/b.py | 0a26062b8511895333885d2c1552ecf250487160 | [
"MIT"
] | permissive | neoashraf/awesome-python | b628864d128bce58f7370ff8eb907556d73d43e7 | f52831305008427b0bcb2e661c4d4d262d35f314 | refs/heads/master | 2020-04-16T05:20:00.387992 | 2018-10-06T04:49:52 | 2018-10-06T04:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | from a import *
print(abc)
print(_a)
print(__b)
print(__c__)
| [
"alamin.ineedahelp@gmail.com"
] | alamin.ineedahelp@gmail.com |
ab89d9099ddaa8e3f2ce598d04a34423d640e4e3 | 2b42b40ae2e84b438146003bf231532973f1081d | /spec/mgm4442591.4.spec | e660aad50fa6f53e6383d7b21b075c43dcda53ce | [] | no_license | MG-RAST/mtf | 0ea0ebd0c0eb18ec6711e30de7cc336bdae7215a | e2ddb3b145068f22808ef43e2bbbbaeec7abccff | refs/heads/master | 2020-05-20T15:32:04.334532 | 2012-03-05T09:51:49 | 2012-03-05T09:51:49 | 3,625,755 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 21,603 | spec | {
"id": "mgm4442591.4",
"metadata": {
"mgm4442591.4.metadata.json": {
"format": "json",
"provider": "metagenomics.anl.gov"
}
},
"providers": {
"metagenomics.anl.gov": {
"files": {
"100.preprocess.info": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/100.preprocess.info"
},
"100.preprocess.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 233157,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/100.preprocess.passed.fna.gz"
},
"100.preprocess.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/100.preprocess.passed.fna.stats"
},
"100.preprocess.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 47,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/100.preprocess.removed.fna.gz"
},
"150.dereplication.info": {
"compression": null,
"description": null,
"size": 778,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/150.dereplication.info"
},
"150.dereplication.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 233160,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/150.dereplication.passed.fna.gz"
},
"150.dereplication.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/150.dereplication.passed.fna.stats"
},
"150.dereplication.removed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 50,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/150.dereplication.removed.fna.gz"
},
"299.screen.info": {
"compression": null,
"description": null,
"size": 410,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/299.screen.info"
},
"299.screen.passed.fna.gcs": {
"compression": null,
"description": null,
"size": 1869,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/299.screen.passed.fna.gcs"
},
"299.screen.passed.fna.gz": {
"compression": "gzip",
"description": null,
"size": 233153,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/299.screen.passed.fna.gz"
},
"299.screen.passed.fna.lens": {
"compression": null,
"description": null,
"size": 1508,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/299.screen.passed.fna.lens"
},
"299.screen.passed.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/299.screen.passed.fna.stats"
},
"350.genecalling.coding.faa.gz": {
"compression": "gzip",
"description": null,
"size": 151313,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/350.genecalling.coding.faa.gz"
},
"350.genecalling.coding.faa.stats": {
"compression": null,
"description": null,
"size": 122,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/350.genecalling.coding.faa.stats"
},
"350.genecalling.coding.fna.gz": {
"compression": "gzip",
"description": null,
"size": 238779,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/350.genecalling.coding.fna.gz"
},
"350.genecalling.coding.fna.stats": {
"compression": null,
"description": null,
"size": 314,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/350.genecalling.coding.fna.stats"
},
"350.genecalling.info": {
"compression": null,
"description": null,
"size": 714,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/350.genecalling.info"
},
"425.usearch.rna.fna.gz": {
"compression": "gzip",
"description": null,
"size": 43761,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/425.usearch.rna.fna.gz"
},
"425.usearch.rna.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/425.usearch.rna.fna.stats"
},
"440.cluster.rna97.fna.gz": {
"compression": "gzip",
"description": null,
"size": 43458,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/440.cluster.rna97.fna.gz"
},
"440.cluster.rna97.fna.stats": {
"compression": null,
"description": null,
"size": 310,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/440.cluster.rna97.fna.stats"
},
"440.cluster.rna97.info": {
"compression": null,
"description": null,
"size": 947,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/440.cluster.rna97.info"
},
"440.cluster.rna97.mapping": {
"compression": null,
"description": null,
"size": 0,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/440.cluster.rna97.mapping"
},
"450.rna.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 39,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/450.rna.expand.lca.gz"
},
"450.rna.expand.rna.gz": {
"compression": "gzip",
"description": null,
"size": 39,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/450.rna.expand.rna.gz"
},
"450.rna.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 40,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/450.rna.sims.filter.gz"
},
"450.rna.sims.gz": {
"compression": "gzip",
"description": null,
"size": 33,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/450.rna.sims.gz"
},
"450.rna.sims.info": {
"compression": null,
"description": null,
"size": 1376,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/450.rna.sims.info"
},
"550.cluster.aa90.faa.gz": {
"compression": "gzip",
"description": null,
"size": 150825,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/550.cluster.aa90.faa.gz"
},
"550.cluster.aa90.faa.stats": {
"compression": null,
"description": null,
"size": 121,
"type": "fasta",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/550.cluster.aa90.faa.stats"
},
"550.cluster.aa90.info": {
"compression": null,
"description": null,
"size": 1080,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/550.cluster.aa90.info"
},
"550.cluster.aa90.mapping": {
"compression": null,
"description": null,
"size": 1076,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/550.cluster.aa90.mapping"
},
"550.cluster.aa90.mapping.stats": {
"compression": null,
"description": null,
"size": 45,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/550.cluster.aa90.mapping.stats"
},
"640.loadAWE.info": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/640.loadAWE.info"
},
"650.superblat.expand.lca.gz": {
"compression": "gzip",
"description": null,
"size": 121259,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.expand.lca.gz"
},
"650.superblat.expand.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 99902,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.expand.ontology.gz"
},
"650.superblat.expand.protein.gz": {
"compression": "gzip",
"description": null,
"size": 172584,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.expand.protein.gz"
},
"650.superblat.sims.filter.gz": {
"compression": "gzip",
"description": null,
"size": 71574,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.sims.filter.gz"
},
"650.superblat.sims.gz": {
"compression": "gzip",
"description": null,
"size": 390271,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.sims.gz"
},
"650.superblat.sims.info": {
"compression": null,
"description": null,
"size": 1343,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/650.superblat.sims.info"
},
"900.abundance.function.gz": {
"compression": "gzip",
"description": null,
"size": 142941,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.abundance.function.gz"
},
"900.abundance.lca.gz": {
"compression": "gzip",
"description": null,
"size": 7959,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.abundance.lca.gz"
},
"900.abundance.md5.gz": {
"compression": "gzip",
"description": null,
"size": 54807,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.abundance.md5.gz"
},
"900.abundance.ontology.gz": {
"compression": "gzip",
"description": null,
"size": 136951,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.abundance.ontology.gz"
},
"900.abundance.organism.gz": {
"compression": "gzip",
"description": null,
"size": 108338,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.abundance.organism.gz"
},
"900.loadDB.sims.filter.seq": {
"compression": null,
"description": null,
"size": 2189859,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.loadDB.sims.filter.seq"
},
"900.loadDB.source.stats": {
"compression": null,
"description": null,
"size": 569,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/900.loadDB.source.stats"
},
"999.done.COG.stats": {
"compression": null,
"description": null,
"size": 114,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.COG.stats"
},
"999.done.KO.stats": {
"compression": null,
"description": null,
"size": 148,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.KO.stats"
},
"999.done.NOG.stats": {
"compression": null,
"description": null,
"size": 109,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.NOG.stats"
},
"999.done.Subsystems.stats": {
"compression": null,
"description": null,
"size": 736,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.Subsystems.stats"
},
"999.done.class.stats": {
"compression": null,
"description": null,
"size": 1776,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.class.stats"
},
"999.done.domain.stats": {
"compression": null,
"description": null,
"size": 58,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.domain.stats"
},
"999.done.family.stats": {
"compression": null,
"description": null,
"size": 5393,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.family.stats"
},
"999.done.genus.stats": {
"compression": null,
"description": null,
"size": 6938,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.genus.stats"
},
"999.done.order.stats": {
"compression": null,
"description": null,
"size": 2945,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.order.stats"
},
"999.done.phylum.stats": {
"compression": null,
"description": null,
"size": 838,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.phylum.stats"
},
"999.done.rarefaction.stats": {
"compression": null,
"description": null,
"size": 14737,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.rarefaction.stats"
},
"999.done.sims.stats": {
"compression": null,
"description": null,
"size": 81,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.sims.stats"
},
"999.done.species.stats": {
"compression": null,
"description": null,
"size": 18532,
"type": "txt",
"url": "http://api.metagenomics.anl.gov/analysis/data/id/mgm4442591.4/file/999.done.species.stats"
}
},
"id": "mgm4442591.4",
"provider": "metagenomics.anl.gov",
"providerId": "mgm4442591.4"
}
},
"raw": {
"mgm4442591.4.fna.gz": {
"compression": "gzip",
"format": "fasta",
"provider": "metagenomics.anl.gov",
"url": "http://api.metagenomics.anl.gov/reads/mgm4442591.4"
}
}
} | [
"jared.wilkening@gmail.com"
] | jared.wilkening@gmail.com |
9dd4028d837a6088d93a67f84e1dcf0f4ae9263a | b4f090ee2d9fdd05698acf8a739f37c29c00d024 | /backend/plugins/appengine/web/login/facebook.py | 9ddce87461ac3c6d7d36b50e5e418175fc66e5a4 | [
"MIT"
] | permissive | gamunax/pyhtongamunax | eed10cae4a646398088d42088909952eeb13c0a4 | 56df91f285e21da26a7b728366cb26f0321d38db | refs/heads/master | 2021-01-19T08:32:21.808519 | 2014-07-09T23:53:07 | 2014-07-09T23:53:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.tmpl_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission import facade
from gaepermission.decorator import login_not_required, permissions
from permission_app.model import ADMIN
import settings
from tekton import router
from tekton.gae.middleware.redirect import RedirectResponse
from web import admin
from web.login import pending
@login_not_required
def index(_resp, token, ret_path='/'):
    """Handle the Facebook login callback.

    Runs the facade login command for *token*. If the login produced a
    pending account link, email the user a passwordless confirmation link
    and show the pending page; otherwise redirect to *ret_path*.
    """
    cmd = facade.login_facebook(token, _resp)
    cmd()
    if cmd.pending_link:
        # The Facebook account matched an existing email account; ask the
        # user to confirm the link via an emailed one-time URL.
        pending_path = router.to_path(pending.index, cmd.pending_link.key.id())
        user_email = cmd.main_user_from_email.email
        facade.send_passwordless_login_link(user_email,
                                            settings.APP_URL + pending_path).execute()
        return TemplateResponse({'provider': 'Facebook', 'email': user_email}, 'login/pending.html')
    return RedirectResponse(ret_path)
@permissions(ADMIN)
@no_csrf
def form():
    """Admin-only form for configuring the Facebook app credentials."""
    app = facade.get_facebook_app_data().execute().result
    return TemplateResponse({'save_app_path': router.to_path(save), 'app': app})
@permissions(ADMIN)
def save(app_id, token):
    """Persist the Facebook app id/token and return to the admin page."""
    command = facade.save_or_update_facebook_app_data(app_id, token)
    command.execute()
    return RedirectResponse(admin)
"jjsm71@yahoo.com"
] | jjsm71@yahoo.com |
1e0e5b7ac306cb09a1e26e38bbef8510aa6eab4e | 05a522ac833aa897b28d6e3bad7e654aa4337aec | /core/views.py | d07f7aa77daa1a118ecf2aadf60edc64c58cfedc | [] | no_license | wudangbio/fake-zhihu | 155ba0c67e2b7963bba8c44b7dba962964b4700a | d4f2de38fa667acf426a9391b0aaf9e41da09fb2 | refs/heads/master | 2021-09-12T02:27:04.498777 | 2018-04-13T20:11:49 | 2018-04-13T20:11:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | from django.shortcuts import render, get_object_or_404, redirect
from django.core.urlresolvers import reverse
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
from django.contrib.auth.models import User
from django.db.models import Count
from django.views.decorators.http import require_POST
from django.contrib import messages
from taggit.models import Tag
from redis import StrictRedis, ConnectionPool
from account.models import Contact
from .models import Question, Action
from .forms import AnswerForm, QuestionForm
from .utils import create_action
from .decorators import ajax_required
# Module-level Redis connection shared by all views; the StrictRedis client
# is used for the question view counters and the "question_ranking" zset.
pool = ConnectionPool(host=settings.REDIS_HOST,
                      port=settings.REDIS_PORT,
                      db=settings.REDIS_DB)
r = StrictRedis(connection_pool=pool)
@login_required
def activity(request):
    """Show recent actions, restricted to users the viewer follows."""
    # Never show the viewer's own actions.
    actions = Action.objects.exclude(user=request.user)
    following_ids = request.user.following.values_list('id', flat=True)
    if following_ids:
        # select_related / prefetch_related avoid N+1 queries when the
        # template accesses each action's user, profile and target.
        actions = actions.filter(user_id__in=following_ids) \
                         .select_related('user', 'user__profile') \
                         .prefetch_related('target')
    return render(request,
                  'core/activity.html',
                  {'actions': actions})
@login_required
def question(request):
    """List every question."""
    context = {'questions': Question.objects.all()}
    return render(request, 'core/question.html', context)
@login_required
def question_ranking(request):
    """Show the ten most viewed questions, ranked by their Redis zset score."""
    # Ask Redis for only the top ten members instead of fetching the whole
    # sorted set and slicing in Python; the result is identical (the first
    # ten ids in descending score order) but transfers far less data.
    question_ranking = r.zrange('question_ranking', 0, 9, desc=True)
    question_ranking_ids = [int(id) for id in question_ranking]
    # The IN query does not preserve ranking order, so restore it explicitly.
    most_viewd = list(Question.objects.filter(id__in=question_ranking_ids))
    most_viewd.sort(key=lambda x: question_ranking_ids.index(x.id))
    return render(request,
                  'core/question_rank.html',
                  {'most_viewd': most_viewd})
# The home page reuses the question list view.
index = question
def question_detail(request, id):
    """Show one question with its answers; POST submits a new answer."""
    question = get_object_or_404(Question, pk=id)
    answers = question.answers.all()
    # View counting via Redis is currently disabled.
    # total_views = r.incr('question:{}:views'.format(question.id))
    # r.zincrby('question_ranking', question.id, 1)
    if request.method == 'POST':
        answer_form = AnswerForm(request.POST)
        # Disabled: one-answer-per-user restriction.
        '''
        if question.answers.filter(author=request.user).exists():
            messages.error(request, '你已回答过此问题了!')
            return redirect(question)
        '''
        if answer_form.is_valid():
            new_answer = answer_form.save(commit=False)
            # Attach the question and the author before saving.
            new_answer.question = question
            new_answer.author = request.user
            new_answer.save()
            create_action(request.user, '回答了问题', question)
            # Redirect to the question's absolute URL (PRG pattern).
            return redirect(question)
    else:
        answer_form = AnswerForm()
    return render(request,
                  'core/question_detail.html',
                  {'question': question,
                   'answers': answers,
                   'answer_form': answer_form})
@login_required
def topic(request):
    """List all topics (tags)."""
    return render(request, 'core/topics.html', {'topics': Tag.objects.all()})
def people(request, username):
    """Public profile page for *username*, including their activity stream."""
    profile_user = get_object_or_404(User, username=username)
    recent_actions = Action.objects.filter(user=profile_user)
    return render(request, 'core/people.html',
                  {'user': profile_user, 'actions': recent_actions})
@login_required
def create_question(request):
    """Show the ask form; on valid POST create the question and redirect."""
    if request.method == 'POST':
        form = QuestionForm(request.POST)
        if form.is_valid():
            # Set the author before saving the deferred instance.
            new_question = form.save(commit=False)
            new_question.author = request.user
            new_question.save()
            messages.success(request, '你问了一个问题')
            return redirect(reverse('question'))
    else:
        form = QuestionForm()
    return render(request, 'core/ask.html',
                  {
                      'form': form
                  })
@require_POST
@ajax_required
@login_required
def user_follow(request):
    """AJAX endpoint to follow/unfollow a user.

    Expects POST params ``id`` (target user pk) and ``action`` ("follow"
    or anything else to unfollow). Returns {"status": "ok"} on success,
    {"status": "ko"} on missing params or unknown user.
    """
    user_id = request.POST.get('id')
    action = request.POST.get('action')
    if user_id and action:
        try:
            user = User.objects.get(pk=user_id)
            if action == 'follow':
                # get_or_create keeps the follow relation idempotent.
                Contact.objects.get_or_create(user_from=request.user,
                                              user_to=user)
                create_action(request.user, '关注了用户', user)
            else:
                Contact.objects.filter(user_from=request.user,
                                       user_to=user).delete()
            return JsonResponse({'status': 'ok'})
        except User.DoesNotExist:
            return JsonResponse({'status': 'ko'})
    return JsonResponse({'status': 'ko'})
def search(request):
    """Placeholder search endpoint: echoes an error when no query word is given."""
    word = request.GET.get('word')
    if word:
        return JsonResponse({'success': 'Will be complete soon!'})
    return JsonResponse({'error': 'You got nothing'})
| [
"974123274@qq.com"
] | 974123274@qq.com |
dc260012f0c0d3c0506f694e98b820c05480820f | 42147c42b37e0e4910bbff03a895616120f30958 | /BGP_Forecast_Modules/Utilities/Utilities.py | bc329e7d3542e6471677978f9c7d8d36af5ff032 | [
"MIT"
] | permissive | xinyuwang1209/BGP_Forecast_Modules | 7be7b7db385d77ef37249e6b908f341b6a6edb98 | 8ecaee2f3e7bc40ed56acc0350e4e051bf751233 | refs/heads/master | 2020-05-05T13:04:46.854040 | 2019-04-24T16:59:47 | 2019-04-24T16:59:47 | 180,058,237 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,146 | py | import time
import datetime
import random
import pandas as pd
import numpy as np
import configparser
import ipaddress
# from .Database import Database as DB
import pathos
import signal
import sys
import os
import time
def get_ipnetwork_version(ipnetwork):
    """Return the IP protocol version (4 or 6) of an ipaddress network object."""
    ip_version = ipnetwork.version
    return ip_version
# redefine process pool via inheritance
import multiprocess.context as context
class NoDaemonProcess(context.Process):
    # Process subclass whose `daemon` flag is permanently False, so its
    # workers may spawn their own subprocesses (daemonic processes cannot
    # have children).
    def _get_daemon(self):
        # Always report non-daemonic.
        return False
    def _set_daemon(self, value):
        # Silently ignore attempts by the pool to mark the worker daemonic.
        pass
    daemon = property(_get_daemon, _set_daemon)
class NoDaemonPool(pathos.multiprocessing.Pool):
    # Pool whose workers are NoDaemonProcess instances, enabling nested
    # process pools (workers are allowed to fork children).
    def Process(self, *args, **kwds):
        # Factory the pool calls to spawn each worker process.
        return NoDaemonProcess(*args, **kwds)
def get_pid_i(x):
    """Return the PID of the process executing this call.

    The argument is ignored; it exists only so the function can be
    dispatched through pool.map().
    """
    return os.getpid()
def hard_kill_pool(pid_is, pool):
    # Forcefully stop a process pool: interrupt every worker by PID, then
    # terminate the pool object itself.
    for pid_i in pid_is:
        os.kill(pid_i, signal.SIGINT) # sending Ctrl+C
    pool.terminate()
def myproc(args):
    """Demo driver: fan `i` trivial identity tasks out to a pathos pool.

    Args:
        args: an ``(i, max_workers)`` tuple — ``i`` tasks are mapped onto a
            pool of ``max_workers`` processes.

    The mapped results are fetched but discarded (the function returns
    None), matching the original demo behaviour. On interrupt or any other
    failure the pool's workers are force-killed and the exception re-raised.
    """
    i, max_workers = args
    l_args = [j for j in range(i)]

    def mysubproc(x):
        # Identity task; stands in for real per-item work.
        return x

    pool = pathos.pools.ProcessPool(max_workers)
    pool.restart(force=True)
    # Collect worker PIDs up front so they can be force-killed on failure.
    pid_is = pool.map(get_pid_i, range(max_workers))
    try:
        l_traj_df = pool.amap(mysubproc, l_args)
        counter_i = 0
        while not l_traj_df.ready():
            time.sleep(1)
            if counter_i % 30 == 0:
                print('Waiting for children running in pool.amap() in myproc( {} ) with PIDs: {}'.format(i, pid_is))
            counter_i += 1
        l_traj_df = l_traj_df.get()
        pool.close()
        pool.join()
    except KeyboardInterrupt:
        # Fixed: the original did print('...').format(myens), which calls
        # .format() on print()'s None return and references the undefined
        # name `myens` — format `i` into the message instead.
        print('Ctrl+C received in myproc( {} ), attempting to terminate pool...'.format(i))
        hard_kill_pool(pid_is, pool)  # sending Ctrl+C
        raise
    except BaseException:
        # Was a bare `except:`; BaseException keeps the identical catch-all
        # semantics while being explicit. Always re-raised after cleanup.
        # (Also fixed the undefined `myens` here — use `i`.)
        print('Attempting to close parallel after exception: {} in myproc( {} )'.format(sys.exc_info()[0], i))
        hard_kill_pool(pid_is, pool)  # sending Ctrl+C
        raise
def get_network_version(prefix):
    """Return the IP protocol version (4 or 6) of *prefix* (an ipaddress network)."""
    version_number = prefix.version
    return version_number
def random_prefix_generator():
    """Return a random IPv4-style prefix string 'a.b.c.d/len' (len in 0..32).

    Draw order matters under a fixed seed: the prefix length is drawn first,
    then four octets; the octets appear in reverse draw order (as in the
    original prepend-based implementation).
    """
    prefix_len = random.randint(0, 32)
    octets = [str(random.randint(0, 255)) for _ in range(4)]
    return '.'.join(reversed(octets)) + '/' + str(prefix_len)
def random_asn_generator():
    """Return a random AS number in the inclusive range [0, 1000000]."""
    # randrange(0, 1000001) draws identically to randint(0, 1000000).
    return random.randrange(0, 1000001)
def random_boolean_generator():
    """Return True or False with equal probability."""
    draw = random.random()
    return draw < 0.5
def time_parser(str):
    """Parse a colon-separated duration like '1s:2m:3h:4d:5w'.

    Each component is an integer followed by a unit letter (s, m, h, d, w);
    components may appear in any order and any may be omitted (default 0).
    Unknown unit letters are silently ignored.

    Note: the parameter is named `str` (shadowing the builtin) — kept for
    interface compatibility.

    Returns:
        [seconds, minutes, hours, days, weeks] as a list of ints.
    """
    units = {'s': 0, 'm': 0, 'h': 0, 'd': 0, 'w': 0}
    for part in str.split(":"):
        unit = part[-1]
        if unit in units:
            units[unit] = int(part[:-1])
    return [units['s'], units['m'], units['h'], units['d'], units['w']]
def time_parser_datetime(str):
    """Convert a 'Ns:Nm:Nh:Nd:Nw' duration string into a datetime.timedelta."""
    seconds, minutes, hours, days, weeks = time_parser(str)
    return datetime.timedelta(seconds=seconds, minutes=minutes,
                              hours=hours, days=days, weeks=weeks)
def time_parser_epoch(str):
    """Convert a 'Ns:Nm:Nh:Nd:Nw' duration string into total seconds."""
    seconds, minutes, hours, days, weeks = time_parser(str)
    total_hours = hours + 24 * (days + 7 * weeks)
    return seconds + 60 * (minutes + 60 * total_hours)
def random_epoch_generator():
    """Return a random Unix timestamp in the inclusive range [0, 2**31]."""
    # randrange(0, 2147483649) draws identically to randint(0, 2147483648).
    return random.randrange(0, 2147483649)
def random_prefix_origin_generator(nrows, has_asn=False, fix_asn=None, special_one=False):
    """Generate a random prefix/origin announcement DataFrame for testing.

    Args:
        nrows: number of random rows to generate (before filtering).
        has_asn: also include 'asn' and 'received_from_asn' columns.
        fix_asn: if given, use this ASN for every row instead of a random one.
        special_one: currently unused — the hard-coded final row is always
            appended. NOTE(review): probably meant to gate that row; confirm
            intended behaviour before changing.

    Returns:
        DataFrame containing only the rows flagged invalid (by ASN or
        length), plus one hard-coded hijack-like row.

    NOTE(review): the hard-coded final row has 11 values, so this function
    only works with has_asn=True (the frame has 9 columns otherwise) —
    confirm before calling with has_asn=False.
    """
    # Generate random DataFrame; column order matters for df.loc[i] = row.
    df = pd.DataFrame()
    if has_asn:
        df['asn'] = 0
    df['prefix'] = 0
    df['origin'] = 0
    if has_asn:
        df['received_from_asn'] = 0
    df['invalid_length'] = 0
    df['invalid_asn'] = 0
    df['time'] = 0
    df['decision_1'] = 0
    df['decision_2'] = 0
    df['decision_3'] = 0
    df['decision_4'] = 0
    for i in range(nrows):
        row = []
        if has_asn:
            # Identity comparison: the original used the bug-prone `== None`.
            if fix_asn is None:
                row.append(random_asn_generator())
            else:
                row.append(fix_asn)
        row.append(random_prefix_generator())
        row.append(random_asn_generator())
        if has_asn:
            row.append(random_asn_generator())
        row.append(random_boolean_generator())   # invalid_length
        row.append(random_boolean_generator())   # invalid_asn
        row.append(random_epoch_generator())     # time
        # decision_1..decision_4 start unset.
        row.extend([None, None, None, None])
        df.loc[i] = row
    # Keep only rows flagged invalid by origin ASN or prefix length.
    df = df.loc[(df['invalid_asn'] == True) | (df['invalid_length'] == True)]
    df.loc[nrows] = [21472, '11.11.11.11/32', 13335, 1234, 1, 1, 100, None, None, None, None]
    return df
def print_time(*args):
    """Print *args* prefixed with the current timestamp (second resolution)."""
    timestamp = str(datetime.datetime.now())[:-7]  # strip microseconds
    print(timestamp, *args)
def get_config():
    """Load config.ini located one directory above this module.

    Missing files are silently tolerated (ConfigParser.read ignores them),
    in which case an empty parser is returned.
    """
    parent_dir = '/'.join(os.path.abspath(os.path.dirname(__file__)).split('/')[:-1])
    parser = configparser.ConfigParser()
    parser.read(parent_dir + '/config.ini')
    return parser
# if __name__ == "__main__":
# config = get_config()
# print(config.sections())
# print(random_prefix_generator())
# print(random_asn_generator())
# print(random_boolean_generator())
#
# # Test Time_Parser
# time_shift = "520w:1h:1s"
# print(time_parser_epoch(time_shift))
# time_current = time.time()
# print(datetime.datetime.fromtimestamp(time_current))
# print(datetime.datetime.fromtimestamp(time_current - time_parser_epoch(time_shift)))
# print(time_parser_datetime(time_shift))
#
# print(random_prefix_origin_generator(1,has_asn=True,fix_asn=13335,special_one=True))
| [
"xinyuwang1209@gmail.com"
] | xinyuwang1209@gmail.com |
a45badb01083f3dd8da02664c215be53c12a772b | 41687b3a69a22f217d5c21cefbba46bec32211d7 | /customers/apps.py | f56860dd630423df1f9c6a853dce34bf2701a895 | [] | no_license | smok2288/SPA | 5dc90e3b26ed1269c53d8f42b479c2e3b8ed5529 | 86f6026e6a3472c85f661b0e113791f479a99a95 | refs/heads/master | 2023-07-07T23:05:31.258085 | 2021-08-15T19:05:13 | 2021-08-15T19:05:13 | 396,458,728 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 149 | py | from django.apps import AppConfig
class SpConfig(AppConfig):
    # Django application configuration for the "customers" app.
    # Use 64-bit auto-incrementing primary keys by default.
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'customers'
| [
"https://github.com/smok2288"
] | https://github.com/smok2288 |
f78716a244468d95da06a063b911cc83f60a0e4d | 71b8ac21a413a72f9cd9ced282795de2b9e7dcc9 | /programa1.py | af2510e6a93346d9124444dc79c60a7341d733bb | [] | no_license | aleramirezsj/pruebas-en-clase-DGBD | 01f1fcd782461486b74b6978494be5c967361db6 | 27030c47ac6ecf1ff7255d09f59c68e64e505a23 | refs/heads/master | 2022-05-08T07:41:38.514727 | 2022-04-19T16:37:15 | 2022-04-19T16:37:15 | 11,011,953 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Simple interactive console survey (Python 2 syntax; prompts in Spanish).
print "CARGA DE ENCUESTA"
# Read name and surname from stdin.
nombre=raw_input("Ingrese su nombre:")
apellido=raw_input("Ingrese su apellido:")
print "Hola su nombre es: "+nombre
print "su apellido es: "+apellido
print "Hola "+nombre+" "+apellido
# Same greeting, via %-formatting this time.
print "Hola %s %s" % (nombre, apellido)
edad=raw_input("Ingrese su edad:")
# Rough days lived: age in years * 365 (ignores leap years).
dias_de_vida=int(edad)*365
print "tu dias de vida son:"+str(dias_de_vida)
| [
"aleramirezsj@gmail.com"
] | aleramirezsj@gmail.com |
bd212e03046d6f4021a732185732580b0e875514 | 080850d6327adadb6091660f8487855e62045963 | /ADT Implementations/Queue/Queue.py | 23590ac23e6df853dcbdffced5e5df7df7308118 | [] | no_license | snowtheghost/Algorithms | 6076f0d267d1afa0f8b97dc589831c5d9a01dd68 | f90342e2466089d8f57095f32b3954cd547444e0 | refs/heads/master | 2023-06-05T09:41:46.599294 | 2021-06-19T11:51:33 | 2021-06-19T11:51:33 | 339,636,024 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | from typing import Any, Optional
class Queue:
    """A FIFO queue backed by a Python list.

    Fix: the original stored the backing list as a *class* attribute
    (``queue = []``), so every Queue instance shared one list; it is now
    created per instance in ``__init__``.
    """

    def __init__(self) -> None:
        self.queue: list = []

    def enqueue(self, item: Any) -> None:
        """Add *item* to the back of the queue."""
        self.queue.append(item)

    def is_empty(self) -> bool:
        """Return True if the queue holds no items."""
        return not self.queue

    def dequeue(self) -> Optional[Any]:
        """Remove and return the front item, or None if the queue is empty.

        Note: list.pop(0) is O(n); collections.deque would make this O(1),
        but the list is kept to preserve the public ``queue`` attribute type.
        """
        if self.is_empty():
            return None
        return self.queue.pop(0)
if __name__ == '__main__':
    # Manual smoke test: demonstrates FIFO ordering ("a" comes out first)
    # and is_empty() transitions as items are added and removed.
    q = Queue()
    q.enqueue("a")
    q.enqueue("b")
    q.enqueue("c")
    print(q.dequeue())
    q.enqueue("d")
    print(q.dequeue())
    print(q.dequeue())
    print(q.is_empty())
    print(q.dequeue())
    print(q.is_empty())
print(q.dequeue()) | [
"67134961+snowtheghost@users.noreply.github.com"
] | 67134961+snowtheghost@users.noreply.github.com |
019a2296e2c2a44d1f6bad58af35702346c4199e | 2ae8fe4fccac95d98dffe77d5a948e64c3cb91b5 | /2404.py | ede38319f93e8799fd73115eae1a663d4d2fbf38 | [] | no_license | lilaboc/leetcode | e61362592f87d094fe57635af1d7d2b93284bfe8 | d4c5329a9d6ce945b965fd9d811757a80934dd36 | refs/heads/master | 2023-07-07T07:30:29.516698 | 2023-06-24T03:16:16 | 2023-06-24T03:16:16 | 25,621,054 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | # https://leetcode.com/problems/most-frequent-even-element/description/
from collections import Counter
from typing import List
class Solution:
    def mostFrequentEven(self, nums: List[int]) -> int:
        """Return the most frequent even value in *nums*.

        Ties are broken by the smallest value; returns -1 when the list
        contains no even number.
        """
        # Count evens only; feed Counter a generator instead of a list.
        counts = Counter(i for i in nums if i % 2 == 0)
        if not counts:
            return -1
        best = max(counts.values())
        # min() of the tied values replaces sorting every tie just to take [0].
        return min(value for value, count in counts.items() if count == best)
print(Solution().mostFrequentEven([0,1,2,2,4,4,1])) | [
"lilaboc.cn@gmail.com"
] | lilaboc.cn@gmail.com |
1b792aa6f1140cc922e73523d4d5655c8e194e2b | c516aadc74fe69ca6ebbb8f0d06f2f1f6d643eec | /0x08-python-more_classes/5-rectangle.py | fb317d5f386505edfa1bfeffbedf0c550e12c12a | [] | no_license | Adam-Of-Earth/holbertonschool-higher_level_programming | 05560351453456bf769bc761a5d4318e456a7887 | db0b098c2bdd2cdfa0e1019ed679b008f5187f76 | refs/heads/master | 2020-05-18T02:56:35.352147 | 2020-02-14T03:47:06 | 2020-02-14T03:47:06 | 184,131,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,041 | py | #!/usr/bin/python3
"""class defineing a rectangle"""
class Rectangle:
    """Rectangle defined by a non-negative integer width and height."""

    def __init__(self, width=0, height=0):
        """Initialize rectangle.

        Args:
            width (int): rectangle width, must be >= 0.
            height (int): rectangle height, must be >= 0.

        Raises:
            TypeError: if width/height is not an integer.
            ValueError: if width/height is negative.
        """
        self.height = height
        self.width = width

    def __str__(self):
        """Return a '#' drawing of the rectangle ('' if either side is 0)."""
        if self.__height == 0 or self.__width == 0:
            return ""
        rows = []
        for i in range(self.__height):
            rows.append('#' * self.__width)
        return '\n'.join(rows)

    def __repr__(self):
        """Return 'Rectangle(width, height)'."""
        return "{}({}, {})".format(
            type(self).__name__,
            self.__width,
            self.__height
        )

    def __del__(self):
        """Print a farewell message when the instance is deleted."""
        print("Bye rectangle...")

    @property
    def height(self):
        """int: the rectangle height (>= 0)."""
        return self.__height

    @height.setter
    def height(self, val):
        if not isinstance(val, int):
            raise TypeError("height must be an integer")
        if val < 0:
            # A negative value is the right type but an invalid value, so
            # ValueError (the original wrongly raised TypeError here).
            raise ValueError("height must be >= 0")
        self.__height = val

    @property
    def width(self):
        """int: the rectangle width (>= 0)."""
        return self.__width

    @width.setter
    def width(self, val):
        if not isinstance(val, int):
            raise TypeError("width must be an integer")
        if val < 0:
            # Same ValueError fix as for height.
            raise ValueError("width must be >= 0")
        self.__width = val

    def area(self):
        """Return the rectangle's area (width * height)."""
        return self.__height * self.__width

    def perimeter(self):
        """Return the rectangle's perimeter (0 if either side is 0)."""
        if self.__width == 0 or self.__height == 0:
            return 0
        return (self.__height * 2) + (self.__width * 2)
| [
"root@LAPTOP-CEF3OEJ9.localdomain"
] | root@LAPTOP-CEF3OEJ9.localdomain |
e7f62389e2d1934e290e25ec028cbc2e6bfcf75c | b83de7b1c7fa7cecd5cdc63554902f4b5746fceb | /mmimproc/projects/bbc/meg/picks.py | 954854bd73a41af3a496b457082732b52e67ccf5 | [] | no_license | mrjeffstevenson3/mmimproc | 195c2d660e041c68ea9b9db524c444ee111291e0 | 1aed4b1ce0ef5606a702af02b341ce3291a86283 | refs/heads/master | 2021-11-27T12:21:59.356889 | 2021-09-15T20:06:11 | 2021-09-15T20:06:11 | 171,966,939 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py | fullchpi = [
105,
106,
108,
208,
209,
211,
236,
253,
]
fullChpiValidEpochs = [ # excludes 253 for whom all epochs rejected
    # Same subject IDs as `fullchpi` minus 253, i.e. subjects that still
    # have at least one usable (non-rejected) epoch.
    105,
    106,
    108,
    208,
    209,
    211,
    236,
    ]
| [
"japsai@gmail.com"
] | japsai@gmail.com |
a8f7e8bf214ebd5a2a3b3373459f6afae3e20cb6 | 8698c1fb46ef82b5f4dd3927494b16d3f7313335 | /short_long_performance.py | 8a38eae1dea645dbbe522e559f5b309835409df9 | [] | no_license | AgFeather/CodeCompletion | a19589706a59aed2893f6b721e737e7c8a938576 | d5427ec27aafb1a2376058ce9ce2810f027b7fce | refs/heads/master | 2020-03-19T07:37:03.932391 | 2019-07-09T15:00:22 | 2019-07-09T15:00:22 | 136,131,799 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,842 | py | import tensorflow as tf
import numpy as np
import pickle
from collections import Counter
import utils
from nn_model.lstm_model import RnnModel
from setting import Setting
# (Untranslated module note below: "Tests the per-token prediction accuracy
# of the trained LSTM model at different sequence lengths.")
"""用以测试训练好的LSTM模型对不同长度时每个token的预测准确率"""
# Experiment configuration, pulled from the shared Setting object.
test_setting = Setting()
test_subset_data_dir = test_setting.sub_int_test_dir
model_save_dir = test_setting.lstm_model_save_dir
test_log_dir = test_setting.lstm_test_log_dir
num_subset_test_data = test_setting.num_sub_test_data
seq_per_subset = 5000
show_every_n = test_setting.test_show
num_terminal = test_setting.num_terminal
test_time_step = 50
# Path of the pickle file where accuracy-vs-position results are stored.
short_long_performance = test_setting.temp_info + 'short_long_performance.p'
class ShortLongTest(object):
    """Measure per-position prediction accuracy of a trained LSTM on long
    test sequences (i.e. performance as a function of sequence length)."""
    def __init__(self,
                 num_ntoken,
                 num_ttoken,):
        # Build the inference-mode model and restore the latest checkpoint.
        self.model = RnnModel(num_ntoken, num_ttoken, is_training=False)
        self.sess = tf.Session()
        self.last_chackpoints = tf.train.latest_checkpoint(
            checkpoint_dir=model_save_dir)
        saver = tf.train.Saver()
        saver.restore(self.sess, self.last_chackpoints)

    def subset_generator(self):
        """Yield (index, data) for each pickled test-data subset on disk."""
        for index in range(1, num_subset_test_data + 1):
            with open(test_subset_data_dir + 'int_part{}.json'.format(index), 'rb') as file:
                subset_data = pickle.load(file)
                yield index, subset_data

    def find_long_seq(self, length_define=5000, saved_info=False):
        """Collect test sequences of at least `length_define` tokens.

        Each kept sequence is truncated to length_define + 1 tokens
        (input plus one-step-shifted target).
        `saved_info` is accepted for interface compatibility but unused.
        """
        long_case = []
        length_counter = Counter()
        for index, subset_test_data in self.subset_generator():
            for token_seq in subset_test_data:
                length_counter[len(token_seq)] += 1
                if len(token_seq) >= length_define:
                    long_case.append(token_seq[:length_define + 1])
        # (Removed the original's unused sorted copy of length_counter.)
        return long_case

    def short_long_performance(self):
        """Run the model over every long test case and record, for each
        position, the fraction of correct non-terminal / terminal
        predictions. Results are pickled to `short_long_performance` and
        also returned as (nt_accuracy, tt_accuracy)."""
        length_define = 5000
        long_case = self.find_long_seq(length_define)
        num_test_case = len(long_case)
        long_case = np.array(long_case)  # shape: (num_test_case, length_define + 1, 2)
        test_epoch = 1
        length_nt_correct = np.zeros(length_define, dtype=np.float32)
        length_tt_correct = np.zeros(length_define, dtype=np.float32)
        for i in range(test_epoch):
            lstm_state = self.sess.run(self.model.init_state)
            for test_case in long_case:
                # Inputs are positions [0, length_define); targets are the
                # same token stream shifted one step ahead.
                nt_token_input = test_case[:length_define, 0].reshape([1, length_define])
                tt_token_input = test_case[:length_define, 1].reshape([1, length_define])
                nt_token_target = test_case[1:length_define + 1, 0]
                tt_token_target = test_case[1:length_define + 1, 1]
                feed = {self.model.lstm_state: lstm_state,
                        self.model.n_input: nt_token_input,
                        self.model.t_input: tt_token_input,
                        self.model.keep_prob: 1.0}
                lstm_state, n_prediction, t_prediction = self.sess.run(
                    [self.model.final_state, self.model.n_output, self.model.t_output], feed)
                n_prediction = np.argmax(n_prediction, axis=1)
                t_prediction = np.argmax(t_prediction, axis=1)
                nt_result = np.equal(n_prediction, nt_token_target).astype(np.float32).reshape(length_define)
                tt_result = np.equal(t_prediction, tt_token_target).astype(np.float32).reshape(length_define)
                length_nt_correct += nt_result
                length_tt_correct += tt_result
        nt_accuracy = length_nt_correct / (test_epoch * num_test_case)
        tt_accuracy = length_tt_correct / (test_epoch * num_test_case)
        # `with` guarantees the results file is flushed and closed (the
        # original opened it without ever closing it).
        with open(short_long_performance, 'wb') as file:
            pickle.dump([nt_accuracy, tt_accuracy], file)
        return nt_accuracy, tt_accuracy

    def plot_performance(self):
        """Plot the accuracy-vs-position curves saved by short_long_performance()."""
        import matplotlib.pyplot as plt
        with open(short_long_performance, 'rb') as file:  # was left unclosed
            nt_accuracy, tt_accuracy = pickle.load(file)
        plt.figure(figsize=(40, 12))
        plt.plot(nt_accuracy, label='non-terminal')
        plt.plot(tt_accuracy, label='terminal')
        plt.xlabel('time step')
        plt.ylabel('accuracy')
        plt.title('performance with length')
        plt.grid()
        plt.show()
if __name__ == '__main__':
    # test step
    # The vocabulary dictionaries determine the model's output sizes.
    tt_token_to_int, tt_int_to_token, nt_token_to_int, nt_int_to_token = utils.load_dict_parameter()
    num_ntoken = len(nt_token_to_int)
    num_ttoken = len(tt_token_to_int)
    model = ShortLongTest(num_ntoken, num_ttoken)
model.short_long_performance() | [
"18843740508@163.com"
] | 18843740508@163.com |
7c4472d1c378a26fb67ec9911be21421a4e0f8e4 | b980c0bae0cff8533253c135449beb6e09759dca | /Grader_Exercise/04_Loop/04_Loop_002.py | 1edaec5e838baacb6893af99ef8e5cf7b8e6e326 | [] | no_license | manhanton/COM-PROG | 1f76985b3f3fea54057a0da1d3911dc91998c5be | 7a4f2c62ecd6677ec1f818a5d115aa0fb182b3a2 | refs/heads/main | 2023-06-18T10:25:26.448133 | 2021-07-16T07:46:45 | 2021-07-16T07:46:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | d = [int(i) for i in input().split(' ')]
# Lomuto-style partition of list `d` (read from stdin on the line above):
# the last element is the pivot; elements <= pivot are swapped to the front.
p = d[-1]
i = -1
j = 0
n = len(d)
while j < n-1 :
    if d[j] <= p :
        # Grow the "<= pivot" region and move d[j] into it.
        i += 1
        d[i],d[j] = d[j],d[i]
    j += 1
# Place the pivot just after the "<= pivot" region (position i+1).
d[-1],d[i+1] = d[i+1],d[-1]
print(d) | [
"meen2545@gmail.com"
] | meen2545@gmail.com |
6d06c44ef0584637a7e5e9645ae9ac066be5356e | 610349599d32d7fc5ddae5dcb202836ca8be50aa | /blog/migrations/0012_auto_20200916_1140.py | 6fec12b0c70e4c5506fc56f580196f8e254756e6 | [] | no_license | reetjakhar09/blogs | e3d9d14c01096e4a50474b5a7f562bea7b655a76 | d0e17a8dd3761aaa08a59c466820040e05dc300a | refs/heads/master | 2022-12-20T05:03:50.350408 | 2020-09-29T16:40:17 | 2020-09-29T16:40:17 | 299,676,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 431 | py | # Generated by Django 2.2.16 on 2020-09-16 11:40
import autoslug.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated Django migration: re-declares Post.slug as an editable
    # AutoSlugField populated from the post title.
    dependencies = [
        ('blog', '0011_auto_20200916_1139'),
    ]
    operations = [
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=autoslug.fields.AutoSlugField(editable=True, populate_from='title'),
        ),
    ]
| [
"test@gmail.com"
] | test@gmail.com |
ac246990b012581cf2aef59788d0fe2daab1f65d | e6cb36499ac9395df0633572720067590b2def9d | /mysite/polls/views.py | 255064582ae09b03547b525b50bb7f7aab475c66 | [] | no_license | zizoua5000/django_tut | 8f6f2ea29fa0815c85ceb914c658763ced2d8d70 | a9375e8af4d7bb191cbe5a590b23b864039b78d0 | refs/heads/master | 2021-05-24T17:19:20.689104 | 2020-04-22T19:24:45 | 2020-04-22T19:24:45 | 253,674,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 883 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.template import loader
from django.http import Http404
from .models import Question
def index(request):
    """Render the five most recently published questions."""
    recent_questions = Question.objects.order_by('-pub_date')[:5]
    context = {'latest_question_list': recent_questions}
    return render(request, 'polls/index.html', context)
def detail(request, question_id):
    """Render the detail page for one question; 404 if it does not exist."""
    question = get_object_or_404(Question, pk=question_id)
    context = {'question': question}
    return render(request, 'polls/detail.html', context)
def results(request, question_id):
    """Placeholder results page for a question."""
    template = "You're looking at the results of question %s."
    return HttpResponse(template % question_id)
def vote(request, question_id):
return HttpResponse("You're voting on question %s." % question_id) | [
"zizoua5000@gmail.com"
] | zizoua5000@gmail.com |
23d921df74820fd0183bc9e08cb23f850a616537 | 288fcc9d9e4aa717955dcebda0d0f2f96356f104 | /Build_Out_Tool_4.py | dc3f4e222dab96a6c226d2d12309e9a0b956148d | [] | no_license | Daktic/Buildout-V4 | 8b081d8e5c0f96a15ca5c51a7794468db8fc3a3e | c6adecc554a9e46539d1322477faee964c4b1ae0 | refs/heads/master | 2022-12-17T19:33:03.546454 | 2020-09-23T19:07:35 | 2020-09-23T19:07:35 | 293,821,642 | 0 | 0 | null | 2020-09-09T19:29:28 | 2020-09-08T13:39:27 | Python | UTF-8 | Python | false | false | 32,367 | py | import tkinter as tk
from tkinter import IntVar
from openpyxl import Workbook
# --- Main window: a fullscreen Tk root with one large canvas; every widget
# --- is positioned on the canvas via absolute create_window coordinates.
root= tk.Tk()
canvas1 = tk.Canvas(root, width = 1700, height = 800)
canvas1.pack()
root.attributes("-fullscreen", True)
# --- Text-entry fields for keywords, taxonomy parts and landing URLs ---
keywords = tk.Entry (root)
branded_words = tk.Entry (root)
#brand
tactic_type = tk.Entry (root)
tier = tk.Entry (root)
corp_non = tk.Entry (root)
lob = tk.Entry (root)
intiative = tk.Entry (root)
#match_type
region = tk.Entry (root)
market = tk.Entry (root)
sub_region = tk.Entry (root)
language = tk.Entry (root)
device = tk.Entry (root)
topic = tk.Entry(root)
url_google = tk.Entry (root)
url_bing = tk.Entry (root)
# Per-consumer-journey-stage URL fields (AW/IT/CO/EV x Google/Bing).
url_g_aw = tk.Entry (root)
url_g_it = tk.Entry (root)
url_g_co = tk.Entry (root)
url_g_ev = tk.Entry (root)
url_b_aw = tk.Entry (root)
url_b_it = tk.Entry (root)
url_b_co = tk.Entry (root)
url_b_ev = tk.Entry (root)
#cj_stages
# --- Place the entry fields on the canvas ---
canvas1.create_window(100, 140, window=keywords)
canvas1.create_window(350, 140, window=branded_words)
canvas1.create_window(250, 240, window=tactic_type)
canvas1.create_window(400, 240, window=tier)
canvas1.create_window(550, 240, window=corp_non)
canvas1.create_window(700, 240, window=lob)
canvas1.create_window(850, 240, window=intiative)
canvas1.create_window(1000, 240, window=region)
canvas1.create_window(1150, 240, window=market)
canvas1.create_window(1300, 240, window=sub_region)
canvas1.create_window(1450, 240, window=language)
canvas1.create_window(1600, 240, window=device)
canvas1.create_window(600, 140, window=topic)
canvas1.create_window(100, 340, window=url_google)
canvas1.create_window(250, 340, window=url_bing)
canvas1.create_window(700, 440, window=url_g_aw)
canvas1.create_window(850, 440, window=url_b_aw)
canvas1.create_window(700, 540, window=url_g_it)
canvas1.create_window(850, 540, window=url_b_it)
canvas1.create_window(700, 640, window=url_g_co)
canvas1.create_window(850, 640, window=url_b_co)
canvas1.create_window(700, 740, window=url_g_ev)
canvas1.create_window(850, 740, window=url_b_ev)
# --- Labels describing each entry field ---
keyword_label = tk.Label(root, text='Keywords with "/" delimintor')
branded_words_label = tk.Label(root, text='Branded Words with "/" delimintor')
tactic_type_label = tk.Label(root, text='AW/DG')
tier_label = tk.Label(root, text='Tier')
corp_non_label = tk.Label(root, text='Corp/Non-Corp')
lob_label = tk.Label(root, text='L.O.B')
intiative_label = tk.Label(root, text='Intitative')
region_label = tk.Label(root, text='Region')
market_label = tk.Label(root, text='Market')
sub_region_label = tk.Label(root, text='Sub Region')
language_label = tk.Label(root, text='Language')
device_label = tk.Label(root, text='Device')
topic_label = tk.Label(root, text='Topic')
url_google_label = tk.Label(root, text='URL Google')
url_bing_label = tk.Label(root, text='URL Bing')
cj_url_labels = tk.Label(root, text='URLS for CJ stage')
google_label = tk.Label(root, text='Google')
bing_label = tk.Label(root, text='Bing')
aw_label = tk.Label(root, text='AW')
it_label = tk.Label(root, text='IT')
co_label = tk.Label(root, text='CO')
ev_label = tk.Label(root, text='EV')
# --- Place the labels; coordinates mirror their entry fields ---
canvas1.create_window(100, 100, window=keyword_label)
canvas1.create_window(350, 100, window=branded_words_label)
canvas1.create_window(600, 100, window=topic_label)
canvas1.create_window(250, 200, window=tactic_type_label)
canvas1.create_window(400, 200, window=tier_label)
canvas1.create_window(550, 200, window=corp_non_label)
canvas1.create_window(700, 200, window=lob_label)
canvas1.create_window(850, 200, window=intiative_label)
canvas1.create_window(1000, 200, window=region_label)
canvas1.create_window(1150, 200, window=market_label)
canvas1.create_window(1300, 200, window=sub_region_label)
canvas1.create_window(1450, 200, window=language_label)
canvas1.create_window(1600, 200, window=device_label)
canvas1.create_window(100, 300, window=url_google_label)
canvas1.create_window(220, 300, window=url_bing_label)
canvas1.create_window(770, 380, window=cj_url_labels)
canvas1.create_window(700, 410, window=google_label)
canvas1.create_window(840, 410, window=bing_label)
canvas1.create_window(600, 440, window=aw_label)
canvas1.create_window(600, 540, window=it_label)
canvas1.create_window(600, 640, window=co_label)
canvas1.create_window(600, 740, window=ev_label)
# --- Build-type selector: the chosen value drives taxonomyBuild()'s branch ---
v = IntVar()
v.set(1)
tk.Radiobutton(root, text="Consumer Journey", variable=v, value=1).pack(anchor='w')
tk.Radiobutton(root, text="Consumer Journey - No Stage", variable=v, value=2).pack(anchor='w')
tk.Radiobutton(root, text="Audiance Pilot", variable=v, value=3).pack(anchor='w')
def getInfo():
    """Collect and normalize every user-entered build setting.

    Returns a 15-tuple:
        (keyword_list, branded_word_list, tactic_type, tier, corp_non, lob,
         initiative, region, market, sub_region, language, device, topic,
         url_google, url_bing)
    Keywords and branded words are '/'-split lists (branded lowercased);
    tactic/region/market/language are uppercased; tier gets a 'T' prefix.
    """
    keyword_list = keywords.get().split('/')
    branded_word_list = branded_words.get().lower().split('/')
    return (
        keyword_list,
        branded_word_list,
        tactic_type.get().upper(),
        'T' + tier.get(),
        corp_non.get(),
        lob.get(),
        intiative.get(),
        region.get().upper(),
        market.get().upper(),
        sub_region.get(),
        language.get().upper(),
        device.get(),
        topic.get(),
        url_google.get(),
        url_bing.get(),
    )
def get_cj_URLS():
    """Return the per-stage landing URLs as an 8-tuple in the order:
    (Google AW, Bing AW, Google IT, Bing IT, Google CO, Bing CO,
     Google EV, Bing EV)."""
    return (
        url_g_aw.get(),
        url_b_aw.get(),
        url_g_it.get(),
        url_b_it.get(),
        url_g_co.get(),
        url_b_co.get(),
        url_g_ev.get(),
        url_b_ev.get(),
    )
def taxonomyBuild():
    """Build campaign and ad-group names from the entered taxonomy fields.

    The radio selection picks the naming scheme:
      radio == 3: Audience Pilot  -> ad groups suffixed _HIQ/_MIQ/_UNK
      radio == 1: Consumer Journey -> ad groups suffixed _AW/_IT/_CO/_EV
      otherwise : Consumer Journey without stage suffixes
    Each scheme is produced for BR/UB (branded/unbranded) x Broad/Exact.

    `info` indices (see getInfo's return order): 2=tactic, 5=lob,
    6=initiative, 7=region, 10=language; negative: -3=topic, -5=language,
    -8=region.
    """
    info = getInfo()
    radio = v.get()
    # Shared name fragments.
    br = 'BR_'
    ub = 'UB_'
    broad = '_Broad_'
    exact = '_Exact_'
    # Campaign names: tactic..initiative joined, then region..device joined.
    campaign_br_broad = br +'_'.join(info[2:7]) + broad + '_'.join(info[7:-3])
    campaign_br_exact = br +'_'.join(info[2:7]) + exact + '_'.join(info[7:-3])
    campaign_ub_broad = ub +'_'.join(info[2:7]) + broad + '_'.join(info[7:-3])
    campaign_ub_exact = ub +'_'.join(info[2:7]) + exact + '_'.join(info[7:-3])
    if radio == 3:
        # Audience Pilot: one ad group per intent bucket (high/medium/unknown).
        hiq = '_HIQ'
        miq = '_MIQ'
        unk = '_UNK'
        adgroup_br_broad_HIQ = br + str(info[5]) + '_' + str(info[2]) + hiq + broad + str(info[7]) + '_' + str(info[10])
        adgroup_br_broad_MIQ = br + str(info[5]) + '_' + str(info[2]) + miq + broad + str(info[7]) + '_' + str(info[10])
        adgroup_br_broad_UNK = br + str(info[5]) + '_' + str(info[2]) + unk + broad + str(info[7]) + '_' + str(info[10])
        adgroup_br_exact_HIQ = br + str(info[5]) + '_' + str(info[2]) + hiq + exact + str(info[7]) + '_' + str(info[10])
        adgroup_br_exact_MIQ = br + str(info[5]) + '_' + str(info[2]) + miq + exact + str(info[7]) + '_' + str(info[10])
        adgroup_br_exact_UNK = br + str(info[5]) + '_' + str(info[2]) + unk + exact + str(info[7]) + '_' + str(info[10])
        adgroup_ub_broad_HIQ = ub + str(info[5]) + '_' + str(info[2]) + hiq + broad + str(info[7]) + '_' + str(info[10])
        adgroup_ub_broad_MIQ = ub + str(info[5]) + '_' + str(info[2]) + miq + broad + str(info[7]) + '_' + str(info[10])
        adgroup_ub_broad_UNK = ub + str(info[5]) + '_' + str(info[2]) + unk + broad + str(info[7]) + '_' + str(info[10])
        adgroup_ub_exact_HIQ = ub + str(info[5]) + '_' + str(info[2]) + hiq + exact + str(info[7]) + '_' + str(info[10])
        adgroup_ub_exact_MIQ = ub + str(info[5]) + '_' + str(info[2]) + miq + exact + str(info[7]) + '_' + str(info[10])
        adgroup_ub_exact_UNK = ub + str(info[5]) + '_' + str(info[2]) + unk + exact + str(info[7]) + '_' + str(info[10])
        # 4 campaigns followed by 12 ad groups.
        return campaign_br_broad, campaign_br_exact, campaign_ub_broad, campaign_ub_exact, adgroup_br_broad_HIQ, adgroup_br_broad_MIQ, adgroup_br_broad_UNK, adgroup_br_exact_HIQ, adgroup_br_exact_MIQ, adgroup_br_exact_UNK, adgroup_ub_broad_HIQ,adgroup_ub_broad_MIQ, adgroup_ub_broad_UNK, adgroup_ub_exact_HIQ,adgroup_ub_exact_MIQ, adgroup_ub_exact_UNK
    elif radio == 1:
        # Consumer Journey: one ad group per stage
        # (Awareness/Interest/Consideration/Evaluation).
        aw = '_AW'
        it = '_IT'
        co = '_CO'
        ev = '_EV'
        adgroup_br_broad_AW = br + str(info[6]) + '_' + str(info[-3]) +aw +broad + str(info[-5])
        adgroup_br_broad_IT = br + str(info[6]) + '_' + str(info[-3]) +it +broad + str(info[-5])
        adgroup_br_broad_CO = br + str(info[6]) + '_' + str(info[-3]) +co +broad + str(info[-5])
        adgroup_br_broad_EV = br + str(info[6]) + '_' + str(info[-3]) +ev +broad + str(info[-5])
        adgroup_br_exact_AW = br + str(info[6]) + '_' + str(info[-3]) +aw +exact + str(info[-5])
        adgroup_br_exact_IT = br + str(info[6]) + '_' + str(info[-3]) +it +exact + str(info[-5])
        adgroup_br_exact_CO = br + str(info[6]) + '_' + str(info[-3]) +co +exact + str(info[-5])
        adgroup_br_exact_EV = br + str(info[6]) + '_' + str(info[-3]) +ev +exact + str(info[-5])
        adgroup_ub_broad_AW = ub + str(info[6]) + '_' + str(info[-3]) +aw +broad + str(info[-5])
        adgroup_ub_broad_IT = ub + str(info[6]) + '_' + str(info[-3]) +it +broad + str(info[-5])
        adgroup_ub_broad_CO = ub + str(info[6]) + '_' + str(info[-3]) +co +broad + str(info[-5])
        adgroup_ub_broad_EV = ub + str(info[6]) + '_' + str(info[-3]) +ev +broad + str(info[-5])
        adgroup_ub_exact_AW = ub + str(info[6]) + '_' + str(info[-3]) +aw +exact + str(info[-5])
        adgroup_ub_exact_IT = ub + str(info[6]) + '_' + str(info[-3]) +it +exact + str(info[-5])
        adgroup_ub_exact_CO = ub + str(info[6]) + '_' + str(info[-3]) +co +exact + str(info[-5])
        adgroup_ub_exact_EV = ub + str(info[6]) + '_' + str(info[-3]) +ev +exact + str(info[-5])
        # 4 campaigns followed by 16 ad groups.
        return campaign_br_broad, campaign_br_exact, campaign_ub_broad, campaign_ub_exact, adgroup_br_broad_AW, adgroup_br_broad_IT, adgroup_br_broad_CO, adgroup_br_broad_EV, adgroup_br_exact_AW, adgroup_br_exact_IT, adgroup_br_exact_CO, adgroup_br_exact_EV, adgroup_ub_broad_AW, adgroup_ub_broad_IT, adgroup_ub_broad_CO, adgroup_ub_broad_EV, adgroup_ub_exact_AW, adgroup_ub_exact_IT, adgroup_ub_exact_CO, adgroup_ub_exact_EV
    else:
        # Consumer Journey without a stage suffix: one ad group per
        # BR/UB x Broad/Exact combination.
        adgroup_br_broad = br + str(info[6]) + '_' + str(info[-3]) +broad + str(info[-8]) + '_' + str(info[-5])
        adgroup_br_exact = br + str(info[6]) + '_' + str(info[-3]) +exact + str(info[-8]) + '_' + str(info[-5])
        adgroup_ub_broad = ub + str(info[6]) + '_' + str(info[-3]) +broad + str(info[-8]) + '_' + str(info[-5])
        adgroup_ub_exact = ub + str(info[6]) + '_' + str(info[-3]) +exact + str(info[-8]) + '_' + str(info[-5])
        # 4 campaigns followed by 4 ad groups.
        return campaign_br_broad, campaign_br_exact, campaign_ub_broad, campaign_ub_exact, adgroup_br_broad, adgroup_br_exact, adgroup_ub_broad, adgroup_ub_exact
def createBuild():
    """Build an SEM keyword workbook with openpyxl and save it to disk.

    Reads the radio-button choice from the module-level tkinter variable `v`
    and builds one of three layouts:
      1 -> one sheet per consumer-journey stage (Awareness / Interest /
           Consideration / Evaluation), saved as "Build Out_CJ_Stage.xlsx"
      2 -> a single "Build" sheet, saved as "Build Out_CJ_No_Stage.xlsx"
      3 -> a single "Build Out" sheet with three ad-group rows per keyword,
           saved as "Build Out_Audience_Pilot.xlsx"

    Relies on module-level helpers defined elsewhere in this file:
    taxonomyBuild(), getInfo(), get_cj_URLS(), and openpyxl's Workbook.
    """
    taxonomy = taxonomyBuild()
    info = getInfo()
    urls = get_cj_URLS()
    wb = Workbook()
    ws = wb.active
    radio = v.get()
    if radio == 1:
        #root words used to expand the user's keywords per journey stage
        pre_rootword_what ='What'
        pre_rootword_how ='How'
        pre_rootword_explain ='Explain'
        pre_rootword_best ='Best'
        pre_rootword_top ='Top'
        pre_rootword_compare ='Compare'
        pre_rootword_try ='Try'
        post_rootword_explained ='Explained'
        post_rootword_examples ='Examples'
        post_rootword_sources ='Sources'
        post_rootword_versus ='Versus'
        post_rootword_review ='Review'
        post_rootword_trial ='Trial'
        post_rootword_demo ='Demo'
        #create keyword permutations from root words and user-generated keywords
        #Awareness
        Awareness_perm_what = [pre_rootword_what + ' ' + x for x in info[0]]
        Awareness_perm_how = [pre_rootword_how + ' ' + x for x in info[0]]
        Awareness_perm_explain = [pre_rootword_explain + ' ' + x for x in info[0]]
        Awareness_perm_explained = [x + ' ' + post_rootword_explained for x in info[0]]
        #Interest
        Interest_perm_best = [pre_rootword_best + ' ' + x for x in info[0]]
        Interest_perm_top = [pre_rootword_top + ' ' + x for x in info[0]]
        Interest_perm_examples = [x + ' ' + post_rootword_examples for x in info[0]]
        Interest_perm_sources = [x + ' ' + post_rootword_sources for x in info[0]]
        #Consideration
        Consideration_perm_compare = [pre_rootword_compare + ' ' + x for x in info[0]]
        Consideration_perm_versus = [x + ' ' + post_rootword_versus for x in info[0]]
        Consideration_perm_review = [x + ' ' + post_rootword_review for x in info[0]]
        #Evaluation
        Evaluation_perm_try = [pre_rootword_try + ' ' + x for x in info[0]]
        Evaluation_perm_trial = [x + ' ' + post_rootword_trial for x in info[0]]
        Evaluation_perm_demo = [x + ' ' + post_rootword_demo for x in info[0]]
        #Grouping by Stage
        Awareness = Awareness_perm_what + Awareness_perm_how + Awareness_perm_explain + Awareness_perm_explained
        Interest = Interest_perm_best + Interest_perm_top + Interest_perm_examples + Interest_perm_sources
        Consideration = Consideration_perm_compare + Consideration_perm_versus + Consideration_perm_review
        Evaluation = Evaluation_perm_try + Evaluation_perm_trial + Evaluation_perm_demo
        #creates broad match keywords ("+word +word" form) and appends them
        Awareness_broad_1 = ["+" + suit for suit in Awareness]
        Awareness_broad_2 = [x.replace(' ',' +') for x in Awareness_broad_1]
        Awareness = Awareness + Awareness_broad_2
        Interest_broad_1= ["+" + suit for suit in Interest]
        Interest_broad_2 = [x.replace(' ',' +') for x in Interest_broad_1]
        Interest = Interest + Interest_broad_2
        Consideration_broad_1 = ["+" + suit for suit in Consideration]
        Consideration_broad_2 = [x.replace(' ', ' +') for x in Consideration_broad_1]
        Consideration = Consideration + Consideration_broad_2
        Evaluation_broad_1 = ["+" + suit for suit in Evaluation]
        Evaluation_broad_2 = [x.replace(' ', ' +') for x in Evaluation_broad_1]
        Evaluation = Evaluation + Evaluation_broad_2
        consumer_journey_stages =[Awareness,Interest, Consideration, Evaluation]
        #create worksheets, one per journey stage
        ws1 = ws.title = "Awareness"
        ws2 = wb.create_sheet("Interest",1)
        ws3 = wb.create_sheet("Consideration",2)
        ws4 = wb.create_sheet("Evaluation",3)
        def column_title(worksheet_name):
            # NOTE(review): the worksheet_name argument is ignored; every
            # sheet in wb gets the header row — confirm intended.
            for ws in wb:
                ws['A1'] = 'Campaign'
                ws['B1'] = 'Ad Group'
                ws['C1'] = 'Keyword'
                ws['D1'] = 'Match Type'
                ws['E1'] = 'Max CPC'
                ws['F1'] = 'Final URL Google'
                ws['G1'] = 'Final URL Bing'
                ws['H1'] = 'BR/UB'
        #name each column
        column_title(wb["Awareness"])
        column_title(wb["Interest"])
        column_title(wb["Consideration"])
        column_title(wb["Evaluation"])
        #keyword fill function
        def kfill(worksheet_name, journey_stage):
            # Write the stage's keyword list down column C starting at row 2.
            ws = worksheet_name
            r=2
            for word in journey_stage:
                ws.cell(row=r, column=3).value = word
                r+=1
        #fill keyword set to column
        kfill(wb["Awareness"], Awareness)
        kfill(wb["Interest"], Interest)
        kfill(wb["Consideration"], Consideration)
        kfill(wb["Evaluation"], Evaluation)
        #match type/Max CPC/BN/UB fill function
        def mfill(worksheet_name):
            # Derive Match Type (col D), Max CPC (col E) and brand flag
            # (col H) from the keyword in col C.
            ws = worksheet_name
            r = 2
            #iterates through rows and adds formula
            #Match Type: '+' prefix marks the broad-match variants built above
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if '+' in ws.cell(row=r, column=3).value:
                        ws.cell(row=r, column=4).value = 'Broad'
                    else:
                        ws.cell(row=r, column=4).value = 'Exact'
                    if ws.cell(row=r, column=4).value =='Broad':
                        ws.cell(row=r, column=5).value = 10
                    else:
                        ws.cell(row=r, column=5).value = 12
                    if any(word.lower() in ws.cell(row=r, column=3).value for word in info[1]):
                        ws.cell(row=r, column=8).value = 'BR'
                    else:
                        ws.cell(row=r, column=8).value = 'UB'
                r += 1
        mfill(wb["Awareness"])
        mfill(wb["Interest"])
        mfill(wb["Consideration"])
        mfill(wb["Evaluation"])
        #URL fill function
        def ufill(worksheet_name,url_asset_G,url_asset_B):
            # Write the Google/Bing landing-page URLs (cols F and G).
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    ws.cell(row=r, column=6).value = str(url_asset_G)
                    ws.cell(row=r, column=7).value = str(url_asset_B)
                r += 1
        ufill(wb["Awareness"],urls[0], urls[1])
        ufill(wb["Interest"],urls[2], urls[3])
        ufill(wb["Consideration"],urls[4], urls[5])
        ufill(wb["Evaluation"],urls[6],urls[7])
        def afill(worksheet_name):
            # Assign the Ad Group name (col B) from the taxonomy, keyed on
            # Match Type (col D) x brand flag (col H), per journey stage.
            r = 2
            ws = worksheet_name
            if ws == wb["Awareness"]:
                for cell in ws:
                    if ws.cell(row=r, column=3).value is not None:
                        if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[4]
                        elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                            ws.cell(row=r, column=2).value = taxonomy[12]
                        elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[8]
                        else:
                            ws.cell(row=r, column=2).value = taxonomy[16]
                    r+=1
            elif ws == wb["Interest"]:
                for cell in ws:
                    if ws.cell(row=r, column=3).value is not None:
                        if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[5]
                        elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                            ws.cell(row=r, column=2).value = taxonomy[13]
                        elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[9]
                        else:
                            ws.cell(row=r, column=2).value = taxonomy[17]
                    r+=1
            elif ws == wb["Consideration"]:
                for cell in ws:
                    if ws.cell(row=r, column=3).value is not None:
                        # NOTE(review): this branch tests 'BN' but mfill only
                        # writes 'BR'/'UB' under radio==1, so taxonomy[6] is
                        # unreachable here — confirm whether 'BR' was meant.
                        if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':
                            ws.cell(row=r, column=2).value = taxonomy[6]
                        elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                            ws.cell(row=r, column=2).value = taxonomy[14]
                        elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[10]
                        else:
                            ws.cell(row=r, column=2).value = taxonomy[18]
                    r+=1
            elif ws == wb["Evaluation"]:
                for cell in ws:
                    if ws.cell(row=r, column=3).value is not None:
                        if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[7]
                        elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                            ws.cell(row=r, column=2).value = taxonomy[15]
                        elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                            ws.cell(row=r, column=2).value = taxonomy[11]
                        else:
                            ws.cell(row=r, column=2).value = taxonomy[19]
                    r+=1
        afill(wb["Awareness"])
        afill(wb["Interest"])
        afill(wb["Consideration"])
        afill(wb["Evaluation"])
        def cfill(worksheet_name):
            # Assign the Campaign name (col A) from the taxonomy, keyed on
            # Match Type x brand flag (campaigns are shared across stages).
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                        ws.cell(row=r, column=1).value = taxonomy[0]
                    elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                        ws.cell(row=r, column=1).value = taxonomy[2]
                    elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                        ws.cell(row=r, column=1).value = taxonomy[1]
                    else:
                        ws.cell(row=r, column=1).value = taxonomy[3]
                r +=1
        cfill(wb["Awareness"])
        cfill(wb["Interest"])
        cfill(wb["Consideration"])
        cfill(wb["Evaluation"])
        #Export to Excel
        wb.save(filename="Build Out_CJ_Stage.xlsx")
    if radio == 2:
        # Single-sheet build: user keywords plus their broad-match variants.
        ws1 = ws.title = "Build"
        def column_title(worksheet_name):
            # NOTE(review): worksheet_name is ignored; all sheets are titled.
            for ws in wb:
                ws['A1'] = 'Campaign'
                ws['B1'] = 'Ad Group'
                ws['C1'] = 'Keyword'
                ws['D1'] = 'Match Type'
                ws['E1'] = 'Max CPC'
                ws['F1'] = 'Final URL Google'
                ws['G1'] = 'Final URL Bing'
                ws['H1'] = 'BN/UB'
        #name each column
        column_title(wb["Build"])
        #keyword fill function
        def kfill(worksheet_name):
            # Build exact + broad variants of the user keywords and write
            # them down column C.
            ws = worksheet_name
            broad_1 = ["+" + suit for suit in info[0]]
            broad_2 = [x.replace(' ',' +') for x in broad_1]
            keywords_all = info[0] + broad_2
            r=2
            for word in keywords_all:
                ws.cell(row=r, column=3).value = word
                r+=1
        #fill keyword set to column
        kfill(wb["Build"])
        #match type/Max CPC/BN/UB fill function
        def mfill(worksheet_name):
            ws = worksheet_name
            r = 2
            #iterates through rows and adds formula
            #Match Type
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if '+' in ws.cell(row=r, column=3).value:
                        ws.cell(row=r, column=4).value = 'Broad'
                    else:
                        ws.cell(row=r, column=4).value = 'Exact'
                    if ws.cell(row=r, column=4).value =='Broad':
                        ws.cell(row=r, column=5).value = 5
                    else:
                        ws.cell(row=r, column=5).value = 8
                    if any(word.lower() in ws.cell(row=r, column=3).value for word in info[1]):
                        ws.cell(row=r, column=8).value = 'BN'
                    else:
                        ws.cell(row=r, column=8).value = 'UB'
                r += 1
        mfill(wb["Build"])
        #URL fill function
        def ufill(worksheet_name,url_g, url_b):
            # NOTE(review): the url_g/url_b parameters are ignored; the URLs
            # are taken from info[-2]/info[-1] instead — confirm intended.
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    ws.cell(row=r, column=6).value = str(info[-2])
                    ws.cell(row=r, column=7).value = str(info[-1])
                r += 1
        ufill(wb["Build"],urls[-2], urls[-1])
        def cfill(worksheet_name):
            # Campaign name (col A) keyed on Match Type x brand flag.
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':
                        ws.cell(row=r, column=1).value = taxonomy[0]
                    elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                        ws.cell(row=r, column=1).value = taxonomy[2]
                    elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':
                        ws.cell(row=r, column=1).value = taxonomy[1]
                    else:
                        ws.cell(row=r, column=1).value = taxonomy[3]
                r +=1
        cfill(wb["Build"])
        def afill(worksheet_name):
            # Ad Group name (col B) keyed on Match Type x brand flag.
            r = 2
            ws = worksheet_name
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':
                        ws.cell(row=r, column=2).value = taxonomy[4]
                    elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                        ws.cell(row=r, column=2).value = taxonomy[6]
                    elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':
                        ws.cell(row=r, column=2).value = taxonomy[5]
                    else:
                        ws.cell(row=r, column=2).value = taxonomy[7]
                r+=1
        afill(wb['Build'])
        wb.save(filename="Build Out_CJ_No_Stage.xlsx")
    if radio ==3:
        # Audience-pilot build: each keyword is repeated on three rows so it
        # can be assigned to three different ad groups.
        ws1 = ws.title = "Build Out"
        def column_title(worksheet_name):
            # NOTE(review): worksheet_name is ignored; all sheets are titled.
            for ws in wb:
                ws['A1'] = 'Campaign'
                ws['B1'] = 'Ad Group'
                ws['C1'] = 'Keyword'
                ws['D1'] = 'Match Type'
                ws['E1'] = 'Max CPC'
                ws['F1'] = 'Final URL Google'
                ws['G1'] = 'Final URL Bing'
                ws['H1'] = 'BR/UB'
        column_title(wb["Build Out"])
        #keyword fill function
        def kfill(worksheet_name):
            # Write every keyword (exact + broad variants) three times in a
            # row — one line per ad group filled in by afill below.
            ws = worksheet_name
            broad_1 = ["+" + suit for suit in info[0]]
            broad_2 = [x.replace(' ',' +') for x in broad_1]
            keywords_all = info[0] + broad_2
            r=2
            for word in keywords_all:
                for i in range(3):
                    ws.cell(row=r, column=3).value = word
                    r+=1
        kfill(wb["Build Out"])
        #match type/Max CPC/BN/UB fill function
        def mfill(worksheet_name):
            ws = worksheet_name
            r = 2
            #iterates through rows and adds formula
            #Match Type
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if '+' in ws.cell(row=r, column=3).value:
                        ws.cell(row=r, column=4).value = 'Broad'
                    else:
                        ws.cell(row=r, column=4).value = 'Exact'
                    if ws.cell(row=r, column=4).value =='Broad':
                        ws.cell(row=r, column=5).value = 8
                    else:
                        ws.cell(row=r, column=5).value = 5
                    # NOTE(review): `oracle` is assigned but never used.
                    oracle = ws.cell(row=r, column=3).value
                    if any((word).lower() in ws.cell(row=r, column=3).value for word in info[1]):
                        ws.cell(row=r, column=8).value = 'BR'
                    else:
                        ws.cell(row=r, column=8).value = 'UB'
                r += 1
        mfill(wb["Build Out"])
        def ufill(worksheet_name, landingpage_google, landingpage_bing):
            # NOTE(review): the landingpage_* parameters are ignored and the
            # inner range(3) loop rewrites the same two cells three times;
            # URLs actually come from info[-2]/info[-1] — confirm intended.
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    for i in range(3):
                        ws.cell(row=r, column=6).value = str(info[-2])
                        ws.cell(row=r, column=7).value = str(info[-1])
                r += 1
        # NOTE(review): url_google/url_bing are not defined in this function;
        # presumably module-level globals — otherwise this line raises
        # NameError when radio == 3.
        ufill(wb["Build Out"], url_google, url_bing)
        #fill in ad groups
        def afill(worksheet_name):
            # Fill three consecutive ad-group cells (one per repeated keyword
            # row written by kfill), then jump three rows ahead.
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                        ws.cell(row=r, column=2).value = taxonomy[4]
                        ws.cell(row=r+1, column=2).value = taxonomy[5]
                        ws.cell(row=r+2, column=2).value = taxonomy[6]
                    elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                        ws.cell(row=r, column=2).value = taxonomy[10]
                        ws.cell(row=r+1, column=2).value = taxonomy[11]
                        ws.cell(row=r+2, column=2).value = taxonomy[12]
                    elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                        ws.cell(row=r, column=2).value = taxonomy[7]
                        ws.cell(row=r+1, column=2).value = taxonomy[8]
                        ws.cell(row=r+2, column=2).value = taxonomy[9]
                    else:
                        ws.cell(row=r, column=2).value = taxonomy[13]
                        ws.cell(row=r+1, column=2).value = taxonomy[14]
                        ws.cell(row=r+2, column=2).value = taxonomy[15]
                r+=3
        afill(wb["Build Out"])
        def cfill(worksheet_name):
            # NOTE(review): the inner range(3) loops rewrite the same cell
            # three times while r only advances once per iteration.
            ws = worksheet_name
            r = 2
            for cell in ws:
                if ws.cell(row=r, column=3).value is not None:
                    if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BR':
                        for i in range(3):
                            ws.cell(row=r, column=1).value = taxonomy[0]
                    elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':
                        for i in range(3):
                            ws.cell(row=r, column=1).value = taxonomy[2]
                    elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BR':
                        for i in range(3):
                            ws.cell(row=r, column=1).value = taxonomy[1]
                    else:
                        for i in range(3):
                            ws.cell(row=r, column=1).value = taxonomy[3]
                r +=1
        cfill(wb["Build Out"])
        wb.save(filename="Build Out_Audience_Pilot.xlsx")
def close_window():
    # Tear down the module-level tkinter root window (ends mainloop below).
    root.destroy()
# Wire up the two buttons: one builds the workbook, one closes the app.
button1 = tk.Button(text='Create Build',fg='green',bg='dark gray', command=createBuild, anchor ='w')
button2 = tk.Button(text='Close window', fg='red', command=close_window, anchor ='w')
button1.pack()
button2.pack()
root.mainloop() | [
"noreply@github.com"
] | Daktic.noreply@github.com |
2956c6fdf06f6904206cf0859be52a9be9d711d9 | 983e8054677b225a3adfde8610c45089562a14e1 | /src/AST/StatementType/BreakStatement.py | 2e1a8530f19fe3a2786eacf84b2c2426b5ee705a | [] | no_license | Roooooobin/final-project-for-Practice-of-Compiler-Construction | 792c8e621b891d0936db7eb788b60d28f8d7b5bf | c1fa44080fa3612b73c5e709693e6419412a2d10 | refs/heads/master | 2020-08-27T23:05:43.109765 | 2019-11-08T01:41:56 | 2019-11-08T01:41:56 | 217,514,218 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | """
# -*- coding: utf-8 -*-
# @FileName: BreakStatement.py
# @Author : Robin
# @Time : 2019/11/2 8:57
"""
from src.AST.Statement import Statement
from src.utils import padding
class BreakStatement(Statement):
    """AST node representing a `break` statement inside a loop."""

    def __init__(self, symbol_table):
        # The symbol table knows the enclosing loop's end label.
        Statement.__init__(self)
        self.symbol_table = symbol_table

    def __str__(self):
        return "Break\n"

    def compile(self):
        # break compiles to an unconditional jump (ujp) to the label that
        # marks the end of the current loop.
        end_label = self.symbol_table.get_end_loop()
        return "ujp " + end_label + "\n"

    def serialize(self, level):
        # Indent according to tree depth for the textual AST dump.
        return padding(level) + "BreakStatement\n"
| [
"35558127+Roooooobin@users.noreply.github.com"
] | 35558127+Roooooobin@users.noreply.github.com |
a88bfecab75a40b7761680eb84e4ed65a6945aaa | 1323a2b442d74ddce7ac380277c94674d28bd32e | /Leetcode/Find the Duplicate Number.py | 3b57a89a47b6e3756013e07384d7eee087c28b56 | [] | no_license | hemantj99/Daily-DSA | 343f5be5e9ab276ccd52c7969e13157903998f9e | d9cf4fb717481a7dc4071f8c3317984e10f52293 | refs/heads/main | 2023-02-15T06:23:45.886611 | 2021-01-12T16:00:37 | 2021-01-12T16:00:37 | 319,724,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | '''
Given an array of integers nums containing n + 1 integers where each integer is in the range [1, n] inclusive.
There is only one duplicate number in nums, return this duplicate number.
Follow-ups:
How can we prove that at least one duplicate number must exist in nums?
Can you solve the problem without modifying the array nums?
Can you solve the problem using only constant, O(1) extra space?
Can you solve the problem with runtime complexity less than O(n2)?
Example 1:
Input: nums = [1,3,4,2,2]
Output: 2
Example 2:
Input: nums = [3,1,3,4,2]
Output: 3
Example 3:
Input: nums = [1,1]
Output: 1
Example 4:
Input: nums = [1,1,2]
Output: 1
'''
class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Return the single duplicated value in `nums`.

        `nums` holds n + 1 integers, each in [1, n], with exactly one value
        repeated. Treat the array as a linked list where index i points to
        nums[i]; the duplicate is the entry point of the resulting cycle,
        found with Floyd's tortoise-and-hare cycle detection.

        Unlike a seen-set, this meets the problem's follow-up constraints:
        O(n) time, O(1) extra space, and `nums` is not modified.
        """
        # Phase 1: advance slow by one hop and fast by two until they meet
        # somewhere inside the cycle.
        slow = fast = nums[0]
        while True:
            slow = nums[slow]
            fast = nums[nums[fast]]
            if slow == fast:
                break
        # Phase 2: restart one pointer; moving both one hop at a time, they
        # meet exactly at the cycle entrance, i.e. the duplicated value.
        slow = nums[0]
        while slow != fast:
            slow = nums[slow]
            fast = nums[fast]
        return slow
| [
"noreply@github.com"
] | hemantj99.noreply@github.com |
97e846184458063782010c1e81c8866b524d29dc | 407fcc31a4aa5debc7c88a4b66d5cffee747db32 | /insert_stock_daily_data_to_db.py | 351d5689fb2176fd07b55e81c597a66dc38685a2 | [] | no_license | YajunLi/database | 238eaa0f82892ac68b002230169b3adad6cb7986 | 463a660e7a60f02d7f26cae1ffa7654addeb7573 | refs/heads/master | 2020-06-23T21:42:24.763750 | 2019-07-25T05:11:49 | 2019-07-25T05:11:49 | 198,761,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,260 | py | import pandas as pd
from sqlalchemy import create_engine
import io
import os
import logging
# Module logger: INFO level, writing to a date-stamped file next to the repo.
logger = logging.getLogger('TickDataToDatabase')
logger.setLevel(level=logging.INFO)
# NOTE(review): non-raw Windows path — "\l" happens not to be an escape, but
# a raw string (r"..\log_file\...") would be safer. `pd.datetime` is also a
# deprecated alias of the datetime module — confirm the pandas version used.
handler = logging.FileHandler("..\log_file\\log_%s.txt" %pd.datetime.now().date())
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
def insert_csv_to_database(files):
    """
    Bulk-load one daily stock-price CSV into public.daily_stock_price_info.

    Reads the CSV with pandas, renames/reorders the columns to match the
    target table, then streams the rows into PostgreSQL with
    COPY ... FROM STDIN, which is much faster than row-by-row INSERTs.

    :param files: path of the CSV file to import
    :return: no return (progress and errors go to the module logger)
    """
    # NOTE(review): credentials are hard-coded in the DSN; move them into
    # configuration/environment before sharing this code.
    conn = create_engine('postgres+psycopg2://julin:123456@localhost:5432/BrooksCapital', echo=True)
    pd_sql_engine = pd.io.sql.pandasSQL_builder(conn)
    # Fix: the old `try: file = files except IndexError` was dead leftover
    # queue code — a plain assignment can never raise IndexError, and had it
    # fired, `file` would have been undefined below (NameError).
    file = files
    logger.info('get csv file :%s' % file)
    # 读取csv文件,并导入至数据库 (read the CSV and import it into the DB)
    data = pd.read_csv(file, encoding='ANSI', index_col=0)
    data = data.rename(columns={'factor': 'factors'})
    # Column order must match the COPY target table exactly.
    data = data[['date', 'stock_code', 'open', 'high', 'low', 'close', 'prev_close', 'is_paused', 'is_st',
                 'turnover', 'volume', 'factors']]
    # Serialise to an in-memory '|'-delimited CSV for COPY.
    string_data_io = io.StringIO()
    data.to_csv(string_data_io, sep='|', index=False)
    try:
        # Create the table on first use (if_exists='append' keeps existing data).
        table = pd.io.sql.SQLTable('daily_stock_price_info', pd_sql_engine, frame=data,
                                   index=False, if_exists='append', schema='public')
        table.create()
        string_data_io.seek(0)
        # Fix: do NOT consume the header here. The COPY command below already
        # says HEADER, so PostgreSQL skips the first line itself; the old
        # extra readline() made COPY silently drop the first *data* row.
        with conn.connect() as connection:
            with connection.connection.cursor() as cursor:
                copy_cmd = "COPY public.daily_stock_price_info FROM STDIN HEADER DELIMITER '|' CSV"
                cursor.copy_expert(copy_cmd, string_data_io)
                connection.connection.commit()
    except Exception as info:
        # Best-effort import: log and continue (matches original behaviour).
        logger.info(info)
    logger.info('%s :data write over ' % file)
if __name__ == '__main__':
    # Default input file produced by the companion download script.
    files = 'stocks_daily_data.csv'
    insert_csv_to_database(files)
| [
"noreply@github.com"
] | YajunLi.noreply@github.com |
eb4589b44e7eb4fed996d31a623846c71947bf0d | b44f329656dee6472cd2865a6c8dbc8df77c980d | /old/workerA.py | 7999783018c61515b3601dd0fd269e2f92a4b7f3 | [
"NTP-0"
] | permissive | cmconner156/trade_manager | 6aec906284a164df599af812f5f67b44b95111e6 | 88de8a5a875f9c4d1378d033745db340e53dfe2e | refs/heads/master | 2022-01-25T03:33:33.225443 | 2020-01-01T21:46:33 | 2020-01-01T21:46:33 | 220,337,518 | 0 | 1 | null | 2022-01-06T22:39:33 | 2019-11-07T22:06:00 | Python | UTF-8 | Python | false | false | 297 | py | from celery import Celery
# Celery configuration
# Broker: RabbitMQ at hostname "rabbit" (presumably a docker-compose service
# name — confirm against the deployment files).
CELERY_BROKER_URL = 'amqp://rabbitmq:rabbitmq@rabbit:5672/'
# Results are returned over AMQP using RPC-style per-client reply queues.
CELERY_RESULT_BACKEND = 'rpc://'
# Initialize Celery
celery = Celery('workerA', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
@celery.task()
def add_nums(a, b):
    """Celery task: return the sum of `a` and `b`."""
    total = a + b
    return total
"cconner@cloudera.com"
] | cconner@cloudera.com |
fb6bf96bc3c51a06e14870d39c750662ec27bf21 | d395c3072ade56c4ee68c0d054d4f2510faf813f | /accounts/migrations/0001_initial.py | 0381baef12141bcbc4062f0b422bf96caab6cf14 | [] | no_license | spassaro80/solomartel | 23b6ebdae1d956346d4d4f3953ea32177c63b88f | da7f2c9e7ce0c3f89a267b5f821f59a9163bbb8d | refs/heads/master | 2023-05-24T07:31:41.438755 | 2020-06-12T16:45:43 | 2020-06-12T16:45:43 | 271,842,750 | 0 | 0 | null | 2021-06-10T23:02:53 | 2020-06-12T16:26:27 | HTML | UTF-8 | Python | false | false | 3,215 | py | # Generated by Django 2.0.4 on 2020-05-28 22:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Account, Client, and the Entity/Private subclasses.

    Auto-generated by Django's makemigrations; avoid hand-editing a
    migration that has already been applied to any database.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Bank account: type/number/funds; FK to its Private owner is added below.
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('Checking', 'Checking'), ('Saving', 'Saving'), ('Retirement', 'Retirement')], max_length=255)),
                ('account_number', models.IntegerField()),
                ('funds', models.DecimalField(decimal_places=2, default=0, max_digits=10, verbose_name='Funds ($)')),
            ],
        ),
        # Base client record with contact and banking details.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('legal_name', models.CharField(max_length=255)),
                ('address', models.CharField(max_length=255)),
                ('region', models.CharField(max_length=255)),
                ('zip', models.IntegerField()),
                ('country', models.CharField(max_length=255)),
                ('city', models.CharField(max_length=255)),
                ('phone_number', models.IntegerField(null=True)),
                ('account_number', models.IntegerField()),
                ('bank_name', models.CharField(max_length=255)),
                ('swift_code', models.CharField(max_length=255)),
                ('ABA_number', models.IntegerField(default=0)),
                ('SSN', models.IntegerField(default=0)),
                ('notes', models.TextField()),
            ],
        ),
        # Multi-table-inheritance subclass of Client for non-person clients.
        migrations.CreateModel(
            name='Entity',
            fields=[
                ('client_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='accounts.Client')),
                ('type', models.CharField(choices=[('Company', 'Company'), ('Trust', 'Trust'), ('Institutional account', 'Institutional account'), ('Other entity', 'Other entity')], max_length=255)),
            ],
            bases=('accounts.client',),
        ),
        # Multi-table-inheritance subclass of Client for individual clients.
        migrations.CreateModel(
            name='Private',
            fields=[
                ('client_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='accounts.Client')),
            ],
            bases=('accounts.client',),
        ),
        # Each Client is tied to one auth user.
        migrations.AddField(
            model_name='client',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        # Accounts belong to Private clients (reverse accessor: my_account).
        migrations.AddField(
            model_name='account',
            name='owner',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='my_account', to='accounts.Private'),
        ),
    ]
| [
"stefano.passaro@gmail.com"
] | stefano.passaro@gmail.com |
aa8131480478b18d37db2f7289886a67b1ce0d30 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/meta/IngameDetailsHelpWindowMeta.py | dbfa7dc67745ed3fc5561ecd88202c1b71e07635 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 652 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/IngameDetailsHelpWindowMeta.py
from gui.Scaleform.framework.entities.abstract.AbstractWindowView import AbstractWindowView
class IngameDetailsHelpWindowMeta(AbstractWindowView):
def requestPageData(self, index):
self._printOverrideError('requestPageData')
def as_setPaginatorDataS(self, pages):
return self.flashObject.as_setPaginatorData(pages) if self._isDAAPIInited() else None
def as_setPageDataS(self, data):
return self.flashObject.as_setPageData(data) if self._isDAAPIInited() else None
| [
"StranikS_Scan@mail.ru"
] | StranikS_Scan@mail.ru |
0e6c2ac755ecf45fc4a0cfe0470f37f85a5c9859 | ca66a4283c5137f835377c3ed9a37128fcaed037 | /Lib/site-packages/sklearn/manifold/__init__.py | 60e17b6bfd918a11c0a1c25a5a6062760e777a68 | [] | no_license | NamithaKonda09/majorProject | f377f7a77d40939a659a3e59f5f1b771d88889ad | 4eff4ff18fa828c6278b00244ff2e66522e0cd51 | refs/heads/master | 2023-06-04T20:25:38.450271 | 2021-06-24T19:03:46 | 2021-06-24T19:03:46 | 370,240,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | """
The :mod:`sklearn.manifold` module implements data embedding techniques.
"""
from .locally_linear import locally_linear_embedding, LocallyLinearEmbedding
from .isomap import Isomap
from .mds import MDS, smacof
from .spectral_embedding_ import SpectralEmbedding, spectral_embedding
from .t_sne import TSNE
__all__ = ['locally_linear_embedding', 'LocallyLinearEmbedding', 'Isomap',
'MDS', 'smacof', 'SpectralEmbedding', 'spectral_embedding', "TSNE"]
| [
"namithakonda09@gmail.com"
] | namithakonda09@gmail.com |
ad49db403268f57ba29c1ce03842ae779925e467 | 1b381a163a938182bca8afa0ac7313a93870052e | /unsuper/unused/vae_mlp.py | 6e631ff4bed2a7c096804ce3f247867ca091cbb8 | [] | no_license | SkafteNicki/unsuper | 359f3ea7c0a477eac5e09791ed39cda73ac7b990 | dc0347e11f8f2d8b77d8c93526682b745d24d57c | refs/heads/master | 2020-03-28T23:03:34.608808 | 2019-10-26T18:55:28 | 2019-10-26T18:55:28 | 149,273,931 | 14 | 4 | null | null | null | null | UTF-8 | Python | false | false | 3,080 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 09:25:49 2018
@author: nsde
"""
#%%
import torch
from torch import nn
import numpy as np
from torchvision.utils import make_grid
from ..helper.losses import ELBO
#%%
class VAE_Mlp(nn.Module):
    """MLP-based variational autoencoder.

    The encoder maps a flattened input to the parameters (mu, logvar) of a
    diagonal-Gaussian posterior over a `latent_dim`-dimensional latent; the
    decoder maps latents back to `input_shape` through a final sigmoid.
    """
    def __init__(self, input_shape, latent_dim, **kwargs):
        super(VAE_Mlp, self).__init__()
        # Shape bookkeeping. latent_dim is stored in a 1-element list so the
        # rest of the framework can treat single- and multi-latent models alike.
        self.input_shape = input_shape
        self.flat_dim = np.prod(input_shape)
        self.latent_dim = [latent_dim]
        # Encoder: flatten -> 512 -> 256 with LeakyReLU activations.
        self.encoder = nn.Sequential(
            nn.BatchNorm1d(self.flat_dim),
            nn.Linear(self.flat_dim, 512),
            nn.LeakyReLU(),
            nn.Linear(512, 256),
            nn.LeakyReLU()
        )
        # Posterior heads on top of the shared encoder trunk.
        self.z_mean = nn.Linear(256, latent_dim)
        self.z_var = nn.Linear(256, latent_dim)
        # Decoder mirrors the encoder and squashes outputs into [0, 1].
        self.decoder = nn.Sequential(
            nn.Linear(latent_dim, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 512),
            nn.LeakyReLU(),
            nn.Linear(512, self.flat_dim),
            nn.Sigmoid()
        )

    def encode(self, x):
        """Return posterior parameters (mu, logvar) for a batch."""
        hidden = self.encoder(x.view(x.shape[0], -1))
        return self.z_mean(hidden), self.z_var(hidden)

    def decode(self, z):
        """Map latents back to tensors of `input_shape`."""
        flat = self.decoder(z)
        return flat.view(-1, *self.input_shape)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, sigma^2) during training; return mu at eval time."""
        if not self.training:
            return mu
        sigma = torch.exp(0.5 * logvar)
        noise = torch.randn_like(sigma)
        return noise.mul(sigma).add(mu)

    def forward(self, x):
        mu, logvar = self.encode(x)
        recon = self.decode(self.reparameterize(mu, logvar))
        # Lists keep the interface uniform with multi-latent models.
        return recon, [mu], [logvar]

    def sample(self, n):
        """Decode `n` latents drawn from the standard-normal prior."""
        device = next(self.parameters()).device
        with torch.no_grad():
            prior_draw = torch.randn(n, self.latent_dim[0], device=device)
            return self.decode(prior_draw)

    def latent_representation(self, x):
        """Return [z] sampled from the posterior of `x`."""
        mu, logvar = self.encode(x)
        return [self.reparameterize(mu, logvar)]

    def loss_f(self, data, recon_data, mus, logvars, epoch, warmup):
        """Delegate to the shared ELBO loss (with KL warm-up)."""
        return ELBO(data, recon_data, mus, logvars, epoch, warmup)

    def __len__(self):
        # One latent space.
        return 1

    def callback(self, writer, loader, epoch):
        """For 2-D latent spaces, log a decoded meshgrid of prior points."""
        if self.latent_dim[0] != 2:
            return
        device = next(self.parameters()).device
        axis_x = np.linspace(-3, 3, 20)
        axis_y = np.linspace(-3, 3, 20)
        grid = np.stack([array.flatten() for array in np.meshgrid(axis_x, axis_y)], axis=1)
        latents = torch.tensor(grid, dtype=torch.float32)
        decoded = self.decode(latents.to(device))
        writer.add_image('samples/meshgrid', make_grid(decoded.cpu(), nrow=20),
                         global_step=epoch)
#%%
if __name__ == '__main__':
    # Smoke test: build a model for 28x28 single-channel images, 32-d latent.
    model = VAE_Mlp((1, 28, 28), 32)
| [
"skaftenicki@gmail.com"
] | skaftenicki@gmail.com |
8b1128a91a0db4a66e0216eb9d12a56b78ab3db5 | a0cbb6aba9214fb4ef20cdedf933bf81a77bf037 | /venv/lib/python3.8/site-packages/pandas/tests/dtypes/test_dtypes.py | 4500f944026d126e91662932ee724989777ace2e | [] | no_license | ShukhratSh/firstrepo | f9cb68ce4c5feb1582d769540e17795105768b95 | 33ebd26b8332a70772dd3069a9b54f7f53f009e1 | refs/heads/main | 2022-12-06T11:08:09.723004 | 2022-11-29T12:01:45 | 2022-11-29T12:01:45 | 33,893,716 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/13/a4/52/2461855ef9d9983e59870146656f042d7d8886931d90b4555ef9efba55 | [
""
] | |
cf30bd306af294595dce71b6a3e2f4cd9bbe10a6 | 3ba975cb0b4b12510f05b9970f955b98ce576124 | /setup.py | cd9ddfb83ebe7ed71de9d5e82960499e761b8f73 | [
"MIT"
] | permissive | vidrafeed/terminaltables | 44d3b85e775138fa16f36f65be7a1bc0e89c04be | da4531bf0002051b3bcd97098acbe09c22ee7736 | refs/heads/master | 2020-12-30T20:04:31.298279 | 2015-03-22T22:34:43 | 2015-03-22T22:34:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,889 | py | #!/usr/bin/env python
import atexit
from codecs import open
from distutils.spawn import find_executable
import os
import re
import sys
import subprocess
import setuptools.command.sdist
from setuptools.command.test import test
def _JOIN(*p):
    """Join path components onto the project directory HERE."""
    return os.path.join(HERE, *p)


def _PACKAGES():
    """List sub-package directories under NAME_FILE, skipping __pycache__."""
    return [os.path.join(r, s) for r, d, _ in os.walk(NAME_FILE) for s in d if s != '__pycache__']


def _REQUIRES(p):
    """Read requirement lines from file *p* (skipping '-' option lines); [] if absent."""
    return [i for i in open(_JOIN(p), encoding='utf-8') if i[0] != '-'] if os.path.exists(_JOIN(p)) else []


def _SAFE_READ(f, l):
    """Read at most *l* characters from file *f*; return '' if it is absent."""
    return open(_JOIN(f), encoding='utf-8').read(l) if os.path.exists(_JOIN(f)) else ''


# Matches module-level __version__/__author__/__license__ assignments.
_VERSION_RE = re.compile(r"^__(version|author|license)__ = '([\w\.@]+)'$", re.MULTILINE)

# PyPI trove classifiers for this project.
CLASSIFIERS = (
    'Development Status :: 5 - Production/Stable',
    'Environment :: Console',
    'Environment :: MacOS X',
    'Environment :: Win32 (MS Windows)',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: MacOS :: MacOS X',
    'Operating System :: Microsoft :: Windows',
    'Operating System :: POSIX',
    'Operating System :: POSIX :: Linux',
    'Programming Language :: Python :: 2.6',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: Implementation :: PyPy',
    'Topic :: Software Development :: Libraries',
    'Topic :: Terminals',
    'Topic :: Text Processing :: Markup',
)
DESCRIPTION = 'Generate simple tables in terminals from a nested list of strings.'
HERE = os.path.abspath(os.path.dirname(__file__))
KEYWORDS = 'Shell Bash ANSI ASCII terminal tables'
NAME = 'terminaltables'
NAME_FILE = NAME
# PACKAGE toggles module-vs-package layout; VERSION_FILE is where the
# __version__/__author__/__license__ metadata is parsed from.
PACKAGE = False
VERSION_FILE = os.path.join(NAME_FILE, '__init__.py') if PACKAGE else '{0}.py'.format(NAME_FILE)
class PyTest(test):
    """setuptools command: `python setup.py test` runs pytest with coverage."""
    description = 'Run all tests.'
    user_options = []
    CMD = 'test'
    TEST_ARGS = ['--cov-report', 'term-missing', '--cov', NAME_FILE, 'tests']
    def finalize_options(self):
        # Forward any extra CLI args after the command name straight to pytest.
        overflow_args = sys.argv[sys.argv.index(self.CMD) + 1:]
        test.finalize_options(self)
        setattr(self, 'test_args', self.TEST_ARGS + overflow_args)
        setattr(self, 'test_suite', True)
    def run_tests(self):
        # Import here, cause outside the eggs aren't loaded.
        pytest = __import__('pytest')
        err_no = pytest.main(self.test_args)
        sys.exit(err_no)
class PyTestPdb(PyTest):
    """Variant of PyTest that drops into ipdb on unhandled exceptions."""
    description = 'Run all tests, drops to ipdb upon unhandled exception.'
    CMD = 'testpdb'
    TEST_ARGS = ['--ipdb', 'tests']
class PyTestCovWeb(PyTest):
    """Variant of PyTest that writes an HTML coverage report."""
    description = 'Generates HTML report on test coverage.'
    CMD = 'testcovweb'
    TEST_ARGS = ['--cov-report', 'html', '--cov', NAME_FILE, 'tests']
    def run_tests(self):
        # On systems with `open` (macOS), pop the report in a browser at exit.
        if find_executable('open'):
            atexit.register(lambda: subprocess.call(['open', _JOIN('htmlcov', 'index.html')]))
        PyTest.run_tests(self)
# Keyword arguments for setuptools.setup(); version/author/license and the
# module-vs-package layout are merged in below.
ALL_DATA = dict(
    author_email='robpol86@gmail.com',
    classifiers=CLASSIFIERS,
    cmdclass={PyTest.CMD: PyTest, PyTestPdb.CMD: PyTestPdb, PyTestCovWeb.CMD: PyTestCovWeb},
    description=DESCRIPTION,
    install_requires=_REQUIRES('requirements.txt'),
    keywords=KEYWORDS,
    long_description=_SAFE_READ('README.rst', 15000),
    name=NAME,
    tests_require=_REQUIRES('requirements-test.txt'),
    url='https://github.com/Robpol86/{0}'.format(NAME),
    zip_safe=True,
)
# noinspection PyTypeChecker
# Parse __version__/__author__/__license__ out of the module source itself.
ALL_DATA.update(dict(_VERSION_RE.findall(_SAFE_READ(VERSION_FILE, 1500).replace('\r\n', '\n'))))
ALL_DATA.update(dict(py_modules=[NAME_FILE]) if not PACKAGE else dict(packages=[NAME_FILE] + _PACKAGES()))
if __name__ == '__main__':
    # Refuse to build a dist with missing metadata.
    if not all((ALL_DATA['author'], ALL_DATA['license'], ALL_DATA['version'])):
        raise ValueError('Failed to obtain metadata from package/module.')
    setuptools.setup(**ALL_DATA)
| [
"robpol86@gmail.com"
] | robpol86@gmail.com |
edab80580239788f02dd482398043e11eebd0d60 | b72024bf483ea4e9e746158a021b992fe8a5a659 | /7.Tablas.py | 546311fea9938f2a1d5d408dde359c6f64c11e13 | [] | no_license | SteveGongoraL/AprendaPython | f8c26428f4985f8409880be20932375a7378fd38 | 715af52a15fab56924ce2c519ce8ff56811361a2 | refs/heads/master | 2020-07-30T11:05:34.098915 | 2019-09-22T20:03:14 | 2019-09-22T20:03:14 | 210,206,620 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 564 | py | for i in range(1,11):
    encabezado="Tabla del {}"
    print(encabezado.format(i))
    # An empty print() is just a line break.
    print()
    # A for loop can be nested inside another for.
    # range's stop value is one past the highest multiplier you want.
    for j in range(1,11):
        # i holds the table's base number; j is the multiplier.
        salida="{} x {} = {}"
        print(salida.format(i,j,i*j))
    else:
        # Runs once the inner loop finishes: separate tables with a blank line.
        print()
| [
"noreply@github.com"
] | SteveGongoraL.noreply@github.com |
db05ce3e4b46c0f089891128914445f70f7dcc5d | 272082074bb4c0d10a448de34abfb0fad60a279c | /venv/bin/py.test | ec7c38e8a29b3ab5518fa89b2863ca60db731357 | [] | no_license | christopheRur/simple_API_python | f256106186a565d06c9f71465162cd209c909a1b | bec4f495319d9a97684f512a690a95b572cae85a | refs/heads/master | 2023-02-21T05:13:36.875073 | 2021-01-19T20:20:49 | 2021-01-19T20:20:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | test | #!/Users/christopherurangwa/PycharmProjects/JSONAPI/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pytest import console_main
if __name__ == '__main__':
    # Strip setuptools script suffixes from argv[0], then run pytest's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(console_main())
| [
"christopherurangwa@christophes-mbp.attlocal.net"
] | christopherurangwa@christophes-mbp.attlocal.net |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.