blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eaebd03ea731b9da12e1adf800467f27272fb3d9 | 9d325c7d60f1d80881e666fc441d62e84d367754 | /main.py | c16598b47af3b1aec66ce47224e3019b8ba55be3 | [] | no_license | aursid/embedded_1 | e1d6211e4e3acc2d7e8fb9d62aca0a27b8fd14cb | 82fb14d5e028e8fc3c2e2e53ad86fd4891b6ee8c | refs/heads/master | 2021-01-20T11:34:29.469806 | 2017-02-15T23:41:09 | 2017-02-15T23:41:09 | 81,445,710 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,336 | py | import json
import network
from umqtt.simple import MQTTClient
import machine
import time
from machine import Pin,I2C,RTC
import manipulatingdata
import formattime
THRESHOLD = 0.05
N_LARGEST_VALUES = 3
CTRL_REG1 = 0x20 #Control the sampling rate and take the board off low power mode.
LIS3DH_REG_CTRL4 = 0x23
OUT_X_L = 0x28 #LsB of X-Axis
OUT_X_H = 0x29 #MsB of X-Axis
OUT_Y_L = 0x2A #LsB of Y-Axis
OUT_Y_H = 0x2B #MsB of Y-Axis
OUT_Z_L = 0x2C #LsB of Z-Axis
OUT_Z_H = 0x2D #MsB of Z-Axis
DIVIDER = 16380
FIFTY_HZ_SAMPLING = 71
THRESHOLD_LDR = 2
#This class gets the current value of the time to display in RFC format
class get_current_time():
    """Mutable holder for the most recent broker-supplied time string."""

    def __init__(self):
        # Sentinel 0 until the first MQTT time message arrives.
        self.time = 0

    def update_time(self, msg):
        """Store *msg* as the latest known time value."""
        self.time = msg
def sub_cb(topic,msg):
    """MQTT subscribe callback: decode the payload bytes and hand the
    resulting string to the global time holder."""
    decoded = str(msg, 'utf-8')
    currenttime.update_time(decoded)
def creatingPayload(number_of_values, nLargestValues, nLargestValues_time, xyz):
    """Build the JSON payload reporting one axis' n largest impact values.

    Returns a JSON string of [summary_string, timestamp_list].
    """
    # Suffix every reading with a 'g' unit: [1, 2] -> "1g 2g".
    readings = "g ".join(str(v) for v in nLargestValues) + "g"
    summary = "%d max %s impact values: " % (number_of_values, xyz) + readings
    return json.dumps([summary, nLargestValues_time])
def connectToWifi(sta_if):
    """Join the EEERover network and block until associated."""
    # Enable the station interface and start the association.
    sta_if.active(True)
    sta_if.connect('EEERover','exhibition')
    # Busy-wait until connected.  NOTE(review): no timeout -- the board
    # hangs here forever if the network is unreachable.
    while(not sta_if.isconnected()):
        pass
    # Brief pause after associating -- presumably to let the link settle
    # before the first socket is opened.  TODO confirm it is needed.
    time.sleep(0.5)
# --- One-time boot setup: LED, wifi, broker time sync, RTC, I2C sensor ---
# LED used as a status indicator (on while acquiring the time).
led = machine.Pin(16,machine.Pin.OUT)
# Connect to the EEERover network.
sta_if = network.WLAN(network.STA_IF)
connectToWifi(sta_if)
# Connect to the MQTT broker and subscribe to the time feed; wait_msg()
# blocks until one time message has been delivered to sub_cb.
client = MQTTClient('unnamed1','192.168.0.10')
client.connect()
currenttime = get_current_time()
client.set_callback(sub_cb)
client.subscribe(b'esys/time')
client.wait_msg()
client.check_msg()
time_right_now = json.loads(currenttime.time)["date"] #RFC/ISO date string at this instant
# Turn the LED off once the time has been received.
if(len(time_right_now) > 0):
    led.low()
# Slice the fixed-width ISO date string into the fields machine.RTC expects.
year = int(time_right_now[0:4])
month = int(time_right_now[5:7])
day = int(time_right_now[8:10])
hour = int(time_right_now[11:13])
minute = int(time_right_now[14:16])
second = int(time_right_now[17:19])
weekday = 4  # NOTE(review): weekday is hard-coded -- confirm it is unused downstream
rtc = machine.RTC()
rtc.datetime((year, month, day, weekday, hour, minute, second, 0))
# Wifi is only needed again when the results are published at the end.
sta_if.active(False)
# Probe the I2C bus for the accelerometer.
i2c = I2C(scl = Pin(5),sda = Pin(4),freq = 500000)
addr_list = i2c.scan() #for switch control.
# Poll until some device answers on the bus.
while(len(addr_list) == 0):
    addr_list = i2c.scan()
    if(len(addr_list) > 0):
        break
    time.sleep(1)
addr = addr_list[0] #first responding device is assumed to be the accelerometer
i2c.writeto_mem(addr, CTRL_REG1, bytearray([FIFTY_HZ_SAMPLING])) #set sampling rate / leave low-power mode
# Per-axis accumulators for the N largest impacts and their timestamps.
nLargestValues = [[] for q in range(3)]
nLargestValues_time = [[] for w in range(3)]
countXYZ = [0, 0, 0]
open_times = []
adc = machine.ADC(0)
# Main sensing loop: runs for as long as the accelerometer answers on the
# I2C bus (disconnecting it acts as the "off switch" and ends acquisition).
while(len(addr_list) > 0):
    # Two consecutive 3-axis readings; each axis is a 16-bit value split
    # over low/high registers and scaled to g by DIVIDER.
    firstValues = [manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_X_L,1), i2c.readfrom_mem(addr,OUT_X_H,1), DIVIDER),
                   manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_Y_L,1), i2c.readfrom_mem(addr,OUT_Y_H,1), DIVIDER),
                   manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_Z_L,1), i2c.readfrom_mem(addr,OUT_Z_H,1), DIVIDER)]
    secondValues = [manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_X_L,1), i2c.readfrom_mem(addr,OUT_X_H,1), DIVIDER),
                    manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_Y_L,1), i2c.readfrom_mem(addr,OUT_Y_H,1), DIVIDER),
                    manipulatingdata.normalise(i2c.readfrom_mem(addr,OUT_Z_L,1), i2c.readfrom_mem(addr,OUT_Z_H,1), DIVIDER)]
    # The change between consecutive samples approximates the impact size.
    for f in range(0, 3):
        diff = abs(secondValues[f] - firstValues[f])
        if(diff > THRESHOLD):
            if(countXYZ[f] > N_LARGEST_VALUES - 1):
                # Buffer full: replace the smallest stored impact if the
                # new one is bigger.
                index = manipulatingdata.minimum1(nLargestValues[f]) #problem here, second index for nLargestValues
                if(diff > nLargestValues[f][index]):
                    nLargestValues[f][index] = diff
                    nLargestValues_time[f][index] = rtc.datetime()
            else:
                nLargestValues[f].append(diff)
                nLargestValues_time[f].append(rtc.datetime())
                countXYZ[f] += 1
    # ADC reading scaled to volts; above THRESHOLD_LDR presumably means
    # light is reaching the LDR, i.e. the suitcase lid is open -- TODO
    # confirm the sensor wiring.  Only the first opening is recorded.
    if(adc.read()*(3.3/1023) > THRESHOLD_LDR):
        if(len(open_times) < 1):
            open_times.append(rtc.datetime())
    time.sleep(0.05)
    addr_list = i2c.scan()
# Acquisition finished (sensor removed / switch off): reconnect and publish.
connectToWifi(sta_if)
client = MQTTClient('unnamed1','192.168.0.10')
client.connect()
xyz = ["x", "y", "z"]
# One JSON message per axis with its N largest impacts and timestamps.
for i in range(0, 3):
    nLargestValues_time[i] = formattime.convert_time(nLargestValues_time[i])
    payload = creatingPayload(N_LARGEST_VALUES, nLargestValues[i], nLargestValues_time[i], xyz[i])
    client.publish('/unnamed1/test', bytes(payload, 'utf-8'))
# Report whether (and when) the suitcase was first opened.
if(len(open_times) > 0):
    open_times = "Your suitcase was opened at: " + str(formattime.convert_time(open_times))
    payload_times = json.dumps(open_times)
    client.publish('/unnamed1/test', bytes(payload_times, 'utf-8'))
else:
    client.publish('/unnamed1/test', bytes("Your suitcase was never opened.", 'utf-8'))
# LED back on to signal that the report has been sent.
led.high()
| [
"JR"
] | JR |
05b4b577b4f16103c73d1b70c7d3d7a273a31900 | 971e5f16b502ae6221c2d32f5002435769d35fcc | /displayrecord.cgi | 09f7b8f6a6b2be2d1c8e5bde4a4668f38e4fb499 | [] | no_license | shubhsin/Python-CGI-Tut | fe66ff0b7eccd7abbfd922e9c7c124b58b28c004 | 002e0410d33a9912381c41e4e0414efcb79ebd79 | refs/heads/master | 2021-01-10T03:51:28.174515 | 2016-03-20T14:06:03 | 2016-03-20T14:06:03 | 54,320,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,131 | cgi | #! /usr/bin/python
import cgi
import mysql.connector as conn
def htmlTop():
    """Emit the CGI Content-Type header plus the opening HTML boilerplate.

    Uses the print() function form (works under both Python 2 and 3);
    the original Python 2 print statement is a syntax error on Python 3.
    """
    print("""Content-Type:text/html\n\n
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8"/>
<title>My server-side template</title>
<body>""")
def connectDB():
    """Open the MySQL connection to exampledb and return (connection, cursor)."""
    database = conn.connect(host='localhost', port=8889, user='root',
                            passwd='root', db='exampledb')
    return database, database.cursor()
def selectPeople(db, cursor):
    """Run a SELECT over the person table and return every row as a list.

    *db* is accepted for signature compatibility but not used here.
    """
    cursor.execute("select * from person")
    return cursor.fetchall()
def displayPeople(people):
    """Render *people* rows as an HTML table on stdout.

    Each row must be indexable with at least three fields; all three are
    emitted per row.  print() form keeps this Python 2/3 compatible.
    """
    # NOTE(review): the header declares two columns but each data row
    # emits three cells -- confirm whether a third <th> is missing.
    print("<table border>")
    print("<tr>")
    print("<th>ID</th>")
    print("<th>List Name</th>")
    print("</tr>")
    for each in people:
        print("<tr>")
        print("<td>{0}</td>".format(each[0]))
        print("<td>{0}</td>".format(each[1]))
        print("<td>{0}</td>".format(each[2]))
        print("</tr>")
    print("</table>")
def htmlTail():
    """Close the HTML document opened by htmlTop() (Python 2/3 compatible)."""
    print("""</body>
</html>""")
#main program: emit header + table of all person rows, then close the page.
if __name__ == "__main__":
    try:
        htmlTop()
        db,cursor = connectDB()
        people = selectPeople(db,cursor)
        cursor.close()
        displayPeople(people)
        htmlTail()
    except:
        # Bare except is deliberate for a CGI script: render the traceback
        # as HTML rather than returning an empty 500 page.
        # NOTE(review): db is never closed -- confirm connection cleanup.
        cgi.print_exception()
"shubhamsorte@Shubhams-MacBook-Air.local"
] | shubhamsorte@Shubhams-MacBook-Air.local |
8daf9a4b21e188c25aa2f028ae77f55c5b3de47c | 95ab6507d94eccd3c25fae76e7b909c88c807b19 | /gen/Lib/site-packages/PyQt5/pylupdate_main.py | 489e58f3b9952c53a9ff45f7c7e47ec7f1507610 | [
"MIT"
] | permissive | BL4RC4N3-py/PG | 5d51952ea97f35540ddc73d1a1778fe27ce93d06 | dbdb7315abd9dd7449c46e158f1c76880332cb0d | refs/heads/master | 2023-04-01T11:03:56.142640 | 2021-03-24T13:54:35 | 2021-03-24T13:54:35 | 351,097,973 | 1 | 0 | MIT | 2021-03-24T13:54:36 | 2021-03-24T13:53:25 | null | UTF-8 | Python | false | false | 7,131 | py | # Copyright (c) 2020 Riverbank Computing Limited <info@riverbankcomputing.com>
#
# This file is part of PyQt5.
#
# This file may be used under the terms of the GNU General Public License
# version 3.0 as published by the Free Software Foundation and appearing in
# the file LICENSE included in the packaging of this file. Please review the
# following information to ensure the GNU General Public License version 3.0
# requirements will be met: http://www.gnu.org/copyleft/gpl.html.
#
# If you do not wish to use this file under the terms of the GPL version 3.0
# then you may purchase a commercial license. For more information contact
# info@riverbankcomputing.com.
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
import sys
from PyQt5.QtCore import (PYQT_VERSION_STR, QDir, QFile, QFileInfo, QIODevice,
QTextStream)
from .pylupdate import *
def printUsage():
    """Write the pylupdate5 command-line help text to stderr."""
    usage = (
        "Usage:\n"
        " pylupdate5 [options] project-file\n"
        " pylupdate5 [options] source-files -ts ts-files\n"
        "\n"
        "Options:\n"
        " -help Display this information and exit\n"
        " -version\n"
        " Display the version of pylupdate5 and exit\n"
        " -verbose\n"
        " Explain what is being done\n"
        " -noobsolete\n"
        " Drop all obsolete strings\n"
        " -tr-function name\n"
        " name() may be used instead of tr()\n"
        " -translate-function name\n"
        " name() may be used instead of translate()\n")
    sys.stderr.write(usage)
def updateTsFiles(fetchedTor, tsFileNames, codecForTr, noObsolete, verbose):
    """Merge the freshly fetched messages into every .ts file and save it.

    fetchedTor holds the messages just extracted from the sources; each
    listed .ts file is loaded, merged with it, optionally stripped of
    obsolete entries, and written back in place.
    """
    dir = QDir()
    for t in tsFileNames:
        # Relative name is only used for nicer merge/error reporting.
        fn = dir.relativeFilePath(t)
        tor = MetaTranslator()
        out = MetaTranslator()
        tor.load(t)
        if codecForTr:
            tor.setCodec(codecForTr)
        # merge() combines the existing translations with the new catalog.
        merge(tor, fetchedTor, out, noObsolete, verbose, fn)
        if noObsolete:
            out.stripObsoleteMessages()
        out.stripEmptyContexts()
        if not out.save(t):
            sys.stderr.write("pylupdate5 error: Cannot save '%s'\n" % fn)
def main():
    """Command-line entry point for pylupdate5.

    Two invocation styles are supported (see printUsage()): a single Qt
    .pro project file, or explicit source files followed by "-ts" and the
    .ts files to update.  Messages are extracted with fetchtr_py /
    fetchtr_ui and merged into the .ts files via updateTsFiles().
    """
    # Initialise.
    defaultContext = "@default"
    fetchedTor = MetaTranslator()
    codecForTr = ''
    codecForSource = ''
    tsFileNames = []
    uiFileNames = []
    verbose = False
    noObsolete = False
    metSomething = False
    numFiles = 0
    standardSyntax = True
    metTsFlag = False
    tr_func = None
    translate_func = None
    # First pass: the presence of any "-ts" switches project-file mode off.
    for arg in sys.argv[1:]:
        if arg == "-ts":
            standardSyntax = False
    argc = len(sys.argv)
    i = 1
    # Second pass: parse options and process file arguments in order.
    while i < argc:
        arg = sys.argv[i]
        i += 1
        if arg == "-help":
            printUsage()
            sys.exit(0)
        if arg == "-version":
            sys.stderr.write("pylupdate5 v%s\n" % PYQT_VERSION_STR)
            sys.exit(0)
        if arg == "-noobsolete":
            noObsolete = True
            continue
        if arg == "-verbose":
            verbose = True
            continue
        if arg == "-ts":
            metTsFlag = True
            continue
        if arg == "-tr-function":
            if i >= argc:
                sys.stderr.write(
                        "pylupdate5 error: missing -tr-function name\n")
                sys.exit(2)
            tr_func = sys.argv[i]
            i += 1
            continue
        if arg == "-translate-function":
            if i >= argc:
                sys.stderr.write(
                        "pylupdate5 error: missing -translate-function name\n")
                sys.exit(2)
            translate_func = sys.argv[i]
            i += 1
            continue
        numFiles += 1
        fullText = ""
        # Read the argument's contents now unless we are past "-ts" (in
        # which case the argument names an output file, not an input).
        if not metTsFlag:
            f = QFile(arg)
            if not f.open(QIODevice.ReadOnly):
                sys.stderr.write(
                        "pylupdate5 error: Cannot open file '%s'\n" % arg)
                sys.exit(1)
            t = QTextStream(f)
            fullText = t.readAll()
            f.close()
        if standardSyntax:
            # Project-file mode: work relative to the .pro file's
            # directory so the paths it mentions resolve correctly.
            oldDir = QDir.currentPath()
            QDir.setCurrent(QFileInfo(arg).path())
            fetchedTor = MetaTranslator()
            codecForTr = ''
            codecForSource = ''
            tsFileNames = []
            uiFileNames = []
            for key, value in proFileTagMap(fullText).items():
                for t in value.split(' '):
                    if key == "SOURCES":
                        fetchtr_py(QDir.current().absoluteFilePath(t),
                                fetchedTor, defaultContext, True,
                                codecForSource, tr_func, translate_func)
                        metSomething = True
                    elif key == "TRANSLATIONS":
                        tsFileNames.append(QDir.current().absoluteFilePath(t))
                        metSomething = True
                    elif key in ("CODEC", "DEFAULTCODEC", "CODECFORTR"):
                        codecForTr = t
                        fetchedTor.setCodec(codecForTr)
                    elif key == "CODECFORSRC":
                        codecForSource = t
                    elif key == "FORMS":
                        fetchtr_ui(QDir.current().absoluteFilePath(t),
                                fetchedTor, defaultContext, True)
            updateTsFiles(fetchedTor, tsFileNames, codecForTr, noObsolete,
                    verbose)
            if not metSomething:
                sys.stderr.write(
                        "pylupdate5 warning: File '%s' does not look like a "
                        "project file\n" % arg)
            elif len(tsFileNames) == 0:
                sys.stderr.write(
                        "pylupdate5 warning: Met no 'TRANSLATIONS' entry in "
                        "project file '%s'\n" % arg)
            QDir.setCurrent(oldDir)
        else:
            if metTsFlag:
                # Everything after "-ts" must be a writable .ts file.
                if arg.lower().endswith(".ts"):
                    fi = QFileInfo(arg)
                    if not fi.exists() or fi.isWritable():
                        tsFileNames.append(arg)
                    else:
                        sys.stderr.write(
                                "pylupdate5 warning: For some reason, I "
                                "cannot save '%s'\n" % arg)
                else:
                    sys.stderr.write(
                            "pylupdate5 error: File '%s' lacks .ts extension\n" % arg)
            else:
                # Source files: .py/.pyw are scanned as Python; anything
                # else is treated as a Qt Designer .ui file.
                fi = QFileInfo(arg)
                if fi.suffix() in ("py", "pyw"):
                    fetchtr_py(fi.absoluteFilePath(), fetchedTor,
                            defaultContext, True, codecForSource, tr_func,
                            translate_func)
                else:
                    fetchtr_ui(fi.absoluteFilePath(), fetchedTor,
                            defaultContext, True)
    # In "-ts" mode the merge happens once, after all sources are scanned.
    if not standardSyntax:
        updateTsFiles(fetchedTor, tsFileNames, codecForTr, noObsolete, verbose)
    if numFiles == 0:
        printUsage()
        sys.exit(1)
| [
"arcaneisc00l@gmail.com"
] | arcaneisc00l@gmail.com |
b825424bd57f66344914164ba6bb4cd58f193017 | 87ab14c77b22ea7811b0d0c2c7290963f7bd0787 | /typeidea/settings/base.py | 6af6fdc687bcdd4f8dc6c72cb9ede3d56a1c6935 | [] | no_license | wakenforest/wake_site | 1346073aaeaf0c9bef8af0cd654ad8aede540750 | 31c723cb56b1124eb7cecb3acff465bf2100971b | refs/heads/master | 2020-07-26T15:51:25.624866 | 2019-10-06T04:19:20 | 2019-10-06T04:19:20 | 208,695,714 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,763 | py | """
Django settings for typeidea project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment (e.g. os.environ) before deploying.
SECRET_KEY = '59ua%cg^ak66cn&+=(fozkf1#5%ic91m_9$be_7q29kpuxc4gw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# DEBUG = False
# '*' accepts any Host header; restrict to the real domain(s) in production.
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'typeidea',
'blog',
'config',
'comment',
'rest_framework',
'dict_word',
'mongo',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'blog.middleware.user_id.UserIDMiddleware',
]
ROOT_URLCONF = 'typeidea.urls'
#THEME = 'default'
THEME = 'bootstrap'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'themes', THEME, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'typeidea.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# STATIC_ROOT = '/tmp/static'
STATIC_ROOT = os.path.join(BASE_DIR,'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'themes', THEME, "static"),
'/typeidea/themes/bootstrap/static/',
#os.path.join(BASE_DIR, 'typeidea', 'themes', THEME, "static"),
]
REST_FRAMEWORK = {
'DEFAULT_PAGINATION_CLASS':'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE':2,
} | [
"wangfei_thuee@126.com"
] | wangfei_thuee@126.com |
01ad45c6a11abca8efb3b48cd0e5972dce2e220c | aa4ecdb48f0c7352399c107dc82af1536b5c2c83 | /src/setup.py | 81feaa723ac5accee6ac8bf77728717ee5b39642 | [
"MIT"
] | permissive | NexSabre/pycreator | 798ad3d74267d07f8b1fa239c00c66b5f9394b6b | 3d6c5bc0620e179210ed405a81b913887e4af0ae | refs/heads/master | 2023-01-24T20:11:22.233641 | 2020-12-07T17:23:01 | 2020-12-07T17:23:01 | 298,025,759 | 3 | 0 | MIT | 2020-10-05T20:13:55 | 2020-09-23T16:13:22 | Python | UTF-8 | Python | false | false | 1,033 | py | import setuptools
# Packaging metadata lives one directory above this setup.py.
with open("../README.md", "r") as fh:
    long_description = fh.read()

with open("../VERSION", "r") as vr:
    # Strip the trailing newline editors usually leave in VERSION files.
    version_number = vr.read().strip()

with open("../requirements.txt", "r") as req:
    # One requirement per line; skip blank lines so install_requires
    # never receives an empty specifier (the original appended '' for
    # blank lines).
    requirements = [line.strip() for line in req if line.strip()]

setuptools.setup(
    name="pycreator",
    version=version_number,
    author="Nex Sabre",
    author_email="nexsabre@protonmail.com",
    description="Automatically create a command application",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/NexSabre/pycreator",
    packages=setuptools.find_packages(),
    package_data={'pycreator': ['templates/*.j2']},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
    python_requires='>=3.6',
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'pycreator = pycreator.main.main:main'
        ],
    },
)
| [
"NexSabre@users.noreply.github.com"
] | NexSabre@users.noreply.github.com |
6898f9f854cad5ac7c59a1e0131e3abba71f13ab | 8c2923f90e22ccb3c22bb720b860dcafb981dc28 | /8kyu-Reversed-Strings.py | d44c3826bcac67e34b26e091033414d680b3a5b3 | [] | no_license | Projeto-Front-end-Grupo2-M3/Codewars-Python | 22742348bcbaa432e884700a3ac019e5c96a099e | 07d905e83fd80f82fccd5289f4d35de3c598bfe5 | refs/heads/master | 2023-04-17T14:20:27.783431 | 2019-05-15T10:47:06 | 2019-05-15T10:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 207 | py | """
Reversed Strings
8kyu
by jhoffner
Complete the solution so that it reverses the string value passed into it.
solution('world') # returns 'dlrow'
"""
def solution(string):
    """Return *string* reversed (codewars 8kyu 'Reversed Strings')."""
    return "".join(reversed(string))
"noreply@github.com"
] | Projeto-Front-end-Grupo2-M3.noreply@github.com |
d29954de8f62e3c9ec1497319acc72009ec90777 | 42d8bea28c7a34dde8d47f81e9463c5970af7120 | /app/api/convert.py | 6b5629f6f561c782c33307b24c72610c5501db73 | [
"MIT"
] | permissive | Tharitos/mse_teacher_plan | 1e26818811db4991eadca3157b28b2c9ae691416 | 4c577f810eb040c4a74810c98e2c8c4b514caf5d | refs/heads/master | 2020-04-07T05:57:52.084094 | 2018-11-18T19:04:02 | 2018-11-18T19:04:02 | 158,116,922 | 0 | 0 | NOASSERTION | 2018-11-18T19:01:48 | 2018-11-18T19:01:48 | null | UTF-8 | Python | false | false | 2,032 | py | from typing import Union, List, Type, Dict
import bson
import datetime
import mongoengine
from mongoengine.document import Document
ConvertedField = Dict[str, Union[str, int, List[str]]]
ConvertedDocument = List[ConvertedField]
def f(text: str, name: str, type: str, opts: Optional[List[str]] = None,
      value: str = '', fixed: bool = False) -> "ConvertedField":
    """Assemble one form-field description dict.

    Parameters
    ----------
    text : human-readable label shown next to the field.
    name : the field's name attribute.
    type : HTML input type ("text", "number", "date", ...).  The
        parameter shadows the ``type`` builtin but is kept so existing
        keyword callers keep working.
    opts : choice options; ``None`` becomes a fresh empty list (the
        annotation is now ``Optional`` to match the ``None`` default).
    value : initial field value.
    fixed : True when only an admin may change the field.
    """
    if opts is None:
        opts = []
    return {
        'text': text,
        'name': name,
        'type': type,
        'opts': opts,
        'value': value,
        'fixed': fixed
    }
def convert_HTML_to_mongo_types(obj) -> str:
    """Map a mongoengine field instance to an HTML <input> type string.

    Despite the name (kept for caller compatibility) this converts mongo
    field types *to* HTML types: IntField -> "number", DateTimeField ->
    "date", and any other field (StringField etc.) -> "text".
    """
    if isinstance(obj, mongoengine.fields.IntField):
        return 'number'
    if isinstance(obj, mongoengine.fields.DateTimeField):
        return 'date'
    return 'text'
def convert_mongo_model(obj: Type[Document]) -> ConvertedDocument:
    """Describe every field of a mongoengine Document *class* as form
    field dicts (via f()), in declaration order, with empty values."""
    fields = obj._fields_ordered
    res = []
    for field in fields:
        current_field = obj._fields[field]
        # verbose_name / changeable_by_admin are optional custom
        # attributes on the field definitions; fall back when absent.
        try:
            text = current_field.verbose_name
        except AttributeError:
            text = '%NO_VERBOSE_NAME%'
        try:
            fixed = current_field.changeable_by_admin
        except AttributeError:
            fixed = False
        name = current_field.name
        type = convert_HTML_to_mongo_types(current_field)
        opts = None
        if current_field.choices:
            opts = current_field.choices
        value = ''
        res.append(f(text, name, type, opts, value, fixed))
    return res
def convert_mongo_document(obj: Document) -> ConvertedDocument:
    """Describe a Document *instance*: the model's field dicts with each
    'value' filled in from the instance, coerced to JSON-friendly types."""
    res = convert_mongo_model(obj)
    fields = obj._fields_ordered
    for i in range(len(fields)):
        data = obj[fields[i]]
        # Datetimes are reduced to their ISO date (time part dropped);
        # ObjectIds and referenced documents become id strings.
        if isinstance(data, datetime.datetime):
            data = data.date().isoformat()
        if isinstance(data, bson.objectid.ObjectId):
            data = str(data)
        if isinstance(data, Document):
            data = str(data.id)
        res[i]['value'] = data
    return res
| [
"thexcloud@gmail.com"
] | thexcloud@gmail.com |
dbd83fd658f3447de2a58ce867e9d787b0e8d792 | 8fc22651de565f0e4bd528b3702370c4eafc8717 | /35.py | 023e054f9e3d2d601547275109ec458b26930b66 | [] | no_license | shifelfs/shifel | f25bb9c194f1f30c38a47a6ed93d4d0d7b9bcd9d | 691b13e2509fb638f9759b3505b65291c165b7f9 | refs/heads/master | 2021-08-03T04:29:13.979578 | 2021-07-22T09:32:14 | 2021-07-22T09:32:14 | 186,749,917 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 100 | py | s=input()
count=0
for i in s:
if(i.isnumeric()==True):
count=count+1
print(count)
| [
"noreply@github.com"
] | shifelfs.noreply@github.com |
547059b430a0f4d7f48886d55eaa8b03d341b493 | 5c9511bc538045389e28183b0bc65d9b6cf51b85 | /04/Ukol19-21.py | 3870d83ee37a385d767e349bae0711e24db1b873 | [] | no_license | balonovatereza/Pyladies-repository | 0174360835dd1094532118eda1e2821da8108f77 | d4ad0dae829d21a56e6fb86d7a7dcfdc9387ae27 | refs/heads/master | 2020-05-01T09:25:25.300245 | 2019-05-18T09:10:50 | 2019-05-18T09:10:50 | 177,399,871 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | # Ukol 19
# Exercise (Ukol) 19: read three numbers and report whether their sum
# exceeds 10.  (Prompts/output stay in Czech, as in the original.)
print('Ukol 19')
suma = 0
for _ in range(3):  # the loop index was being clobbered by the input value
    cislo = int(input('Zadej cislo:'))
    suma += cislo
if suma > 10:
    print('Soucet tvych cisel je: ', suma, ' a to je vetsi nez 10.')
else:
    print('Soucet tvych cisel je: ', suma, ' a to rozhodne vetsi nez 10 neni.')
# Exercise (Ukol) 20: read a number and print whether it is even or odd.
print('Ukol 20')
sude_liche = int(input('Zadej cislo:'))
if sude_liche % 2 == 0:
    print('Cislo ', sude_liche, ' je sude.')
else:
    print('Cislo ', sude_liche, ' je liche.')
# Exercise (Ukol) 21: FizzBuzz over 0..100 -- multiples of 3 print 'bum',
# multiples of 5 print 'bac', multiples of both print 'bum-bac'.
print('Ukol 21')
cislo_delitelne_3 = 'bum'
cislo_delitelne_5 = 'bac'
cislo_delitelne_3i5 = 'bum-bac'
for numero in range(101):
    # Rebinding the loop variable only affects what gets printed below;
    # the next iteration restarts from the range.
    if numero % 3 == 0 and numero % 5 == 0:
        numero = cislo_delitelne_3i5
    elif numero % 3 == 0:
        numero = cislo_delitelne_3
    elif numero % 5 == 0:
        numero = cislo_delitelne_5
    print(numero)
# Exercise (Ukol) 22: compute n! iteratively.
print('Ukol 22')
faktorial = 1
n = int(input('Zadej cislo pro vypocet faktorialu: '))
for cislo in range(1, n + 1):
    faktorial = cislo * faktorial
print(faktorial)
| [
"balonova.tereza@seznam.cz"
] | balonova.tereza@seznam.cz |
018f1ddd3be80f4eadd94ff9ff40b958fd2621d6 | 5d25e87c024028be669a86c4023a2a953a69e983 | /code/create_data.py | 78121bedd68978625640d5ca61ec6c9afed3c0b9 | [] | no_license | varunmuriyanat/spark_datascience | c3fe226c7e4d7ee8a05659687d33df3a81d013ad | a9819d957ce341e89aeb9f5b140476f5e40c7b70 | refs/heads/main | 2022-12-31T03:53:53.499396 | 2020-10-21T14:58:04 | 2020-10-21T14:58:04 | 306,043,696 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 628 | py | import pandas as pd
import numpy as np
import random
### create_df wil create a sample pandas df with nr_rows rows
def create_df( nr_rows ):
nr_pods = int(nr_rows/4000)
nr_trips = int(nr_rows/4000)
pods = ["pod_" + str(i) for i in range(nr_pods)]
trips = ["trip_" + str(i) for i in range(nr_pods)]
df = pd.DataFrame({
"pod_id": [random.choice(pods) for _ in range(nr_rows)],
"trip_id": [random.choice(trips) for _ in range(nr_rows)],
"timestamp":np.random.rand(nr_rows)*35*60,
"speed_mph": np.random.rand(nr_rows)*670.0
})
return df | [
"varunmuriyanat@gmail.com"
] | varunmuriyanat@gmail.com |
21f4756d443ce6abad255d6e103ad67f905be507 | 334a457eca0a0896039db77c1fadb0e0361d29b0 | /doc/source/conf.py | 06b62b4a1afdfcfc1897c5ec8b0518a86e441760 | [
"MIT"
] | permissive | WeKeyPedia/toolkit-python | b3ec5fd1026f357252c7015f55dbe87768bc6cec | 38c79844b5598173cdece35a180cf8b50995ff3c | refs/heads/master | 2022-06-29T11:30:35.852983 | 2015-04-28T12:50:43 | 2015-04-28T12:50:43 | 15,710,774 | 1 | 0 | MIT | 2022-06-21T21:12:53 | 2014-01-07T17:05:32 | Python | UTF-8 | Python | false | false | 9,426 | py | # -*- coding: utf-8 -*-
#
# wekeypedia documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 3 14:47:15 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.append(os.path.abspath('../..'))
import wekeypedia
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
# 'sphinx.ext.pngmath',
'sphinxcontrib.napoleon'
]
napoleon_numpy_docstring = True
# Add any paths that contain templates here, relative to this directory.
autosummary_generate=True
templates_path = ['templates', "../rst_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wekeypedia'
copyright = u'2015, tam kien duong'
author = u'tam kien duong'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.4'
# The full version, including alpha/beta/rc tags.
release = '0.1.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# html_theme = 'alabaster'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'wekeypediadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'wekeypedia.tex', u'wekeypedia Documentation',
u'tam kien duong', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'wekeypedia', u'wekeypedia Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'wekeypedia', u'wekeypedia Documentation',
author, 'wekeypedia', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"tk@deveha.com"
] | tk@deveha.com |
c9e189c373e48dc00d9ad5ed5f200afee8defd41 | 6d8f4c61eb56b8eb9c32e63a37b6df792d1df71c | /setup.py | 131ae6569a0083b106a54438a088431ab033ee45 | [
"MIT"
] | permissive | Global19/filearchive | 5893b0aac18579a5ad8913759754fbf3668ea56c | b39453ab5620d38d6411b205d5495a14edc9d0d9 | refs/heads/master | 2023-03-16T12:40:45.902108 | 2014-09-03T17:40:46 | 2014-09-03T17:40:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, located next to this setup script.

    Line endings are normalised from CRLF to LF and the text is prefixed
    with a single newline (keeps the generated long_description tidy).

    Fix: the file is now opened via a context manager, so the handle is
    closed even if the read raises (the original leaked it on error).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as inf:
        return "\n" + inf.read().replace("\r\n", "\n")
# Package metadata for the `filearchive` distribution.
# NOTE(review): "1.1.2dev" is a pre-PEP 440 version string; modern
# packaging tools expect e.g. "1.1.2.dev0" -- confirm before releasing.
setup(
    name='filearchive',
    version="1.1.2dev",
    description='Python module to work with file archives',
    long_description=read('README.md'),
    author='Sridhar Ratnakumar',
    author_email='github@srid.name',
    url='http://github.com/ActiveState/filearchive',
    license='MIT',
    py_modules=["filearchive"],
)
| [
"github@srid.name"
] | github@srid.name |
463eab256b0d87656e3a266f45cd20e950d41b59 | 8c1c9bc8ca4dce8a5876f05a54e2e195900cc089 | /detection/wavrecord.py | 8ec794ed723e2b9970a8c8e93a0529f8098b84cc | [] | no_license | Emoic/Recycling-Bins-for-School-and-home | f3e301ce4060abe4ae79b45e234027c18eb5184d | 08d730bad3c97a9a667034c2cfb6901f41f7eb62 | refs/heads/main | 2023-03-25T12:20:09.459799 | 2021-03-24T03:58:28 | 2021-03-24T03:58:28 | 350,932,979 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,588 | py | import wave
from pyaudio import PyAudio,paInt16
from pydub import AudioSegment
framerate=16000  # audio sampling rate in Hz
NUM_SAMPLES=2000  # frames fetched per stream.read() (0.125 s at 16 kHz)
channels=1  # mono recording
sampwidth=2  # bytes per sample: 16-bit PCM, matches paInt16
TIME=2  # recording length in seconds (my_record does 8 reads per second)
def save_wave_file(filename, data):
    '''Write the recorded PCM chunks in *data* to *filename* as a WAV file.

    Header parameters come from the module-level channels/sampwidth/
    framerate settings.

    Fix: the wave object is used as a context manager so the file is
    closed (and its header finalised) even if a write fails -- the
    original leaked the handle on error.

    :param filename: path of the WAV file to create
    :param data: iterable of raw PCM byte chunks, joined in order
    '''
    with wave.open(filename, 'wb') as wf:
        wf.setnchannels(channels)
        wf.setsampwidth(sampwidth)
        wf.setframerate(framerate)
        wf.writeframes(b"".join(data))
def my_record(file_name):
    """Capture TIME seconds from the default microphone into *file_name*."""
    pa = PyAudio()
    stream = pa.open(format=paInt16, channels=1,
                     rate=framerate, input=True,
                     frames_per_buffer=NUM_SAMPLES)
    frames = []
    # Each read returns NUM_SAMPLES frames (0.125 s at 16 kHz), i.e. 8
    # reads per second of audio; TIME * 8 reads bounds the recording time.
    for _ in range(TIME * 8):
        frames.append(stream.read(NUM_SAMPLES))
        print('.')
    save_wave_file(file_name, frames)
    stream.close()
chunk=2014  # frames read per playback write; NOTE(review): 2014 looks like a typo for 1024/2048 -- playback works either way, confirm before changing
def play(file_name):
    """Play back a WAV file through the default audio output device."""
    wav_in = wave.open(file_name, 'rb')
    audio = PyAudio()
    out_stream = audio.open(
        format=audio.get_format_from_width(wav_in.getsampwidth()),
        channels=wav_in.getnchannels(),
        rate=wav_in.getframerate(),
        output=True)
    # Stream the file `chunk` frames at a time until it is exhausted.
    while True:
        frames = wav_in.readframes(chunk)
        if frames == b'':
            break
        out_stream.write(frames)
    out_stream.stop_stream()  # stop the data stream
    out_stream.close()
    audio.terminate()
    print('end')
def changefromt(source_file, output_file):
    """Convert an MP3 file to WAV and report completion on stdout."""
    AudioSegment.from_mp3(source_file).export(output_file, format="wav")
    print('\n转换已完成')
if __name__ == '__main__':
    # Demo: record TIME seconds from the default mic, then play it back.
    my_record('01.pcm')
    print('Over!')
    play('01.pcm')
| [
"noreply@github.com"
] | Emoic.noreply@github.com |
b6fd533b2134c0c231bd30851ae3ad595200b593 | 71d8e5f9042e3b8045e76144655b725d81221d26 | /Gauss_Seidel/gauss_seidel.py | 162ecc1529ef33698fc5b8b01e714f36b72a4a1a | [] | no_license | Andrelpoj/Prog_Cient | 2e56d306b025ae86e89bf27332b575496188dd67 | 63d281f96b38f45d0fa195df5edc60f34b8086a2 | refs/heads/master | 2020-03-17T04:06:19.556571 | 2019-06-27T04:07:16 | 2019-06-27T04:07:16 | 133,262,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,785 | py | #Gauss-Seidel method
# def print_matrix(matrix):
# for line in matrix:
# l = '';
# for element in line:
# l = l + str(element) + ' '
# #print(element," ")
# print(l)
#matrix = [[int(num) for num in line.split(' ')] for line in file ]
#print_matrix(matrix)
def dif(v1, v2):
    """Euclidean (L2) distance between two equal-length vectors."""
    squared = sum((v1[k] - v2[k]) ** 2 for k in range(len(v1)))
    return squared ** 0.5
# Count the matrix rows first so the loop below knows when it reaches
# the last row (the row-offset list gets one fewer append).
num_lines = len(open('matrix.txt').readlines())
file = open ( 'matrix.txt' , 'r')
# CSR-style sparse storage: `values` holds the nonzeros in row order,
# `columns` their column indices, and `lines[i]` the offset of row i's
# first nonzero inside `values`.
values = []
columns = []
lines = [0]
count = 0
i = 0
for l in file:
    numbers = list(map(int,l.split(' ')))
    count = 0
    for j in range(len(numbers)):
        if numbers[j] != 0:
            values.append(numbers[j])
            columns.append(j)
            count += 1
    if i != num_lines-1:
        lines.append(lines[i]+count)
    i += 1
#print(values)
#print(columns)
#print(lines)
file.close()
# Initial guess x^(0) = 0; `old` starts far away so the tolerance test
# always allows at least one iteration.
result = []
old = []
for i in range(len(lines)):
    result.append(0)
    old.append(10)
# Right-hand side vector b: a single whitespace-separated line.
file = open('b_array.txt','r')
l = file.readline()
b = [int(num) for num in l.split(' ')]
file.close()
#print(b)
tol = 0.0001
c = 0
print("\nIterações:")
# Gauss-Seidel sweeps: iterate until successive iterates are closer than
# `tol` in Euclidean distance (see dif above). `sum` shadows the builtin.
while dif(old,result) >= tol:
    old = list(result)
    for i in range(len(result)):
        start = lines[i]
        sum = 0
        diagonal = 0
        # Number of nonzeros stored for row i (last row runs to the end).
        if i != len(result)-1:
            quant = lines[i+1] - lines[i]
        else:
            quant = len(values) - start
        for j in range(quant):
            if i != columns[start+j]:
                # NOTE(review): result[j] indexes by the row-local nonzero
                # position; for a genuinely sparse row this should likely
                # be result[columns[start+j]] -- correct only when rows
                # are dense. Verify with a sparse test matrix.
                sum += (values[start+j] * result[j])
            else:
                diagonal = values[start+j]
        result[i] = (b[i] - sum)/diagonal
    print(c,": ",result,"\n")
    c += 1
print("\n\nResultado Final:",result)
| [
"andrelpoj@gmail.com"
] | andrelpoj@gmail.com |
c4217cdb768048de92d91a7d77b572f1e87e5963 | 19cb0062c86604540be2646503d1fc3a08290998 | /src/i2c_lib.py | 6a3257b031a29e046f49a0d46377a883b8562b9a | [] | no_license | Devinml/Raspberry-Pi-Human-Act-Rec | 2667906996bf611a5ba172f4a625451b3e372737 | 99f0d582890401bde423136b569b2f1000f59406 | refs/heads/master | 2022-12-11T01:41:44.758457 | 2020-09-18T14:06:32 | 2020-09-18T14:06:32 | 291,555,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | import smbus2 as smbus
from time import *
class i2c_device:
def __init__(self, addr, port=1):
self.addr = addr
self.bus = smbus.SMBus(port)
# Write a single command
def write_cmd(self, cmd):
self.bus.write_byte(self.addr, cmd)
sleep(0.0001)
# Write a command and argument
def write_cmd_arg(self, cmd, data):
self.bus.write_byte_data(self.addr, cmd, data)
sleep(0.0001)
# Write a block of data
def write_block_data(self, cmd, data):
self.bus.write_block_data(self.addr, cmd, data)
sleep(0.0001)
# Read a single byte
def read(self):
return self.bus.read_byte(self.addr)
# Read
def read_data(self, cmd):
return self.bus.read_byte_data(self.addr, cmd)
# Read a block of data
def read_block_data(self, cmd):
return self.bus.read_block_data(self.addr, cmd) | [
"devinlink8@gmail.com"
] | devinlink8@gmail.com |
2b45648db2c80daebcb2002c640b8bde09b4c8de | 6312e60ef386891f35f5b3b573d2eeaf6ae260ef | /845.py | 883df9b3ecd3aea7e12d2710168b6bb0aaf8c7b5 | [] | no_license | sp-shaopeng/leetcode-practice | 60938e3b8a4414b951ab1032e58ae15a741d1583 | 43a14e90b42ce1febb515e02cdd9d93781929173 | refs/heads/master | 2023-07-14T14:20:27.739201 | 2021-08-30T09:09:36 | 2021-08-30T09:09:36 | 341,163,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,591 | py | class Solution(object):
def longestMountain(self, arr):
"""
:type arr: List[int]
:rtype: int
"""
increase = 0
decrease = 0
ans = 0
#append last one to make it ==
arr.append(arr[len(arr) - 1])
for i in range(len(arr) - 1):
if(arr[i] == arr[i+1]) :
# it is increasing
if(decrease == 0) :
increase = 0
decrease = 0
else :
decrease = decrease + 1
current = increase + decrease
if(current > ans):
print("here", i,arr[i], current)
ans = current
increase = 0
decrease = 0
elif(arr[i] < arr[i+1]) :
if(decrease == 0):
increase = increase + 1
else:
decrease = decrease + 1
current = increase + decrease
if(current > ans):
print(i,arr[i], current)
ans = current
increase = 1
decrease = 0
else:
if(increase > 0) :
decrease = decrease + 1
else:
increase = 0
decrease = 0
return ans
| [
"sp.shaopeng@gmail.com"
] | sp.shaopeng@gmail.com |
6e8e2db86b07189c427f1c749be1fc59cb5a7325 | e6d5a800f0e9a419772cdd77332fd05330dde3b9 | /ex7_3.py | 67a17c06f2ac05a24089636edc73ef2c660d4132 | [] | no_license | clebsjr/python | fd2d1828a0dc87aacabfe232c4f5d6693a6dd4dc | 579851b7256fb2233b717768b460c1190aba0942 | refs/heads/master | 2021-06-25T14:16:45.192460 | 2020-12-10T15:20:17 | 2020-12-10T15:20:17 | 183,844,377 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | s1 = input("Digite a primeira string: ")
s2 = input("Digite a segunda string: ")
# Collect, in first-seen order, the characters that appear in exactly one
# of the two strings (an order-preserving symmetric difference).
s3 = ""
for letra in s1:
    if letra not in s2 and letra not in s3:
        s3 += letra
for letra in s2:
    if letra not in s1 and letra not in s3:
        s3 += letra
if s3 == "":
    # NOTE(review): the message says "no common characters found", but an
    # empty s3 means there are no *exclusive* characters -- verify intent.
    print("Caracteres comuns não encontrados")
else:
    print(f"Resultado: {s3}")
"cleberton.juninho@gmail.com"
] | cleberton.juninho@gmail.com |
58cb052e7dfd70b6584401958408c1b7f749e3a8 | 0d332ce571315884f3053467001a60043ae5be3c | /principal.py | 18f2936bd7d8e2a451cd14d8d29106a4a34b0523 | [] | no_license | cnunez1108/uno_game | 78199c236ccf53e87ae13bead2cb4e6701d43489 | 31eea886b033f060a9499be64f5bc0af5325508f | refs/heads/master | 2021-05-24T11:47:02.522173 | 2020-04-02T23:34:13 | 2020-04-02T23:34:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,370 | py | print("██╗ ██╗███╗ ██╗ ██████╗")
print("██║ ██║████╗ ██║██╔═══██╗")
print("██║ ██║██╔██╗ ██║██║ ██║")
print("██║ ██║██║╚██╗██║██║ ██║")
print("╚██████╔╝██║ ╚████║╚██████╔╝")
print( "╚═════╝ ╚═╝ ╚═══╝ ╚═════╝ ")
print(" ")
print(" Es un juego que se juega de 2 a 10 jugadores")
print(" ")
print(" Donde cada jugador recibe 7 cartas al empezar")
print(" ")
print(" Tu objetivo es llegar a los 500 puntos para derrotar a tus compañeros")
print(" ")
print(" ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄ ▄▄▄▄▄▄▄▄▄▄▄")
print("▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌")
print("▐░█▀▀▀▀▀▀▀▀▀ ▀▀▀▀█░█▀▀▀▀ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀▀▀▀█░▌ ▀▀▀▀█░█▀▀▀▀ ")
print("▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ ")
print("▐░█▄▄▄▄▄▄▄▄▄ ▐░▌ ▐░█▄▄▄▄▄▄▄█░▌▐░█▄▄▄▄▄▄▄█░▌ ▐░▌ ")
print("▐░░░░░░░░░░░▌ ▐░▌ ▐░░░░░░░░░░░▌▐░░░░░░░░░░░▌ ▐░▌ ")
print(" ▀▀▀▀▀▀▀▀▀█░▌ ▐░▌ ▐░█▀▀▀▀▀▀▀█░▌▐░█▀▀▀▀█░█▀▀ ▐░▌ ")
print(" ▐░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ ")
print(" ▄▄▄▄▄▄▄▄▄█░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ ")
print("▐░░░░░░░░░░░▌ ▐░▌ ▐░▌ ▐░▌▐░▌ ▐░▌ ▐░▌ ")
print(" ▀▀▀▀▀▀▀▀▀▀▀ ▀ ▀ ▀ ▀ ▀ ▀ ")
print( " ")
print( )
print("________ ________ ________ ________ ________ ________ ________ ________ ________")
print('"""""""" """""""" """""""" """""""" """""""" """""""" """""""" """""""" """"""""')
print( )
from mesa import *
# Main game loop. One iteration of the for-loop below is one player's
# turn; `jugadores`, `tablero`, `barajas`, `especiales` and
# `rondaIniciada` all come from `from mesa import *`.
while True:
    for s in range(0,len(jugadores)):
        os.system("cls")
        tablero.accion(jugadores)
        # First turn of a round: draw the opening card; otherwise just
        # re-display the current top card.
        if rondaIniciada:
            carta = tablero.inicial(barajas)
            rondaIniciada = False
        else:
            tablero.repCartas(carta)
        print(" ")
        print("{} tu mano es :".format(jugadores[s].nombre))
        print(" ")
        jugadores[s].mostrarMano()
        print(" ")
        jugadores[s].mostrarOpciones()
        desicion = input("¿Que deseas hacer?: ")
        # Keep re-drawing the table until the player picks a valid action
        # (q = play a card, r = draw, w = declare "Uno" then play).
        while desicion not in ["q","r","w"]:
            os.system("cls")
            tablero.accion(jugadores)
            if rondaIniciada:
                carta = tablero.inicial(barajas)
                rondaIniciada = False
            else:
                tablero.repCartas(carta)
            print(" ")
            print("{} tu mano es :".format(jugadores[s].nombre))
            print(" ")
            jugadores[s].mostrarMano()
            print(" ")
            jugadores[s].mostrarOpciones()
            desicion = input("¿Que deseas hacer?: ")
        # "q": play a card from the hand.
        if desicion == "q":
            try:
                opcion = int(input(" Que carta deseas jugar? : "))
                jugada = jugadores[s].jugarCarta(opcion)
                if jugada[0] in barajas.valorCartas:
                    # NOTE(review): this tests the bound method object,
                    # which is always truthy -- probably meant
                    # tablero.validarCarta(jugada, carta). Verify.
                    if tablero.validarCarta:
                        if jugada[0] == "Retorno":
                            jugadores.reverse()
                        elif jugada[0] == "Elegir color":
                            especiales.mostrarColores()
                            color = input("Elige un color: ")
                            especiales.cambiarColor(jugada,especiales.opcionColor(color))
                            carta = jugada
                            tablero.repCartas(carta)
                        elif tablero.validarCarta(jugada,carta):
                            carta = jugada
                            tablero.repCartas(carta)
                        else:
                            # Invalid play: return the card and penalise
                            # with an extra draw.
                            jugadores[s].mano.append(jugada)
                            print("Haz sido penalizado por jugada incorrecta")
                            barajas.robar(jugadores[s])
                            time.sleep(2)
            # NOTE(review): bare except swallows everything, including
            # KeyboardInterrupt; consider except (ValueError, IndexError).
            except:
                print("Mira bien la longitud de tu mano")
                print("Penalizado por no atencionar bien tu mano")
                time.sleep(2)
        # "r": draw a card and pass the turn.
        elif desicion == "r":
            barajas.robar(jugadores[s])
            time.sleep(2)
        # "w": declare "Uno!" and then play a card.
        else:
            try:
                jugadores[s].Uno()
                opcion = int(input(" Que carta deseas jugar? : "))
                jugada = jugadores[s].jugarCarta(opcion)
                if jugada[0] in barajas.valorCartas:
                    # NOTE(review): same always-truthy method test as above.
                    if tablero.validarCarta:
                        if jugada[0] == "Retorno":
                            jugadores.reverse()
                        if tablero.validarCarta(jugada,carta):
                            carta = jugada
                            tablero.repCartas(carta)
                        else:
                            jugadores[s].mano.append(jugada)
                            print("Haz sido penalizado por jugada incorrecta")
                            barajas.robar(jugadores[s])
                            time.sleep(1)
                time.sleep(2)
            except:
                print("Mira bien la longitud de tu mano")
                print("Penalizado por no atencionar bien tu mano")
                time.sleep(2)
        # Clear a stale "Uno" declaration once the hand grows back.
        if len(jugadores[s].mano) >= 2 and jugadores[s].estado == "Uno":
            jugadores[s].estado = ""
        # One card left without having declared "Uno": penalty draw.
        if len(jugadores[s].mano) == 1 and jugadores[s].estado == "":
            print("Haz sido penalizado por no decir 'Uno' ")
            barajas.robar(jugadores[s])
            time.sleep(2)
        # Empty hand: round won -- score it and reset the table.
        if len(jugadores[s].mano) == 0:
            print("{} ha ganado esta ronda".format(jugadores[s].nombre))
            jugadores[s].sumarPuntos(jugadores,jugadores[s],barajas)
            tablero = Mesa()
            tablero.accion(jugadores)
            barajas.repartir(jugadores)
            rondaIniciada = True
            jugadores[s].reiniciarMano(jugadores)
            jugadores[s].reiniciarEstado(jugadores)
            print("Nueva ronda")
            time.sleep(2)
            os.system("cls")
        # NOTE(review): `break` only exits the for-loop; the outer
        # `while True` restarts it. Confirm verificarPuntos is meant to
        # end the program itself when someone reaches 500 points.
        if jugadores[s].verificarPuntos(jugadores):
            break
| [
"luisprooc@gmail.com"
] | luisprooc@gmail.com |
c09c4c872e08f2b035c24a8533dc2d86407835e1 | ee53b0262007b2f0db0fe15b2ad85f65fafa4e25 | /Leetcode/1488. Avoid Flood in The City.py | 8c3fd8f830fe17cfd954caa9f8977d15f440474a | [] | no_license | xiaohuanlin/Algorithms | bd48caacb08295fc5756acdac609be78e143a760 | 157cbaeeff74130e5105e58a6b4cdf66403a8a6f | refs/heads/master | 2023-08-09T05:18:06.221485 | 2023-08-08T11:53:15 | 2023-08-08T11:53:15 | 131,491,056 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,234 | py | '''
Your country has an infinite number of lakes. Initially, all the lakes are empty, but when it rains over the nth lake, the nth lake becomes full of water. If it rains over a lake that is full of water, there will be a flood. Your goal is to avoid floods in any lake.
Given an integer array rains where:
rains[i] > 0 means there will be rains over the rains[i] lake.
rains[i] == 0 means there are no rains this day and you can choose one lake this day and dry it.
Return an array ans where:
ans.length == rains.length
ans[i] == -1 if rains[i] > 0.
ans[i] is the lake you choose to dry in the ith day if rains[i] == 0.
If there are multiple valid answers return any of them. If it is impossible to avoid flood return an empty array.
Notice that if you chose to dry a full lake, it becomes empty, but if you chose to dry an empty lake, nothing changes.
Example 1:
Input: rains = [1,2,3,4]
Output: [-1,-1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day full lakes are [1,2,3]
After the fourth day full lakes are [1,2,3,4]
There's no day to dry any lake and there is no flood in any lake.
Example 2:
Input: rains = [1,2,0,0,2,1]
Output: [-1,-1,2,1,-1,-1]
Explanation: After the first day full lakes are [1]
After the second day full lakes are [1,2]
After the third day, we dry lake 2. Full lakes are [1]
After the fourth day, we dry lake 1. There is no full lakes.
After the fifth day, full lakes are [2].
After the sixth day, full lakes are [1,2].
It is easy that this scenario is flood-free. [-1,-1,1,2,-1,-1] is another acceptable scenario.
Example 3:
Input: rains = [1,2,0,1,2]
Output: []
Explanation: After the second day, full lakes are [1,2]. We have to dry one lake in the third day.
After that, it will rain over lakes [1,2]. It's easy to prove that no matter which lake you choose to dry in the 3rd day, the other one will flood.
Constraints:
1 <= rains.length <= 105
0 <= rains[i] <= 109
'''
import unittest
from typing import *
from bisect import bisect
class Solution:
    def avoidFlood(self, rains: List[int]) -> List[int]:
        """Greedy scheduling of dry days.

        Remember the last day each lake was rained on; when a lake is
        about to be refilled, dry it on the earliest still-unused
        zero-day that falls after that last fill (binary search).
        Dry days that end up unused default to lake 1, which is always
        a valid choice. Returns [] when some refill cannot be preceded
        by a dry day.
        """
        last_fill = {}      # lake id -> most recent day it was rained on
        free_dry_days = []  # ascending indices of unassigned zero-days
        ans = []
        for day, lake in enumerate(rains):
            if lake == 0:
                free_dry_days.append(day)
                ans.append(1)
                continue
            ans.append(-1)
            if lake in last_fill:
                # First free dry day strictly after the previous fill.
                pos = bisect(free_dry_days, last_fill[lake])
                if pos == len(free_dry_days):
                    return []
                ans[free_dry_days.pop(pos)] = lake
            last_fill[lake] = day
        return ans
class TestSolution(unittest.TestCase):
    """Table-driven check of Solution.avoidFlood."""
    def test_case(self):
        # Each entry: (args tuple passed to avoidFlood, expected result).
        examples = (
            (([1,2,0,0,2,1],),[-1,-1,2,1,-1,-1]),
        )
        for first, second in examples:
            self.assert_function(first, second)
    def assert_function(self, first, second):
        # Helper so the failure message echoes the offending input.
        self.assertEqual(Solution().avoidFlood(*first), second,
                         msg="first: {}; second: {}".format(first, second))
unittest.main()  # run the suite when this file is executed directly
| [
"xiaohuanlin1993@gmail.com"
] | xiaohuanlin1993@gmail.com |
e3bc9b5713cf79fea2f4287a0cc8aff9065c8319 | fc772efe3eccb65e4e4a8da7f2b2897586b6a0e8 | /Compute/nova/service.py | 581b43fe5ed3c2b69b793788585d38688f6cf69a | [] | no_license | iphonestack/Openstack_Kilo | 9ae12505cf201839631a68c9ab4c041f737c1c19 | b0ac29ddcf24ea258ee893daf22879cff4d03c1f | refs/heads/master | 2021-06-10T23:16:48.372132 | 2016-04-18T07:25:40 | 2016-04-18T07:25:40 | 56,471,076 | 0 | 2 | null | 2020-07-24T02:17:46 | 2016-04-18T02:32:43 | Python | UTF-8 | Python | false | false | 16,592 | py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import os
import random
import sys
from oslo.config import cfg
from oslo import messaging
from oslo.utils import importutils
from oslo_concurrency import processutils
from nova import baserpc
from nova import conductor
from nova import context
from nova import debugger
from nova import exception
from nova.i18n import _, _LE, _LW
from nova.objects import base as objects_base
from nova.openstack.common import log as logging
from nova.openstack.common import service
from nova import rpc
from nova import servicegroup
from nova import utils
from nova import version
from nova import wsgi
LOG = logging.getLogger(__name__)
# Configuration options shared by all nova services: reporting/periodic
# task cadence, the listen address/port and worker count for each API,
# and the manager class loaded for each service binary.
service_opts = [
    cfg.IntOpt('report_interval',
               default=10,
               help='Seconds between nodes reporting state to datastore'),
    cfg.BoolOpt('periodic_enable',
                default=True,
                help='Enable periodic tasks'),
    cfg.IntOpt('periodic_fuzzy_delay',
               default=60,
               help='Range of seconds to randomly delay when starting the'
                    ' periodic task scheduler to reduce stampeding.'
                    ' (Disable by setting to 0)'),
    cfg.ListOpt('enabled_apis',
                default=['ec2', 'osapi_compute', 'metadata'],
                help='A list of APIs to enable by default'),
    cfg.ListOpt('enabled_ssl_apis',
                default=[],
                help='A list of APIs with enabled SSL'),
    cfg.StrOpt('ec2_listen',
               default="0.0.0.0",
               help='The IP address on which the EC2 API will listen.'),
    cfg.IntOpt('ec2_listen_port',
               default=8773,
               help='The port on which the EC2 API will listen.'),
    cfg.IntOpt('ec2_workers',
               help='Number of workers for EC2 API service. The default will '
                    'be equal to the number of CPUs available.'),
    cfg.StrOpt('osapi_compute_listen',
               default="0.0.0.0",
               help='The IP address on which the OpenStack API will listen.'),
    cfg.IntOpt('osapi_compute_listen_port',
               default=8774,
               help='The port on which the OpenStack API will listen.'),
    cfg.IntOpt('osapi_compute_workers',
               help='Number of workers for OpenStack API service. The default '
                    'will be the number of CPUs available.'),
    cfg.StrOpt('metadata_manager',
               default='nova.api.manager.MetadataManager',
               help='OpenStack metadata service manager'),
    cfg.StrOpt('metadata_listen',
               default="0.0.0.0",
               help='The IP address on which the metadata API will listen.'),
    cfg.IntOpt('metadata_listen_port',
               default=8775,
               help='The port on which the metadata API will listen.'),
    cfg.IntOpt('metadata_workers',
               help='Number of workers for metadata service. The default will '
                    'be the number of CPUs available.'),
    cfg.StrOpt('compute_manager',
               default='nova.compute.manager.ComputeManager',
               help='Full class name for the Manager for compute'),
    cfg.StrOpt('console_manager',
               default='nova.console.manager.ConsoleProxyManager',
               help='Full class name for the Manager for console proxy'),
    cfg.StrOpt('consoleauth_manager',
               default='nova.consoleauth.manager.ConsoleAuthManager',
               help='Manager for console auth'),
    cfg.StrOpt('cert_manager',
               default='nova.cert.manager.CertManager',
               help='Full class name for the Manager for cert'),
    cfg.StrOpt('network_manager',
               default='nova.network.manager.VlanManager',
               help='Full class name for the Manager for network'),
    cfg.StrOpt('scheduler_manager',
               default='nova.scheduler.manager.SchedulerManager',
               help='Full class name for the Manager for scheduler'),
    cfg.IntOpt('service_down_time',
               default=60,
               help='Maximum time since last check-in for up service'),
    ]
CONF = cfg.CONF
CONF.register_opts(service_opts)
# 'host' is defined in nova.netconf; import it so CONF.host resolves here.
CONF.import_opt('host', 'nova.netconf')
class Service(service.Service):
    """Service object for binaries running on hosts.
    A service takes a manager and enables rpc by listening to queues based
    on topic. It also periodically runs tasks on the manager and reports
    it state to the database services table.
    """
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_enable=None, periodic_fuzzy_delay=None,
                 periodic_interval_max=None, db_allowed=True,
                 *args, **kwargs):
        super(Service, self).__init__()
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        # NOTE(russellb) We want to make sure to create the servicegroup API
        # instance early, before creating other things such as the manager,
        # that will also create a servicegroup API instance. Internally, the
        # servicegroup only allocates a single instance of the driver API and
        # we want to make sure that our value of db_allowed is there when it
        # gets created. For that to happen, this has to be the first instance
        # of the servicegroup API.
        self.servicegroup_api = servicegroup.API(db_allowed=db_allowed)
        manager_class = importutils.import_class(self.manager_class_name)
        self.manager = manager_class(host=self.host, *args, **kwargs)
        self.rpcserver = None
        self.report_interval = report_interval
        self.periodic_enable = periodic_enable
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.periodic_interval_max = periodic_interval_max
        self.saved_args, self.saved_kwargs = args, kwargs
        self.backdoor_port = None
        self.conductor_api = conductor.API(use_local=db_allowed)
        self.conductor_api.wait_until_ready(context.get_admin_context())
    def start(self):
        verstr = version.version_string_with_package()
        LOG.audit(_('Starting %(topic)s node (version %(version)s)'),
                  {'topic': self.topic, 'version': verstr})
        self.basic_config_check()
        self.manager.init_host()
        self.model_disconnected = False
        ctxt = context.get_admin_context()
        # Look up (or lazily create) this service's row in the services
        # table, tolerating a race with a sibling worker.
        try:
            self.service_ref = self.conductor_api.service_get_by_args(ctxt,
                self.host, self.binary)
            self.service_id = self.service_ref['id']
        except exception.NotFound:
            try:
                self.service_ref = self._create_service_ref(ctxt)
            except (exception.ServiceTopicExists,
                    exception.ServiceBinaryExists):
                # NOTE(danms): If we race to create a record with a sibling
                # worker, don't fail here.
                self.service_ref = self.conductor_api.service_get_by_args(ctxt,
                    self.host, self.binary)
        self.manager.pre_start_hook()
        if self.backdoor_port is not None:
            self.manager.backdoor_port = self.backdoor_port
        LOG.debug("Creating RPC server for service %s", self.topic)
        target = messaging.Target(topic=self.topic, server=self.host)
        # The manager itself plus the base RPC API (and any extra
        # endpoints the manager exposes) all serve on this topic/server.
        endpoints = [
            self.manager,
            baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
        ]
        endpoints.extend(self.manager.additional_endpoints)
        serializer = objects_base.NovaObjectSerializer()
        self.rpcserver = rpc.get_server(target, endpoints, serializer)
        self.rpcserver.start()
        self.manager.post_start_hook()
        LOG.debug("Join ServiceGroup membership for this service %s",
                  self.topic)
        # Add service to the ServiceGroup membership group.
        self.servicegroup_api.join(self.host, self.topic, self)
        if self.periodic_enable:
            # Random initial delay spreads periodic-task start times across
            # workers to avoid stampeding (see periodic_fuzzy_delay option).
            if self.periodic_fuzzy_delay:
                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
            else:
                initial_delay = None
            self.tg.add_dynamic_timer(self.periodic_tasks,
                                     initial_delay=initial_delay,
                                     periodic_interval_max=
                                     self.periodic_interval_max)
    def _create_service_ref(self, context):
        """Create the services-table row for this host/binary and save its id."""
        svc_values = {
            'host': self.host,
            'binary': self.binary,
            'topic': self.topic,
            'report_count': 0,
        }
        service = self.conductor_api.service_create(context, svc_values)
        self.service_id = service['id']
        return service
    # Delegate any unknown attribute to the manager (if one is set).
    def __getattr__(self, key):
        manager = self.__dict__.get('manager', None)
        return getattr(manager, key)
    @classmethod
    def create(cls, host=None, binary=None, topic=None, manager=None,
               report_interval=None, periodic_enable=None,
               periodic_fuzzy_delay=None, periodic_interval_max=None,
               db_allowed=True):
        """Instantiates class and passes back application object.
        :param host: defaults to CONF.host
        :param binary: defaults to basename of executable
        :param topic: defaults to bin_name - 'nova-' part
        :param manager: defaults to CONF.<topic>_manager
        :param report_interval: defaults to CONF.report_interval
        :param periodic_enable: defaults to CONF.periodic_enable
        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
        :param periodic_interval_max: if set, the max time to wait between runs
        """
        if not host:
            host = CONF.host
        if not binary:
            binary = os.path.basename(sys.argv[0])
        if not topic:
            topic = binary.rpartition('nova-')[2]
        if not manager:
            manager_cls = ('%s_manager' %
                           binary.rpartition('nova-')[2])
            manager = CONF.get(manager_cls, None)
        if report_interval is None:
            report_interval = CONF.report_interval
        if periodic_enable is None:
            periodic_enable = CONF.periodic_enable
        if periodic_fuzzy_delay is None:
            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
        debugger.init()
        service_obj = cls(host, binary, topic, manager,
                          report_interval=report_interval,
                          periodic_enable=periodic_enable,
                          periodic_fuzzy_delay=periodic_fuzzy_delay,
                          periodic_interval_max=periodic_interval_max,
                          db_allowed=db_allowed)
        return service_obj
    def kill(self):
        """Destroy the service object in the datastore."""
        self.stop()
        try:
            self.conductor_api.service_destroy(context.get_admin_context(),
                self.service_id)
        except exception.NotFound:
            LOG.warning(_LW('Service killed that has no database entry'))
    def stop(self):
        # Best-effort shutdown: the RPC server may never have been started.
        try:
            self.rpcserver.stop()
            self.rpcserver.wait()
        except Exception:
            pass
        try:
            self.manager.cleanup_host()
        except Exception:
            LOG.exception(_LE('Service error occurred during cleanup_host'))
            pass
        super(Service, self).stop()
    def periodic_tasks(self, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        ctxt = context.get_admin_context()
        return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
    def basic_config_check(self):
        """Perform basic config checks before starting processing."""
        # Make sure the tempdir exists and is writable
        try:
            with utils.tempdir():
                pass
        except Exception as e:
            LOG.error(_LE('Temporary directory is invalid: %s'), e)
            sys.exit(1)
class WSGIService(object):
"""Provides ability to launch API from a 'paste' configuration."""
def __init__(self, name, loader=None, use_ssl=False, max_url_len=None):
"""Initialize, but do not start the WSGI server.
:param name: The name of the WSGI server given to the loader.
:param loader: Loads the WSGI application using the given name.
:returns: None
"""
self.name = name
self.manager = self._get_manager()
self.loader = loader or wsgi.Loader()
self.app = self.loader.load_app(name)
self.host = getattr(CONF, '%s_listen' % name, "0.0.0.0")
self.port = getattr(CONF, '%s_listen_port' % name, 0)
self.workers = (getattr(CONF, '%s_workers' % name, None) or
processutils.get_worker_count())
if self.workers and self.workers < 1:
worker_name = '%s_workers' % name
msg = (_("%(worker_name)s value of %(workers)s is invalid, "
"must be greater than 0") %
{'worker_name': worker_name,
'workers': str(self.workers)})
raise exception.InvalidInput(msg)
self.use_ssl = use_ssl
self.server = wsgi.Server(name,
self.app,
host=self.host,
port=self.port,
use_ssl=self.use_ssl,
max_url_len=max_url_len)
# Pull back actual port used
self.port = self.server.port
self.backdoor_port = None
def reset(self):
"""Reset server greenpool size to default.
:returns: None
"""
self.server.reset()
def _get_manager(self):
"""Initialize a Manager object appropriate for this service.
Use the service name to look up a Manager subclass from the
configuration and initialize an instance. If no class name
is configured, just return None.
:returns: a Manager instance, or None.
"""
fl = '%s_manager' % self.name
if fl not in CONF:
return None
manager_class_name = CONF.get(fl, None)
if not manager_class_name:
return None
manager_class = importutils.import_class(manager_class_name)
return manager_class()
def start(self):
"""Start serving this service using loaded configuration.
Also, retrieve updated port number in case '0' was passed in, which
indicates a random port should be used.
:returns: None
"""
if self.manager:
self.manager.init_host()
self.manager.pre_start_hook()
if self.backdoor_port is not None:
self.manager.backdoor_port = self.backdoor_port
self.server.start()
if self.manager:
self.manager.post_start_hook()
def stop(self):
    """Stop serving this API.

    :returns: None

    """
    # Delegates shutdown to the wrapped wsgi.Server.
    self.server.stop()
def wait(self):
    """Wait for the service to stop serving this API.

    :returns: None

    """
    # Blocks until the wrapped wsgi.Server has finished.
    self.server.wait()
def process_launcher():
    """Return a new ProcessLauncher for running multi-worker services."""
    return service.ProcessLauncher()
# NOTE(vish): the global launcher is to maintain the existing
#             functionality of calling service.serve +
#             service.wait
# Module-global launcher: set once by serve(), awaited by wait().
_launcher = None
def serve(server, workers=None):
    """Launch *server* through the module-global launcher.

    May only be called once per process; a second call raises
    RuntimeError.
    """
    global _launcher
    if _launcher:
        raise RuntimeError(_('serve() can only be called once'))
    _launcher = service.launch(server, workers=workers)
def wait():
    """Block until the service launched via serve() exits."""
    _launcher.wait()
| [
"wwang@linx-info.com"
] | wwang@linx-info.com |
d7c897b1fa38a472e0636bfb49694cb78a9a4151 | 5492859d43da5a8e292777c31eace71e0a57dedf | /chat/migrations/0021_auto_20190711_2100.py | 648c9ff6915b6a1a5b5e87052c58dbab41893255 | [
"MIT"
] | permissive | akindele214/181hub_2 | 93ad21dc6d899b6c56fbe200354b1678bb843705 | 48b8814b5f66ad87f9a54721506076ddf70fe9bc | refs/heads/master | 2022-12-13T01:15:07.925556 | 2020-05-19T09:39:57 | 2020-05-19T09:39:57 | 196,470,605 | 1 | 1 | MIT | 2022-12-08T01:22:55 | 2019-07-11T22:04:42 | Python | UTF-8 | Python | false | false | 762 | py | # Generated by Django 2.1.5 on 2019-07-11 20:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-point ReportChat.chat/.user foreign keys with CASCADE deletes."""

    dependencies = [
        ('chat', '0020_reportchat_content'),
    ]

    operations = [
        migrations.AlterField(
            model_name='reportchat',
            name='chat',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='report_chat', to='chat.Chat'),
        ),
        migrations.AlterField(
            model_name='reportchat',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"yaomingnakel@gmail.com"
] | yaomingnakel@gmail.com |
72c5c560be0c150db2650cd8ddc1d2d5d0b5b6df | f4d8faeebbf9b7fe43396c637096a56c01a70060 | /blog/migrations/0006_auto_20201105_1114.py | 5f8babf0fc5e5c91344168878c7a39cc28a2de29 | [] | no_license | eloghin/blog | eb44f6d57e88fefacb48111791b9c96fd4883be9 | 3c27a112bb3d51a5a25e901c10a632d4d6251a15 | refs/heads/main | 2023-01-07T05:47:59.124104 | 2020-11-05T13:12:48 | 2020-11-05T13:12:48 | 309,698,269 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # Generated by Django 3.1.2 on 2020-11-05 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Order comments by creation date and add an optional email field."""

    dependencies = [
        ('blog', '0005_auto_20201021_0920'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='comment',
            options={'ordering': ('date_created',)},
        ),
        migrations.AddField(
            model_name='comment',
            name='email',
            field=models.EmailField(max_length=254, null=True),
        ),
    ]
| [
"contact@elenaloghin.ro"
] | contact@elenaloghin.ro |
6c1483f51ae32ce5f46ab80ebc0083aa53433dfe | 9633c82b4cb2ecd58f0d76bc00935e2cda69a5ac | /tools/build.py | dc798611927c932180bd8e7772e93eb781b7c32f | [] | no_license | kleopatra999/csswg-test | 1f4ff29b515166e751673d4ead579b27cb41b815 | 8c45cd116dae1ee5bd68650942bc6751e34bd80a | refs/heads/master | 2021-01-15T17:06:48.661211 | 2014-08-08T15:37:29 | 2014-08-08T15:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,621 | py | #!/usr/bin/env python
# CSS Test Suite Build Script
# Copyright 2011 Hewlett-Packard Development Company, L.P.
# Initial code by fantasai, joint copyright 2010 W3C and Microsoft
# Licensed under BSD 3-Clause: <http://www.w3.org/Consortium/Legal/2008/03-bsd-license>
import sys
import os
import json
import optparse
import shutil
from apiclient import apiclient
from w3ctestlib import Sources, Utils, Suite, Indexer
from mercurial import hg, ui
class Builder(object):
    """Builds W3C CSS test suites from the local Mercurial working tree.

    Suite / specification / flag metadata comes from the Shepherd web API
    (with an on-disk JSON cache under tools/cache as fallback).  Tests are
    gathered from the source tree and each suite is built into a temporary
    work path, then moved into outputPath (optionally preserving the
    previous build in backupPath).
    """

    def __init__(self, ui, outputPath, backupPath, ignorePaths, onlyCache):
        self.reset(onlyCache)
        self.ui = ui
        # NOTE(review): ('support') is just the string 'support', not a tuple --
        # 'dirName in self.skipDirs' is therefore a substring test.  A trailing
        # comma may have been intended; confirm before changing.
        self.skipDirs = ('support')
        # Directories copied verbatim instead of processed as tests.
        self.rawDirs = {'other-formats': 'other'}
        self.sourceTree = Sources.SourceTree(hg.repository(ui, '.'))
        self.sourceCache = Sources.SourceCache(self.sourceTree)
        self.cacheDir = 'tools/cache'
        self.outputPath = outputPath.rstrip('/') if (outputPath) else 'dist'
        self.backupPath = backupPath.rstrip('/') if (backupPath) else None
        self.ignorePaths = [path.rstrip('/') for path in ignorePaths] if (ignorePaths) else []
        self.workPath = 'build-temp'
        # Never scan our own output/backup/work directories for tests.
        self.ignorePaths += (self.outputPath, self.backupPath, self.workPath)

    def reset(self, onlyCache):
        """Clear all per-build state; connect to Shepherd unless cache-only."""
        self.useCacheOnly = onlyCache
        self.shepherd = apiclient.apiclient.APIClient('https://api.csswg.org/shepherd/', version = 'vnd.csswg.shepherd.v1') if (not onlyCache) else None
        self.cacheData = False
        self.testSuiteData = {}
        self.specificationData = {}
        self.flagData = {}
        self.specNames = {}        # lowercased spec URL -> spec name
        self.specAnchors = {}      # spec name -> set of known anchor URIs
        self.buildSuiteNames = []
        self.buildSpecNames = {}   # spec name -> list of suites targeting it
        self.testSuites = {}

    def _loadShepherdData(self, apiName, description, **kwargs):
        """Fetch one Shepherd API resource, writing/falling back to the JSON cache.

        Returns the decoded data dict, or None if both the API call and the
        cache fail.
        """
        self.ui.status("Loading ", description, " information\n")
        cacheFile = os.path.join(self.cacheDir, apiName + '.json')
        if (not self.useCacheOnly or (not os.path.exists(cacheFile))):
            result = self.shepherd.get(apiName, **kwargs)
            if (result and (200 == result.status)):
                data = {}
                for name in result.data: # trim leading _
                    data[name[1:]] = result.data[name]
                with open(cacheFile, 'w') as file:
                    json.dump(data, file)
                return data
            self.ui.status("Shepherd API call failed, result: ", result.status if result else 'None', "\n")
        if (os.path.exists(cacheFile)):
            self.ui.status("Loading cached data.\n")
            try:
                with open(cacheFile, 'r') as file:
                    return json.load(file)
            # NOTE(review): bare except silently discards any cache read/parse
            # error; at minimum the exception type should be narrowed.
            except:
                pass
        return None

    def _addAnchors(self, anchors, specName):
        """Recursively record every anchor URI (lowercased) for *specName*."""
        for anchor in anchors:
            self.specAnchors[specName].add(anchor['uri'].lower())
            if ('children' in anchor):
                self._addAnchors(anchor['children'], specName)

    def getSpecName(self, url):
        """Map a test's spec link URL to (specName, anchorURL).

        Returns (specName, None) when the URL matches a known spec but not a
        known anchor, and (None, None) when no spec matches.  The URL->name
        tables are built lazily on first call.
        """
        if (not self.specNames):
            for specName in self.specificationData:
                specData = self.specificationData[specName]
                officialURL = specData.get('base_uri')
                if (officialURL):
                    if (officialURL.endswith('/')):
                        officialURL = officialURL[:-1]
                    self.specNames[officialURL.lower()] = specName
                draftURL = specData.get('draft_uri')
                if (draftURL):
                    if (draftURL.endswith('/')):
                        draftURL = draftURL[:-1]
                    self.specNames[draftURL.lower()] = specName
                self.specAnchors[specName] = set()
                if ('anchors' in specData):
                    self._addAnchors(specData['anchors'], specName)
                if ('draft_anchors' in specData):
                    self._addAnchors(specData['draft_anchors'], specName)
        url = url.lower()
        for specURL in self.specNames:
            # Match whole-URL, sub-path, or fragment forms of the spec URL.
            if (url.startswith(specURL) and
                ((url == specURL) or
                 url.startswith(specURL + '/') or
                 url.startswith(specURL + '#'))):
                anchorURL = url[len(specURL):]
                if (anchorURL.startswith('/')):
                    anchorURL = anchorURL[1:]
                specName = self.specNames[specURL]
                if (anchorURL in self.specAnchors[specName]):
                    return (specName, anchorURL)
                return (specName, None)
        return (None, None)

    def gatherTests(self, dir):
        """Recursively collect the test files under *dir* into their suites."""
        dirName = os.path.basename(dir)
        if (dirName in self.skipDirs):
            return
        self.ui.note("Scanning directory: ", dir, "\n")
        suiteFileNames = {}   # suite name -> set of file names found here
        for fileName in Utils.listfiles(dir):
            filePath = os.path.join(dir, fileName)
            if (self.sourceTree.isTestCase(filePath)):
                source = self.sourceCache.generateSource(filePath, fileName)
                if (source.isTest()):
                    metaData = source.getMetadata(True)
                    if (metaData):
                        for specURL in metaData['links']:
                            specName, anchorURL = self.getSpecName(specURL)
                            if (specName):
                                if (specName in self.buildSpecNames):
                                    if (anchorURL):
                                        for testSuiteName in self.buildSpecNames[specName]:
                                            if (testSuiteName not in suiteFileNames):
                                                suiteFileNames[testSuiteName] = set()
                                            suiteFileNames[testSuiteName].add(fileName)
                                    else:
                                        self.ui.warn("Test links to unknown specification anchor: ", specURL, "\n in: ", filePath, "\n")
                            else:
                                self.ui.note("Unknown specification URL: ", specURL, "\n in: ", filePath, "\n")
                    else:
                        if (source.errors):
                            self.ui.warn("Error parsing '", filePath, "': ", ' '.join(source.errors), "\n")
                        else:
                            self.ui.warn("No metadata available for '", filePath, "'\n")
        for testSuiteName in suiteFileNames:
            if (dirName in self.rawDirs):
                # Raw directories are copied as-is rather than built.
                self.testSuites[testSuiteName].addRaw(dir, self.rawDirs[dirName])
            else:
                self.testSuites[testSuiteName].addTestsByList(dir, suiteFileNames[testSuiteName])
        for subDir in Utils.listdirs(dir):
            subDirPath = os.path.join(dir, subDir)
            if (not (self.sourceTree.isIgnoredDir(subDirPath) or (subDirPath in self.ignorePaths))):
                self.gatherTests(subDirPath)

    def _findSections(self, baseURL, anchors, sectionData, parentSectionName = ''):
        """Flatten the anchor tree into (url, section name, title) tuples."""
        if (anchors):
            for anchor in anchors:
                if ('section' in anchor):
                    sectionData.append((baseURL + anchor['uri'], anchor['name'],
                                        anchor['title'] if 'title' in anchor else 'Untitled'))
                else:
                    sectionData.append((baseURL + anchor['uri'], parentSectionName + '.' + anchor['name'], None))
                if ('children' in anchor):
                    self._findSections(baseURL, anchor['children'], sectionData, anchor['name'])
        return sectionData

    def getSections(self, specName):
        """Return the flattened section list for one specification."""
        specData = self.specificationData[specName]
        specURL = specData['base_uri'] if ('base_uri' in specData) else specData.get('draft_uri')
        anchorData = specData['anchors'] if ('anchors' in specData) else specData['draft_anchors']
        sectionData = []
        self._findSections(specURL, anchorData, sectionData)
        return sectionData

    def _user(self, user):
        """Format a Shepherd user record as a display string."""
        if (user):
            data = user['full_name']
            if ('organization' in user):
                data += ', ' + user['organization']
            if ('uri' in user):
                data += ', ' + user['uri']
            elif ('email' in user):
                # Obfuscate the address slightly for publication.
                data += ', <' + user['email'].replace('@', ' @') + '>'
            return data
        return 'None Yet'

    def getSuiteData(self):
        """Build the per-suite template data dict used by the indexer."""
        data = {}
        for testSuiteName in self.testSuiteData:
            testSuiteData = self.testSuiteData[testSuiteName]
            specData = self.specificationData[testSuiteData['specs'][0]]
            data[testSuiteName] = {
                'title': testSuiteData['title'] if ('title' in testSuiteData) else 'Untitled',
                'spec': specData['title'] if ('title' in specData) else specData['name'],
                'specroot': specData['base_uri'] if ('base_uri' in specData) else specData.get('draft_uri'),
                'draftroot': specData['draft_uri'] if ('draft_uri' in specData) else specData.get('base_uri'),
                'owner': self._user(testSuiteData['owners'][0] if ('owners' in testSuiteData) else None),
                'harness': testSuiteName,
                'status': testSuiteData['status'] if ('status' in testSuiteData) else 'Unknown'
            }
        return data

    def getFlags(self):
        """Build the flag template data dict used by the indexer."""
        data = {}
        for flag in self.flagData:
            flagData = self.flagData[flag]
            data[flag] = {
                'title': flagData['description'] if ('description' in flagData) else 'Unknown',
                'abbr': flagData['title'] if ('title' in flagData) else flag
            }
        return data

    def build(self, testSuiteNames):
        """Build the named test suites (or all buildable suites when empty).

        :returns: 0 on success, a negative code when required metadata
                  cannot be loaded.
        """
        try:
            os.makedirs(self.cacheDir)
        except:
            # Cache directory already exists (or cannot be created; a later
            # cache write would then surface the real error).
            pass
        self.testSuiteData = self._loadShepherdData('test_suites', 'test suite')
        if (not self.testSuiteData):
            self.ui.warn("ERROR: Unable to load test suite information.\n")
            return -1
        if (testSuiteNames):
            self.buildSuiteNames = []
            for testSuiteName in testSuiteNames:
                if (testSuiteName in self.testSuiteData):
                    self.buildSuiteNames.append(testSuiteName)
                else:
                    self.ui.status("Unknown test suite: ", testSuiteName, "\n")
        else:
            # No explicit names: build every suite flagged buildable.
            self.buildSuiteNames = [testSuiteName for testSuiteName in self.testSuiteData if self.testSuiteData[testSuiteName].get('build')]
        self.buildSpecNames = {}
        if (self.buildSuiteNames):
            self.specificationData = self._loadShepherdData('specifications', 'specification', anchors = True, draft = True)
            if (not self.specificationData):
                self.ui.warn("ERROR: Unable to load specification information.\n")
                return -2
            for testSuiteName in self.buildSuiteNames:
                specNames = self.testSuiteData[testSuiteName].get('specs')
                if (specNames):
                    for specName in specNames:
                        if (specName in self.specificationData):
                            if (specName in self.buildSpecNames):
                                self.buildSpecNames[specName].append(testSuiteName)
                            else:
                                self.buildSpecNames[specName] = [testSuiteName]
                        else:
                            self.ui.warn("WARNING: Test suite '", testSuiteName, "' references unknown specification: '", specName, "'.\n")
                else:
                    self.ui.warn("ERROR: Test suite '", testSuiteName, "' does not have target specifications.\n")
        else:
            self.ui.status("No test suites identified\n")
            return 0
        if (not self.buildSpecNames):
            self.ui.status("No target specifications identified\n")
            return -3
        self.flagData = self._loadShepherdData('test_flags', 'test flag')
        if (not self.flagData):
            self.ui.warn("ERROR: Unable to load flag information\n")
            return -4
        self.buildSuiteNames.sort()
        for testSuiteName in self.buildSuiteNames:
            data = self.testSuiteData[testSuiteName]
            specData = self.specificationData[data['specs'][0]]
            specURL = specData['base_uri'] if ('base_uri' in specData) else specData.get('draft_uri')
            draftURL = specData['draft_uri'] if ('draft_uri' in specData) else specData.get('base_uri')
            self.testSuites[testSuiteName] = Suite.TestSuite(testSuiteName, data['title'], specURL, draftURL, self.sourceCache, self.ui) # XXX need to support multiple specs
            if ('formats' in data):
                self.testSuites[testSuiteName].setFormats(data['formats'])
        self.ui.status("Scanning test files\n")
        for dir in Utils.listdirs('.'):
            if (not (self.sourceTree.isIgnoredDir(dir) or (dir in self.ignorePaths))):
                self.gatherTests(dir)
        if (os.path.exists(self.workPath)):
            self.ui.note("Clearing work path: ", self.workPath, "\n")
            shutil.rmtree(self.workPath)
        suiteData = self.getSuiteData()
        flagData = self.getFlags()
        templatePath = os.path.join('tools', 'templates')
        for testSuiteName in self.buildSuiteNames:
            testSuite = self.testSuites[testSuiteName]
            self.ui.status("Building ", testSuiteName, "\n")
            specSections = self.getSections(self.testSuiteData[testSuiteName]['specs'][0])
            indexer = Indexer.Indexer(testSuite, specSections, suiteData, flagData, True,
                                      templatePathList = [templatePath],
                                      extraData = {'devel' : False, 'official' : True })
            workPath = os.path.join(self.workPath, testSuiteName)
            testSuite.buildInto(workPath, indexer)
        # move from work path to output path
        for testSuiteName in self.buildSuiteNames:
            workPath = os.path.join(self.workPath, testSuiteName)
            outputPath = os.path.join(self.outputPath, testSuiteName)
            backupPath = os.path.join(self.backupPath, testSuiteName) if (self.backupPath) else None
            if (os.path.exists(workPath)):
                if (os.path.exists(outputPath)):
                    if (backupPath):
                        if (os.path.exists(backupPath)):
                            self.ui.note("Removing ", backupPath, "\n")
                            shutil.rmtree(backupPath) # delete old backup
                        self.ui.note("Backing up ", outputPath, " to ", backupPath, "\n")
                        shutil.move(outputPath, backupPath) # backup old output
                    else:
                        self.ui.note("Removing ", outputPath, "\n")
                        shutil.rmtree(outputPath) # no backups, delete old output
                self.ui.note("Moving ", workPath, " to ", outputPath, "\n")
                shutil.move(workPath, outputPath)
        if (os.path.exists(self.workPath)):
            shutil.rmtree(self.workPath)
        return 0
def debugHook(type, value, tb):
    """sys.excepthook replacement that drops into pdb post-mortem on a tty.

    Falls back to the standard excepthook when running interactively or when
    stderr is not attached to a terminal (e.g. output redirection / CI).
    """
    if hasattr(sys, 'ps1') or not sys.stderr.isatty():
        # we are in interactive mode or we don't have a tty-like
        # device, so we call the default hook
        sys.__excepthook__(type, value, tb)
    else:
        import traceback, pdb
        # we are NOT in interactive mode, print the exception...
        traceback.print_exception(type, value, tb)
        # Bug fix: a bare 'print' statement is a no-op expression in Python 3;
        # call it to actually emit the intended blank line.
        print()
        # ...then start the debugger in post-mortem mode.
        pdb.pm()
if __name__ == "__main__": # called from the command line
sys.excepthook = debugHook
parser = optparse.OptionParser(usage = "usage: %prog [options] test_suite [...]")
parser.add_option("-q", "--quiet",
action = "store_true", dest = "quiet", default = False,
help = "don't print status messages to stdout")
parser.add_option("-d", "--debug",
action = "store_true", dest = "debug", default = False,
help = "print detailed debugging information to stdout")
parser.add_option("-v", "--verbose",
action = "store_true", dest = "verbose", default = False,
help = "print more detailed debugging information to stdout")
parser.add_option("-o", "--output", dest = "output", metavar = "OUTPUT_PATH",
help = "Path to build into (default 'dist')")
parser.add_option("-b", "--backup", dest = "backup", metavar = "BACKUP_PATH",
help = "Path to preserve old version to")
parser.add_option("-i", "--ignore",
action = "append", dest = "ignore", metavar = "IGNORE_PATH",
help = "Ignore files in this path")
parser.add_option("-c", "--cache",
action = "store_true", dest = "cache", default = False,
help = "use cached test suite and specification data only")
(options, args) = parser.parse_args()
ui = ui.ui()
ui.setconfig('ui', 'debug', str(options.debug))
ui.setconfig('ui', 'quiet', str(options.quiet))
ui.setconfig('ui', 'verbose', str(options.verbose))
builder = Builder(ui, options.output, options.backup, options.ignore, options.cache)
result = builder.build(args)
quit(result)
| [
"peter.linss@hp.com"
] | peter.linss@hp.com |
90687734a25d313028207d5b66add9b5d039eb1f | 6ab217b675b0d33dec9d8985efc2de314e3a7a28 | /menus/controllers/restapi/menu_category/urls.py | 05579b2896e01b722317338f1b06535471c80647 | [] | no_license | nujkram/dream_cream_pastries | 3547928af859ebbb93f8d6ff64d02796d8c61a0c | c6a764f4f2c16191661ee6747dc0daa896eae5ec | refs/heads/master | 2023-06-20T20:20:21.001373 | 2021-07-29T00:55:49 | 2021-07-29T00:55:49 | 375,721,861 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,264 | py | from django.urls import path
from dream_cream_pastries_project.urls import (
URL_READ_ONLY,
URL_DETAIL,
URL_CREATE,
URL_UPDATE,
URL_DELETE
)
from .api import(
ApiPublicMenuCategoryListDetail,
ApiPrivateMenuCategoryViewSet
)
VERSION = 'v1'
urlpatterns = [
# public
path(
f'{VERSION}/public/list',
ApiPublicMenuCategoryListDetail.as_view(URL_READ_ONLY),
name='api_public_menu_category_list_detail'
),
# private
path(
f'{VERSION}/private/list',
ApiPrivateMenuCategoryViewSet.as_view(URL_READ_ONLY),
name='api_private_menu_category_list_detail'
),
path(
f'{VERSION}/private/create',
ApiPrivateMenuCategoryViewSet.as_view(URL_CREATE),
name='api_private_menu_category_create'
),
path(
f'{VERSION}/private/<pk>/update',
ApiPrivateMenuCategoryViewSet.as_view(URL_UPDATE),
name='api_private_menu_category_update'
),
path(
f'{VERSION}/private/<pk>/delete',
ApiPrivateMenuCategoryViewSet.as_view(URL_DELETE),
name='api_private_menu_category_delete'
),
]
"""
Add to urls.py urlpatterns:
path('menu_category/api/', include('menus.controllers.restapi.menu_category.urls'))
| [
"markjungersaniva@gmail.com"
] | markjungersaniva@gmail.com |
0646e9fd57a5a8ba9198885afcbdf59f25a09de9 | 027635467005c93a5b5406030b6f8852368e6390 | /question1_highest_average.py | a26682a8d9d01efbdae4eb073f9c60eec3e52feb | [] | no_license | Shadyaobuya/Opibus-Assessment | 0472a05e4c78b28cc5779d1a2a78c29cabb1ba04 | 8675e82a1c64d864eb4f85604d7843670a3f8078 | refs/heads/master | 2023-08-22T16:14:57.912494 | 2021-10-16T11:40:14 | 2021-10-16T11:40:14 | 417,516,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | #This is a program that takes in a list of discharge rates and returns the highest average
def find_highest_average(discharge):
new_list=[] #create an empty list that will hold the sum of two contiguous rates
for rate in range(len(discharge)-1):
high=discharge[rate]+discharge[rate+1]
new_list.append(high) #get the sum of every two contiguous rates and append it to the empty list
highest_sum=new_list[0] #make an assumption that the highest sum is at the first index of the empty list
for i in new_list:
if i >=highest_sum: #loop through the empty list and reasign the value of the highest sum
highest_sum=i
highest_average=highest_sum/2 #get the average of the highest sum
return highest_average
if __name__ == '__main__':
    # Sample runs with the expected result for each list.
    print(find_highest_average([2, 3, 4, 1, 5]))     # expect 3.5, from the pair [3, 4]
    print(find_highest_average([2, 3, 4, 8, 1, 5]))  # expect 6.0, from the pair [4, 8]
    print(find_highest_average([6, 1, 7, 3, 9, 6]))  # expect 7.5, from the pair [9, 6]
| [
"shadyaobuyagard@gmail.com"
] | shadyaobuyagard@gmail.com |
9316d52b5203a285e4a6f133e847d435da403389 | b622d5918d7c456393bd37d85b673d379acabb96 | /fetch_hubble.py | 55fcf39e90f44e5c437f6ce90f8bb7670d79fb15 | [
"MIT"
] | permissive | Nais-Alkator/Space_Instagram | ea827ff30714799db863fe8a20438cb571e2b44b | 1edc8d80427d5bd4331b8578f8250ec599334cdf | refs/heads/master | 2023-05-24T06:48:30.836116 | 2019-08-02T18:26:34 | 2019-08-02T18:26:34 | 200,279,712 | 0 | 0 | MIT | 2023-05-22T22:29:52 | 2019-08-02T18:20:57 | Python | UTF-8 | Python | false | false | 2,163 | py | import requests
import argparse
from save_image import save_image
from os import makedirs
def get_hubble_image_links(image_id):
    """Return the list of file URLs published for one Hubble image id."""
    # NOTE(review): verify=False disables TLS certificate checking -- confirm
    # whether this is actually required for hubblesite.org.
    response = requests.get(
        "http://hubblesite.org/api/v3/image/{}".format(image_id), verify=False)
    response.raise_for_status()
    file_entries = response.json()["image_files"]
    links = []
    for entry in file_entries:
        links.append(entry["file_url"])
    return links
def get_link_extension(url):
    """Return the file extension of *url*, including the leading dot.

    Uses os.path.splitext, so multi-character extensions such as '.jpeg'
    or '.tiff' are handled correctly; the previous ``url[-4:]`` slice only
    worked for three-letter extensions.
    """
    import os.path
    link_extension = os.path.splitext(url)[1]
    return link_extension
def get_images_id_from_hubble_api_collection(collection):
    """Return the ids of every image in a Hubble API collection."""
    response = requests.get(
        "http://hubblesite.org/api/v3/images/{}".format(collection))
    response.raise_for_status()
    return [entry["id"] for entry in response.json()]
def download_image_from_hubble_api(image_id):
    """Download the last-listed file of one Hubble image into images/."""
    makedirs("images", exist_ok=True)
    links = get_hubble_image_links(image_id)
    image_url = "http:" + links[-1]
    extension = get_link_extension(image_url)
    save_image("images/{0}{1}".format(image_id, extension), image_url)
def download_images_from_hubble_api_collection(collection):
    """Download every image of the given Hubble API collection."""
    for image_id in get_images_id_from_hubble_api_collection(collection):
        download_image_from_hubble_api(str(image_id))
def get_parser():
    """Build the command-line parser for the Hubble collection downloader."""
    argument_parser = argparse.ArgumentParser(
        description='Скрипт предназначен для скачивания группы(коллекций) фотографий с Hubble API')
    argument_parser.add_argument(
        'collection',
        help='Название коллекции фотографий для скачивания',
        nargs="?",
        const=1,
        default="holiday_cards")
    return argument_parser
if __name__ == "__main__":
args = get_parser().parse_args()
collection = args.collection
try:
download_images_from_hubble_api_collection(collection)
except requests.exceptions.ConnectionError as error:
exit("Can't get data from server:\n{0}".format(error))
except requests.exceptions.HTTPError as error:
exit("Can't get data from server:\n{0}".format(error)) | [
"nais000k@gmail.com"
] | nais000k@gmail.com |
4f927e9d7d0958d207c867f7e532025737231927 | 1b9c2080c2998bedecd4f8b7aba2929284c5cc60 | /vbet/core/mixin.py | bc48cd0d62efd867f17fd3d8e8b4e4f0ed3add2c | [
"CC0-1.0"
] | permissive | updatedennismwangi/vbettrader | 8fda87b454de54cd2688b8f6c8a86ca67a7b98af | 21d48ea95bc235631aa6f9f47d0635327dd27531 | refs/heads/master | 2023-03-12T04:07:05.911246 | 2021-02-28T08:22:57 | 2021-02-28T08:43:26 | 343,052,657 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | class StatusMap:
RUNNING = 'RUNNING'
PAUSED = 'PAUSED'
INACTIVE = 'INACTIVE'
OFFLINE = 'OFFLINE'
ACTIVE = 'ACTIVE'
SLEEPING = 'SLEEPING'
| [
"updatedennismwangi@gmail.com"
] | updatedennismwangi@gmail.com |
3e061b5de164bf35f58c2cb96f0cc2ea8dc26ee9 | 43b994fe35de83a38b588478c16fe04428fa0932 | /06Pipeline_Mario/Pipeline_Mario.py | 8b6dffca781b02f17b84b5af305e165bf75e940b | [] | no_license | codeplay42/Python_Games | cc3c0202d430304e8e13888d3262e8e4ecfdb0a5 | 8ed4cead5b4b261511d9168b1e280f1d05fcdcd0 | refs/heads/master | 2020-11-27T10:59:37.585197 | 2019-12-21T11:24:22 | 2019-12-21T11:24:22 | 229,412,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,876 | py | # 操作说明:按空格键重新开始游戏,
'''
项目:Pipeline Mario
操作说明:
1.按 A 键开始游戏/重新开始游戏
2.上键跳跃,左右键左右移动,X 键发射火球
注意:
1.每接到一个金币,就可以发生一个火球
2.游戏屏幕尺寸较大,如果无法完整显示游戏,请调整电脑屏幕缩放比例
'''
import arcade
from random import randint
# Window dimensions and title.
SCREEN_WIDTH = 1290
SCREEN_HEIGHT = 1010
SCREEN_TITLE = 'Pipeline Mario'
# Physics engine parameters.
MOVE_SPEED = 6
JUMP_SPEED = 15
GRAVITY = 0.5
class MyGame(arcade.Window):
    """Main window for the Pipeline Mario arcade game.

    Game states: 'start' (cover screen), 'playing', 'game over'.
    Collecting a coin grants one fireball; touching a turtle costs a life.
    """

    def __init__(self, width, height, title):
        super().__init__(width, height, title)
        self.game_state = 'start'
        self.score = 0
        self.coin_sound = arcade.load_sound('sounds/coin.wav')
        self.jump_sound = arcade.load_sound('sounds/jump.wav')
        # Load the cover, game-over screen, Mario icon, coin icon and floor sprites.
        self.cover = arcade.Sprite(
            'images/cover.png', center_x=SCREEN_WIDTH / 2, center_y=SCREEN_HEIGHT / 2)
        self.game_over = arcade.Sprite(
            'images/game_over.png', center_x=SCREEN_WIDTH / 2, center_y=SCREEN_HEIGHT / 2 + 100)
        self.mario_icon = arcade.Sprite(
            'images/mario_icon.png', center_x=SCREEN_WIDTH / 2 - 100, center_y=SCREEN_HEIGHT - 50)
        self.coin_icon = arcade.Sprite(
            'images/coin_icon.png', center_x=SCREEN_WIDTH / 2 + 100, center_y=SCREEN_HEIGHT - 50)
        self.floor = arcade.Sprite(
            'images/floor.png', center_x=SCREEN_WIDTH / 2, center_y=40)
        # Eight Mario costumes: indices 0-3 face left, 4-7 face right.
        self.mario_list = arcade.SpriteList()
        for i in range(8):
            self.mario_list.append(arcade.Sprite(
                'images/mario{}.png'.format(i + 1), center_x=100, center_y=136))
        self.mario = arcade.Sprite(
            'images/mario1.png', center_x=100, center_y=136)
        self.direction = 0       # 0 = facing left, 1 = facing right
        self.index = 0           # current costume index into mario_list
        self.count = False       # True while the run animation should advance
        self.timer = 0
        self.key_pressed = False
        self.pipe_up = arcade.SpriteList()
        self.pipe_down = arcade.SpriteList()
        self.bricks = arcade.SpriteList()
        self.bricks.append(self.floor)
        self.monsters = arcade.SpriteList()
        pipe_up_pos = [(120, SCREEN_HEIGHT - 180),
                       (SCREEN_WIDTH - 120, SCREEN_HEIGHT - 180)]
        pipe_down_pos = [(80, 145), (SCREEN_WIDTH - 80, 145)]
        # Load the fireball, coin and enemy sprites.
        self.fireball = arcade.Sprite(
            'images/fireball1.png', center_x=-100, center_y=100)
        self.shoot = False
        self.coin_list = arcade.SpriteList()
        self.enermy_list = arcade.SpriteList()
        # Load the pipe sprites; coins and turtles spawn at the upper pipe exits.
        for i in range(2):
            pipe_up = arcade.Sprite('images/pipe_up{}.png'.format(i + 1),
                                    center_x=pipe_up_pos[i][0], center_y=pipe_up_pos[i][1])
            pipe_down = arcade.Sprite('images/pipe_down{}.png'.format(
                i + 1), center_x=pipe_down_pos[i][0], center_y=pipe_down_pos[i][1])
            coin = arcade.Sprite(
                'images/coin.png', center_x=250 + 790 * i, center_y=SCREEN_HEIGHT - 155)
            turtle = arcade.Sprite(
                'images/turtle{}.png'.format(i + 1), center_x=250 + 790 * i, center_y=SCREEN_HEIGHT - 155)
            self.pipe_up.append(pipe_up)
            self.pipe_down.append(pipe_down)
            self.coin_list.append(coin)
            self.enermy_list.append(turtle)
        # Build the brick platforms: seven rows of the given lengths.
        num_list = [16, 16, 16, 6, 6, 14, 14]
        birck_pos = [(-60, SCREEN_HEIGHT - 290), (SCREEN_WIDTH - num_list[1] * 40 + 100, SCREEN_HEIGHT - 290),
                     (SCREEN_WIDTH / 2 - num_list[2] * 20 + 20, SCREEN_HEIGHT - 500),
                     (-60, SCREEN_HEIGHT - 550), (SCREEN_WIDTH - num_list[4] * 40 + 100, SCREEN_HEIGHT - 550),
                     (-60, SCREEN_HEIGHT - 750), (SCREEN_WIDTH - num_list[6] * 40 + 100, SCREEN_HEIGHT - 750)]
        for i in range(7):
            for j in range(num_list[i]):
                brick = arcade.Sprite('images/brick.png')
                brick.center_x = birck_pos[i][0] + j * 40
                brick.center_y = birck_pos[i][1]
                self.bricks.append(brick)
        self.life = 3
        self.coin_num = 0      # number of coins collected
        self.fireball_num = 0  # fireballs available; each collected coin grants one
        self.fireball_speed = MOVE_SPEED
        # Attach a platformer physics engine to Mario, both coins and both turtles.
        pe_list = [self.mario, self.coin_list[0], self.coin_list[
            1], self.enermy_list[0], self.enermy_list[1]]
        self.physics_engine = []
        for sprite in pe_list:
            self.physics_engine.append(arcade.PhysicsEnginePlatformer(
                sprite, self.bricks, gravity_constant=GRAVITY))

    def on_draw(self):
        """Render the cover, or the playfield plus HUD (and game-over overlay)."""
        arcade.start_render()
        if self.game_state == 'start':
            self.cover.draw()
        else:
            # Draw all gameplay sprites.
            self.enermy_list.draw()
            self.coin_list.draw()
            self.pipe_up.draw()
            self.pipe_down.draw()
            self.mario_list[self.index].draw()
            self.bricks.draw()
            self.fireball.draw()
            self.mario_icon.draw()
            self.coin_icon.draw()
            arcade.draw_text('x {}'.format(self.life), SCREEN_WIDTH /
                             2 - 70, SCREEN_HEIGHT - 65, (255, 255, 255), font_size=30)
            arcade.draw_text('x {}'.format(self.coin_num), SCREEN_WIDTH /
                             2 + 122, SCREEN_HEIGHT - 65, (255, 255, 255), font_size=30)
            if self.game_state == 'game over':
                self.game_over.draw()

    def on_update(self, delta_time):
        """Advance one frame: movement, fireball flight, collisions, animation."""
        if self.game_state == 'playing':
            # Keep Mario inside the horizontal play area (wrap around).
            self.set_boundary(self.mario, 40)
            # Move the turtles and coins (entity 0 moves right, entity 1 left).
            for i in range(len(self.enermy_list)):
                self.enermy_list[i].change_x = 4 - i * 8
                self.coin_list[i].change_x = 3 - i * 6
                self.set_boundary(self.enermy_list[i], 40)
                self.set_boundary(self.coin_list[i], 20)
                # Near a lower pipe: send the sprite back to an upper pipe exit.
                if self.enermy_list[i].center_y < 150 and abs(self.enermy_list[i].center_x - SCREEN_WIDTH * (1 - i)) < 120:
                    self.reset_pos(self.enermy_list[i], i)
                if self.coin_list[i].center_y < 150 and abs(self.coin_list[i].center_x - SCREEN_WIDTH * (1 - i)) < 140:
                    self.reset_pos(self.coin_list[i], i)
            # Move the fireball while it is in flight.
            if self.shoot:
                if self.fireball.center_x > -100 and self.fireball.center_x < SCREEN_WIDTH + 100:
                    self.fireball.center_x += self.fireball_speed
                else:
                    self.shoot = False
            # Collision detection.
            for i in range(len(self.enermy_list)):
                if arcade.check_for_collision(self.mario, self.enermy_list[i]):
                    self.life -= 1
                    self.reset_pos(self.enermy_list[i], i)
                    if self.life == 0:
                        self.game_state = 'game over'
                if arcade.check_for_collision(self.mario, self.coin_list[i]):
                    self.coin_num += 1
                    self.fireball_num += 1
                    self.score += 1
                    arcade.play_sound(self.coin_sound)
                    self.reset_pos(self.coin_list[i], i)
                if arcade.check_for_collision(self.fireball, self.enermy_list[i]):
                    self.score += 1
                    self.reset_pos(self.enermy_list[i], i)
            # Cycle costumes only while Mario moves, producing the run animation
            # (indices 0-3 when facing left, 4-7 when facing right).
            if self.count:
                if self.direction == 0:
                    self.timer += 1
                    if self.timer > 600:
                        self.timer = 0
                    if self.timer % 2 == 0:
                        self.index += 1
                        if self.index > 3:
                            self.index = 0
                else:
                    self.timer += 1
                    if self.timer % 2 == 0:
                        self.index += 1
                        if self.index > 7:
                            self.index = 4
            # Keep the drawn costume in step with the physics sprite.
            self.mario_list[self.index].center_x = self.mario.center_x
            self.mario_list[self.index].center_y = self.mario.center_y
            # Animate only while a movement key is held and Mario is grounded.
            if self.key_pressed and self.physics_engine[0].can_jump():
                self.count = True
            else:
                self.count = False
            # Step the physics engines.
            for pe in self.physics_engine:
                pe.update()

    def set_boundary(self, obj, dis):
        # Wrap: after leaving one side of the screen, re-enter from the other.
        if obj.center_x < -dis:
            obj.center_x = SCREEN_WIDTH + dis
        elif obj.center_x > SCREEN_WIDTH + dis:
            obj.center_x = -dis

    def reset_pos(self, obj, index):
        # Move the sprite back to the upper pipe exit on its side.
        obj.center_x = 250 + 790 * index
        obj.center_y = SCREEN_HEIGHT - 155

    def on_key_press(self, key, modifiers):
        # The A key starts (or restarts) the game.
        if key == arcade.key.A:
            self.game_state = 'playing'
        # Up jumps, left/right move, X shoots a fireball.
        if key == arcade.key.UP:
            if self.physics_engine[0].can_jump():
                self.mario.change_y = JUMP_SPEED
                arcade.play_sound(self.jump_sound)
        if key == arcade.key.LEFT:
            self.mario.change_x = -MOVE_SPEED
            self.key_pressed = True
            self.direction = 0
            # Switch to the left-facing costume set.
            if self.index > 3:
                self.index -= 4
        elif key == arcade.key.RIGHT:
            self.mario.change_x = MOVE_SPEED
            self.key_pressed = True
            self.direction = 1
            # Switch to the right-facing costume set.
            if self.index < 4:
                self.index += 4
        if key == arcade.key.X:
            if self.fireball_num > 0:
                self.shoot = True
                self.fireball_num -= 1
                # Position the fireball at Mario and aim it the way he faces.
                self.fireball.center_x = self.mario.center_x
                self.fireball.center_y = self.mario.center_y
                if self.direction == 1:
                    self.fireball_speed = MOVE_SPEED
                else:
                    self.fireball_speed = -MOVE_SPEED

    def on_key_release(self, key, modifiers):
        # Releasing a movement key stops Mario and the run animation.
        if key == arcade.key.LEFT or key == arcade.key.RIGHT:
            self.mario.change_x = 0
            self.key_pressed = False
def main():
    """Create the game window and enter the arcade event loop."""
    game = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    arcade.run()


if __name__ == '__main__':
    main()
| [
"jasm1156@hotmail.com"
] | jasm1156@hotmail.com |
2d1c10c9c919dd7f1abaf144d80099aa3e504be5 | b4fd72cecf3144496dd264d2dfc66fee1e5185b7 | /TobyAndTheFlowers/generators/gen_random.py | 7248b79789b802ace9d29b164cfe78dceb16a1e9 | [] | no_license | jhonber/Interna2 | ad236bb013c0e35cd81cec4983f994a2fa3de306 | d5eedc1f4bcd6d535680384858cc0375cb9aeae9 | refs/heads/master | 2020-03-17T15:58:49.547465 | 2018-06-11T22:28:16 | 2018-06-11T22:28:16 | 133,731,305 | 5 | 0 | null | 2018-06-11T17:22:09 | 2018-05-16T22:56:07 | TeX | UTF-8 | Python | false | false | 390 | py | import sys
from random import randint
N, Q = map(int, sys.argv[1:3])
print(N, Q)
for i in range(N):
print(randint(0, 1e6), end=' ')
print()
for i in range(Q):
T = randint(1, 2)
print(T, end=' ')
if T == 1:
print(randint(1, N), randint(-1e6, 1e6))
else:
L, R = randint(1, N), randint(1, N)
if L > R:
L, R = R, L
print(L, R) | [
"manuel.felipe.pineda@gmail.com"
] | manuel.felipe.pineda@gmail.com |
1a54da2add1bd9577ec9109d3620de423fa16e30 | d31d744f62c09cb298022f42bcaf9de03ad9791c | /federated/tensorflow_federated/__init__.py | 7153a5a59599a5d2457dc00818688f32f3380d26 | [] | no_license | yuhuofei/TensorFlow-1 | b2085cb5c061aefe97e2e8f324b01d7d8e3f04a0 | 36eb6994d36674604973a06159e73187087f51c6 | refs/heads/master | 2023-02-22T13:57:28.886086 | 2021-01-26T14:18:18 | 2021-01-26T14:18:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,090 | py | # Copyright 2019, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The TensorFlow Federated library."""
import sys
from tensorflow_federated.version import __version__ # pylint: disable=g-bad-import-order
from tensorflow_federated.python import aggregators
from tensorflow_federated.python import learning
from tensorflow_federated.python import simulation
from tensorflow_federated.python.core import backends
from tensorflow_federated.python.core import framework
from tensorflow_federated.python.core import templates
from tensorflow_federated.python.core import test
from tensorflow_federated.python.core import utils
from tensorflow_federated.python.core.api.computation_base import Computation
from tensorflow_federated.python.core.api.computation_types import at_clients as type_at_clients
from tensorflow_federated.python.core.api.computation_types import at_server as type_at_server
from tensorflow_federated.python.core.api.computation_types import FederatedType
from tensorflow_federated.python.core.api.computation_types import FunctionType
from tensorflow_federated.python.core.api.computation_types import SequenceType
from tensorflow_federated.python.core.api.computation_types import StructType
from tensorflow_federated.python.core.api.computation_types import StructWithPythonType
from tensorflow_federated.python.core.api.computation_types import TensorType
from tensorflow_federated.python.core.api.computation_types import to_type
from tensorflow_federated.python.core.api.computation_types import Type
from tensorflow_federated.python.core.api.computations import check_returns_type
from tensorflow_federated.python.core.api.computations import federated_computation
from tensorflow_federated.python.core.api.computations import tf_computation
from tensorflow_federated.python.core.api.intrinsics import federated_aggregate
from tensorflow_federated.python.core.api.intrinsics import federated_apply
from tensorflow_federated.python.core.api.intrinsics import federated_broadcast
from tensorflow_federated.python.core.api.intrinsics import federated_collect
from tensorflow_federated.python.core.api.intrinsics import federated_eval
from tensorflow_federated.python.core.api.intrinsics import federated_map
from tensorflow_federated.python.core.api.intrinsics import federated_mean
from tensorflow_federated.python.core.api.intrinsics import federated_reduce
from tensorflow_federated.python.core.api.intrinsics import federated_secure_sum
from tensorflow_federated.python.core.api.intrinsics import federated_sum
from tensorflow_federated.python.core.api.intrinsics import federated_value
from tensorflow_federated.python.core.api.intrinsics import federated_zip
from tensorflow_federated.python.core.api.intrinsics import sequence_map
from tensorflow_federated.python.core.api.intrinsics import sequence_reduce
from tensorflow_federated.python.core.api.intrinsics import sequence_sum
from tensorflow_federated.python.core.api.placements import CLIENTS
from tensorflow_federated.python.core.api.placements import SERVER
from tensorflow_federated.python.core.api.typed_object import TypedObject
from tensorflow_federated.python.core.api.value_base import Value
from tensorflow_federated.python.core.api.values import to_value
if sys.version_info[0] < 3 or sys.version_info[1] < 6:
raise Exception('TFF only supports Python versions 3.6 or later.')
# Initialize a default execution context. This is implicitly executed the
# first time a module in the `core` package is imported.
backends.native.set_local_execution_context()
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
6968da6b7b6f82c7e33c18ca4b24ccbb4f3c0126 | 5b430dbafe893836e0bc06f6fd9a169d4164a8cb | /photos/urls.py | bc038cebceb2d49a746b763b625593027b644a03 | [
"MIT"
] | permissive | tytyne/instaphoto | e82d8d9bd31d37b4f2ab2f05ef2bf7e0e5bf8d5b | 16effd5c498874bc0e6fdfc56ea7c1b594e5ff74 | refs/heads/master | 2022-12-10T03:30:24.780360 | 2019-04-01T16:00:59 | 2019-04-01T16:00:59 | 178,399,082 | 0 | 0 | null | 2022-12-08T01:03:42 | 2019-03-29T12:11:46 | Python | UTF-8 | Python | false | false | 796 | py | from django.conf.urls import url
from django.conf import settings
from django.conf.urls.static import static
from . import views
urlpatterns=[
url(r'^$',views.photos_today,name='photosToday'),
url(r'^archives/(\d{4}-\d{2}-\d{2})/$',views.past_days_photos,name = 'pastphotos'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^image/(\d+)',views.image,name ='image'),
url(r'^photo/image$', views.photo_image, name='photo_image'),
url(r'^upload/profile', views.upload_profile, name='upload_profile'),
url(r'^profile/', views.profile, name='profile'),
url(r'^comment/(?P<article_id>[0-9]+)/$', views.add_comment, name='add_comment'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) | [
"dusaflora2@gmail.com"
] | dusaflora2@gmail.com |
b7e8eb4ff673d765760791c286fd4afa4606bcb5 | 790571af693f38b2423aca66e5ab7db65562c5a5 | /data/real_len.py | 1f85dc6eb9e871fd206cb58fb75229e2122f4478 | [] | no_license | andiac/grammarVAE | cfe03928b910d635d90d01fb15c9aefc6a4b2f20 | 75f361a70a5cfaa5ba54b91cea335569b91b3ac9 | refs/heads/master | 2021-04-09T13:17:23.919604 | 2018-05-31T23:08:55 | 2018-05-31T23:08:55 | 125,671,733 | 0 | 0 | null | 2018-03-17T21:49:42 | 2018-03-17T21:49:41 | null | UTF-8 | Python | false | false | 100 | py | f = open("2db.cfg")
lines = []
for line in f:
lines.append(line.strip())
print(len(set(lines)))
| [
"me@andi.ac"
] | me@andi.ac |
abada1167457df8faaf71d0f85057c37fcd5b748 | 929fc8dd47b91c963c8c2f81d88e3d995a9dfc7c | /src/subject/Tree.py | 3ebe30494ae72e4da3574a67bea453247420b88b | [] | no_license | 1325052669/leetcode | fe7571a9201f4ef54089c2e078810dad11205b14 | dca40686c6a280bd394feb8e6e78d40eecf854b9 | refs/heads/master | 2023-04-01T17:53:30.605822 | 2021-04-10T15:17:45 | 2021-04-10T15:17:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | class TreeNode:
def __init__(self,val):
self.val=val
self.left =None
self.right = None
class Tree:
def __init__(self):
pass
def pre_order_traverse(self,root):
if not root:return []
res = []
def dfs(node,res):
if not node:return
res.append(node.val)
dfs(node.left,res)
dfs(node.right,res)
dfs(root,res)
return res
def pre_order_iterative(self, root):
if not root:return []
stack = [root]
res =[]
while stack:
node = stack.pop()
res.append(node.val)
if node.right:
stack.append(node.right)
if node.left:
stack.append(node.left)
return res
def pre_order_divide_conquer(self,node):
if not node:return []
res=[node.val]
left = self.pre_order_divide_conquer(node.left)
right = self.pre_order_divide_conquer(node.right)
res.extend(left)
res.extend(right)
return res
def in_order_traverse(self,root):
if not root:return []
res = []
def dfs(node,res):
if not node:return
dfs(node.left,res)
res.append(node.val)
dfs(node.right,res)
dfs(root,res)
return res
def in_order_iterative(self,root):
if not root:return []
stack = []
res = []
cur = root
while stack or cur:
if cur:
stack.append(cur)
cur = cur.left
else:
node = stack.pop()
res.append(node.val)
cur = node.right
return res
def in_order_divide_conqur(self,root):
if not root:return []
res =[]
left = self.in_order_divide_conqur(root.left)
res += left
res.append(root.val)
right = self.in_order_divide_conqur(root.right)
res+=right
return res
def post_order_traverse(self,root):
if not root:return []
def dfs(node,res):
if not node:return
dfs(node.left,res)
dfs(node.right,res)
res.append(node.val)
res=[]
dfs(root,res)
return res
def post_order_divide_conqur(self,node):
if not node:return []
res = []
left = self.post_order_divide_conqur(node.left)
right = self.post_order_divide_conqur(node.right)
res+=left
res+=right
res.append(node.val)
return res
def main():
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
root.left.left = TreeNode(4)
root.left.right = TreeNode(5)
root.right.left = TreeNode(6)
root.right.right = TreeNode(7)
# print(Tree().pre_order_traverse(root))
# print(Tree().pre_order_iterative(root))
# print(Tree().pre_order_divide_conquer(root))
# print(Tree().in_order_traverse(root))
# print(Tree().in_order_iterative(root))
# print(Tree().in_order_divide_conqur(root))
print(Tree().post_order_traverse(root))
print(Tree().post_order_divide_conqur(root))
if __name__ == '__main__':
main() | [
"js7995@nyu.edu"
] | js7995@nyu.edu |
7a76eb796960f6e5585664e4a5e5110ef3642798 | 20e22741cb1b4aee02d22b8e304f8023fb346c25 | /media.py | 0931a385c48171e3574cde7cecc301a3cecf8c26 | [] | no_license | hcdifrancesco/Movie-Website | fc9090d733c078f95a5676c63014280ec2550fd2 | 5c89a04462617efa69c24765d87e696e61e3cd0a | refs/heads/master | 2020-06-06T14:40:08.872021 | 2015-07-06T03:53:39 | 2015-07-06T03:53:39 | 38,520,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | # importing webbrowser to open images and videos on browser.
import webbrowser
class Movie():
"""This Class provides a way to store information about movies"""
# Class Variable Defining Ratings of Movies.
VALID_RATINGS = ["G", "PG", "PG-13", "R"]
# function __init__ with 7 attributes of each movie.
def __init__(self, movie_title, movie_storyline, poster_image,
trailer_youtube, movie_actors, movie_oscar_wins):
self.title = movie_title
self.storyline = movie_storyline
self.poster_image_url = poster_image
self.trailer_youtube_url = trailer_youtube
self.starring = movie_actors
self.oscar_wins = movie_oscar_wins
# function show_trailer opens youtube trailer of corresponding movie.
def show_trailer(self):
webbrowser.open(self.trailer_youtube_url)
| [
"hcdifrancesco@gmail.com"
] | hcdifrancesco@gmail.com |
cf4964faa747c9aa4eca003628a3c7b18cc6b9f7 | be15873fb5254653bf3f50ade2d2619723b24bdd | /Mundo_2(Python)/elif/aula12_desafio39.py | 85b17c2655fecb251f5b6c01a1af2eaaaecf4e28 | [] | no_license | Yasmin-Core/Python | 5be367909c6a9fb408873a3212ed1b1c35384521 | de3b999922e08f9e78c4f439cca41acc4630bf25 | refs/heads/main | 2023-06-05T02:09:36.160880 | 2021-06-14T18:16:18 | 2021-06-14T18:16:18 | 376,891,635 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | ano= int(input('Digite o seu ano de nascimento: '))
if ano == 2003:
print ('Essa é a hora certa para se alistar!')
elif ano < 2003:
print ('Você ainda esta a tempo de alistar')
a= 2003-ano
print ('Falta {} anos para vocẽ se alistar!'.format(a))
elif ano > 2003:
print ('Você está atrasado para se alistar!')
b= ano - 2003
print('Você esta atrasado a {} ano(s).'.format(b)) | [
"yasmin.cristina.ifsp@gmail.com"
] | yasmin.cristina.ifsp@gmail.com |
7064b6c540d1e3db6ac60496c475efd236284da2 | 41ba693aea0bfe9ce5f25a30ac1c1c314f6d0f1d | /apps/user/urls.py | c8ddb0a1245f90b142a1641b254ef576ef81aa65 | [] | no_license | Back23/dailyfresh | 64b64103917ab27daa72161b601984cb121bb845 | 9542122fa2cb0c9b5839407cb51e95cd4171e804 | refs/heads/master | 2020-08-12T14:44:24.741449 | 2019-10-24T06:12:44 | 2019-10-24T06:12:44 | 214,784,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,303 | py | from django.urls import path, re_path
from django.contrib.auth.decorators import login_required
# from user import views
from user.views import RegisterView, ActiveView, LoginView, UserInfoView, UserOrderView, AddressView, LogoutView
app_name = 'apps.user'
urlpatterns = [
# path('register/', views.register, name='register'), # 注册
# path('register_handle/', views.register_handle, name='register_handle'), # 注册处理
path('register/', RegisterView.as_view(), name='register'), # 注册
re_path('active/(?P<token>.*)', ActiveView.as_view(), name='active'), # 激活
path('login/', LoginView.as_view(), name='login'), # 登录
path('logout/', LogoutView.as_view(), name='logout'), # 注销登录
# path('', login_required(UserInfoView.as_view()), name='user'), # 用户中心-信息页
# path('order/', login_required(UserOrderView.as_view()), name='order'), # 用户中心-订单页
# path('address/', login_required(AddressView.as_view()), name='address'), # 用户中心-地址页
path('', UserInfoView.as_view(), name='user'), # 用户中心-信息页
path('order/<int:page>', UserOrderView.as_view(), name='order'), # 用户中心-订单页
path('address/', AddressView.as_view(), name='address'), # 用户中心-地址页
]
| [
"838659435@qq.com"
] | 838659435@qq.com |
44783e23ab08ca059851488379212327deccf495 | 1d04b3471a67175b95443700ececa917d0af117a | /registration/models.py | 8f1e6edb99aa22e982535b0607ba51949fb48629 | [] | no_license | StutiSri/django | 2456adde1c46b124f703a1cd4df00fec6554fb2f | 5222bcbb106694e8cc350eebb6bb3bc4b36a2882 | refs/heads/master | 2021-01-10T14:09:03.691245 | 2016-02-01T18:40:41 | 2016-02-01T18:40:41 | 50,861,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,933 | py | from __future__ import unicode_literals
from django.db import models
class Customer(models.Model):
customer_id = models.CharField(max_length=100,primary_key=True)
customer_name = models.CharField(max_length=100)
customer_email = models.CharField(max_length=50)
customer_phone = models.CharField(max_length=10)
customer_password = models.CharField(max_length=100)
def __unicode__(self):
return self.customer_id
class Meta:
db_table = 'customer_tbl'
class Shop(models.Model):
shop_id = models.CharField(max_length=100,primary_key=True)
shop_name = models.CharField(max_length=100)
shop_address = models.CharField(max_length=200)
shop_area = models.CharField(max_length=100)
min_discount = models.IntegerField(default=5)
def __unicode__(self):
return self.shop_id
class Meta:
db_table = 'shop_tbl'
class Shopkeeper(models.Model):
shopkeeper_id = models.CharField(max_length=100,primary_key=True)
shopkeeper_name = models.CharField(max_length=100)
shopkeeper_email_id = models.CharField(max_length=50)
shopkeeper_phone_number = models.CharField(max_length=10)
shopkeeper_password = models.CharField(max_length=100)
shop_id = models.ForeignKey(Shop, on_delete=models.CASCADE)
def __unicode__(self):
return self.shopkeeper_id
class Meta:
db_table = 'shopkeeper_tbl'
class Category(models.Model):
category_id = models.CharField(max_length=3,primary_key=True)
category_name = models.CharField(max_length=100)
def __unicode__(self):
return self.map_id
class Meta:
db_table = 'category_tbl'
class ShopCategoryMapping(models.Model):
map_id = models.CharField(max_length=100,primary_key=True)
shop_id = models.ForeignKey(Shop, on_delete=models.CASCADE)
category_id = models.ForeignKey(Category, on_delete=models.CASCADE)
discount_percent = models.DecimalField(max_digits=5, decimal_places=2)
def __unicode__(self):
return self.map_id
class Meta:
db_table = 'shop_category_mapping_tbl' | [
"saumyasinha.23"
] | saumyasinha.23 |
83a50432ac2dc520c0fa7ab488b1f9a8e1c9f4f9 | b43de9dd6ab00502371ab5b7d80c931f2d9825af | /flask_learn/db_demo3/extends_demo.py | 6ec9119a22cd5805ae36266a75f51a12ab3bdec8 | [] | no_license | Airseai6/test | 43a806826004c0100942c14b3bc234a12712685c | 04704c3ff10a8e1601451aa7a5965e03dec01481 | refs/heads/master | 2020-04-16T04:29:07.376079 | 2019-11-04T14:51:36 | 2019-11-04T14:51:36 | 165,268,800 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 301 | py | #! python3
# -*- coding:utf-8 -*-
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/login/')
def login():
return render_template('login.html')
if __name__ == '__main__':
app.run(debug=True) | [
"1042536516@qq.com"
] | 1042536516@qq.com |
eb96064b42e96778d4d8b0bdffaf9669ba512f73 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2178/60692/285974.py | d49a0e281bfcaae701b49245a014c2d5ce39431b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 816 | py | from collections import defaultdict
'''
n = int(input())
list1 = input().split(" ")
count = 0
s1 = ''
res = []
dic1 = defaultdict(int)
for i in range(n):
count += i + 1
if i == 0:
s1 = list1[i]
else:
s1 += list1[i]
if list1[i] == list1[i - 1]:
dic1[list1[i]] += 1
if dic1[list1[i]] > 1:
count += (dic1[list1[i]] - 1) * dic1[list1[i]] // 2
count -= dic1[list1[i]] * (dic1[list1[i]] + 1) // 2
elif s1[0:i].count(list1[i]) and s1.index(list1[i]) != i - 1:
count -= 1
j = i - 1
t = s1[j:]
while s1[0:j].count(t):
count -= 1
j -= 1
t = s1[j:]
res.append(count)
for j in res:
print(j)
'''
n = int(input)
print(input()) | [
"1069583789@qq.com"
] | 1069583789@qq.com |
5a68d169b1831d85bb68d490f987e3d2d2cbac5a | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startPyquil979.py | e80fc8be8c4791851baaa5a6a9e04a24ad913cfd | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,043 | py | # qubit number=5
# total number=43
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=3
prog += H(1) # number=4
prog += H(2) # number=5
prog += H(3) # number=6
prog += RX(-0.1602212253330796,1) # number=36
prog += H(4) # number=21
prog += H(0) # number=1
prog += H(1) # number=2
prog += H(2) # number=7
prog += H(3) # number=8
prog += X(0) # number=9
prog += CNOT(0,1) # number=28
prog += H(4) # number=31
prog += X(1) # number=29
prog += CNOT(0,1) # number=30
prog += CNOT(0,2) # number=22
prog += CNOT(0,2) # number=25
prog += X(2) # number=26
prog += CNOT(0,2) # number=27
prog += CNOT(0,2) # number=24
prog += X(3) # number=12
prog += X(0) # number=13
prog += X(1) # number=14
prog += X(2) # number=15
prog += X(3) # number=16
prog += H(0) # number=17
prog += H(1) # number=18
prog += H(2) # number=19
prog += H(3) # number=20
prog += H(0) # number=37
prog += CZ(1,0) # number=38
prog += H(0) # number=39
prog += Z(1) # number=34
prog += H(0) # number=40
prog += CZ(1,0) # number=41
prog += H(0) # number=42
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('5q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil979.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
| [
"wangjiyuan123@yeah.net"
] | wangjiyuan123@yeah.net |
becca70bff7e7bf31f995812875dc8047fd6f021 | 177455bdf1fece221eef93b50f16253d342aa6a6 | /alerta/api/v2/views.py | b3627e1aa8dedf3635fb8cf886a08f541b70a809 | [
"Apache-2.0"
] | permissive | ntoll/alerta | c42630d91bf16cb649b43b69ae798abe60f39ed6 | 8122526b1791a0ff0d1aa26061129892b7e86f00 | refs/heads/master | 2021-01-18T05:18:30.062671 | 2013-03-03T23:17:10 | 2013-03-03T23:17:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,637 | py | from flask import jsonify, request, current_app
from functools import wraps
from alerta.api.v2 import app, mongo
import datetime
# TODO(nsatterl): put these constants somewhere appropriate
MAX_HISTORY = -10 # 10 most recent
# TODO(nsatterl): use @before_request and @after_request to attach a unique request id
@app.before_first_request
def before_request():
# print "load config file with warning message"
pass
# TODO(nsatterl): fix JSON-P
def jsonp(func):
"""Wraps JSONified output for JSONP requests."""
@wraps(func)
def decorated_function(*args, **kwargs):
callback = request.args.get('callback', False)
if callback:
data = str(func(*args, **kwargs).data)
content = str(callback) + '(' + data + ')'
mimetype = 'application/javascript'
return current_app.response_class(content, mimetype=mimetype)
else:
return func(*args, **kwargs)
return decorated_function
@app.route('/alerta/api/v2/alerts/alert/<alertid>')
def get_alert(alertid):
alert = mongo.db.alerts.find_one({'_id': alertid})
if alert:
fix_id(alert)
return jsonify(response={'alert': alert, 'status': 'ok', 'total': 1})
else:
# TODO(nsatterl): include error message as 'message': 'not found' etc.
return jsonify(response={"alert": None, "status": "error", "message": "not found", "total": 0})
@app.route('/alerta/api/v2/alerts')
def get_alerts():
hide_details = request.args.get('hide-alert-details', False, bool)
hide_alert_repeats = request.args.getlist('hide-alert-repeats')
# TODO(nsatterl): support comma-separated fields eg. fields=event,summary
fields = dict((k, 1) for k in request.args.getlist('fields'))
# NOTE: if filtering on fields still always include severity and status in response
if fields:
fields['severity'] = 1
fields['status'] = 1
if request.args.get('hide-alert-history', False, bool):
fields['history'] = 0
else:
fields['history'] = {'slice': MAX_HISTORY}
alert_limit = request.args.get('limit', 0, int)
query = dict()
query_time = datetime.datetime.utcnow()
from_date = request.args.get('from-date')
if from_date:
from_date = datetime.datetime.strptime(from_date, '%Y-%m-%dT%H:%M:%S.%fZ')
from_date = from_date.replace(tzinfo=pytz.utc)
to_date = query_time
to_date = to_date.replace(tzinfo=pytz.utc)
query['lastReceiveTime'] = {'$gt': from_date, '$lte': to_date}
sort_by = list()
for s in request.args.getlist('sort-by'):
if s in ['createTime', 'receiveTime', 'lastReceiveTime']:
sort_by.append((s, -1)) # sort by newest first
else:
sort_by.append((s, 1)) # alpha-numeric sort
if not sort_by:
sort_by.append(('lastReceiveTime', -1))
return jsonify(details=hide_details, repeats=hide_alert_repeats, fields=fields)
@app.route('/alerta/api/v1/alerts/alert.json', methods=['POST', 'PUT'])
def create_alert(alertid):
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['POST', 'PUT'])
def modify_alert(alertid):
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>/tag', methods=['POST', 'PUT'])
def tag_alert(alertid):
pass
@app.route('/alerta/api/v2/alerts/alert/<alertid>', methods=['DELETE'])
def delete_alert(alertid):
pass
@app.route('/alerta/api/v2/resources')
def get_resources(alertid):
pass
def fix_id(alert):
if '_id' in alert:
alert['id'] = alert['_id']
del alert['_id']
return alert
| [
"nick.satterly@guardian.co.uk"
] | nick.satterly@guardian.co.uk |
d91fffbacc9a214b1a24940f01603e5f5f4593de | 00da34ebd205f7f592219ab8710b5420185ad65d | /construct-binary-tree-from-inorder-and-postorder-traversal/solution.py | b1133d776a895a653d89b262dfe18cd6649c0583 | [] | no_license | dgoffredo/leetcode | 4ba1fbdc53001c8fcd3dc375d4ffeff24e401a93 | 3acb2cb27908ea19c01cfa3f05671b901d499362 | refs/heads/master | 2021-06-24T21:29:29.377154 | 2021-02-04T00:33:30 | 2021-02-04T00:33:30 | 190,288,460 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py |
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
return build_tree(inorder, postorder)
def build_tree(in_order, post_order):
def build_node(post_i, in_beg, in_end):
print(f'build_node({post_i}, {in_beg}, {in_end}):')
if in_beg == in_end:
print(f' empty, returning (None, {post_i + 1})')
return None, post_i + 1
value = post_order[post_i]
in_root = in_order.index(value, in_beg, in_end)
print(f' value={value}, in_root={in_root}')
right, right_i = build_node(post_i - 1, in_root + 1, in_end)
left, left_i = build_node(right_i - 1, in_beg, in_root)
result = TreeNode(value)
result.left = left
result.right = right
print(f' returning (TreeNode(...), {right_i})')
return result, left_i
assert len(post_order) == len(in_order)
if len(post_order) == 0:
return None
else:
root, _ = build_node(post_i=len(post_order) - 1,
in_beg=0,
in_end=len(in_order))
return root
| [
"dmgoffredo@gmail.com"
] | dmgoffredo@gmail.com |
10f00261fcd94fe0645177f4c178c23619e24d0b | 2f630a32527e6367842f1c15462e43ccfd2ef459 | /code_data_processing/2-1.extract_patches.py | 647692dc80fbad7a18a0fba7e4c3b404a3dbcee3 | [] | no_license | jligm-hash/GeneMutationFromHE | a7c704d2ee271ec5c6549ff153b1acb9842989c5 | deeba0ac2d032aa116b0a4f1b246631635c4b601 | refs/heads/main | 2023-03-16T06:19:26.605226 | 2021-03-18T23:38:54 | 2021-03-18T23:38:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,961 | py | import os
import openslide
from openslide import open_slide
from skimage import io
import numpy as np
from tqdm import tqdm
import pandas as pd
import time
import argparse
''' global variable '''
dataset = 'TCGA-BRCA'
svs_data_dir = '../data/{:s}/slides'.format(dataset)
img_data_dir = '../data/{:s}/20x_512x512'.format(dataset)
thresh_high = 220.0/255 # threshod to remove blank patches
patch_size = 512 # the size of patches extracted from 20x svs image
target_mag = 20
resize_flag = False # True: downsample the patch from 40x image by 2
# mag_missing_dict = {'TCGA-OL-A5RU-01Z-00-DX1': 20, 'TCGA-OL-A5RV-01Z-00-DX1': 20, 'TCGA-OL-A5RW-01Z-00-DX1': 20,
# 'TCGA-OL-A5RX-01Z-00-DX1': 20, 'TCGA-OL-A5RY-01Z-00-DX1': 20, 'TCGA-OL-A5RZ-01Z-00-DX1': 20,
# 'TCGA-OL-A5S0-01Z-00-DX1': 20}
slides_list = pd.read_csv('../data/{:s}/slide_selection_final.txt'.format(dataset), header=None)
slides_list = list(slides_list[0].values)
def main():
parser = argparse.ArgumentParser(description='The start and end positions in the file list')
parser.add_argument('--start', type=float, default=0.0, help='start position')
parser.add_argument('--end', type=float, default=1.0, help='end position')
args = parser.parse_args()
if not os.path.exists(img_data_dir):
os.mkdir(img_data_dir)
# load slide filenames
folders = sorted(os.listdir(svs_data_dir))
N = len(folders)
folders_to_be_processed = folders[int(N*args.start):int(N*args.end)]
N_slide = 0
for folder in folders_to_be_processed:
files = os.listdir('{:s}/{:s}'.format(svs_data_dir, folder))
for slide_filename in files:
if slide_filename[-3:] != 'svs':
continue
slide_name = slide_filename.split('.')[0]
if slide_name not in slides_list:
continue
# if slide_name != 'TCGA-DD-AACS-01Z-00-DX1':
# continue
# create folder for each slide sample if it doesn't exist
sample_folder = '{:s}/{:s}'.format(img_data_dir, slide_name)
if not os.path.exists(sample_folder):
os.makedirs(sample_folder)
# else:
# continue
N_slide += 1
print('Processing slide {:d}: {:s}'.format(N_slide, slide_name))
slidePath = '{:s}/{:s}/{:s}'.format(svs_data_dir, folder, slide_filename)
slide = open_slide(slidePath)
if 'aperio.AppMag' not in slide.properties:
print('no magnification param')
continue
# magnification = mag_missing_dict[slide_name]
else:
magnification = float(slide.properties['aperio.AppMag'])
extract_patch_size = int(patch_size * magnification / target_mag)
scale = 20.0 / patch_size
w, h = slide.level_dimensions[0]
# remove some pixels on the bottom and right edges of the image
w = w // extract_patch_size * extract_patch_size
h = h // extract_patch_size * extract_patch_size
patch_mask = np.zeros((int(h*scale), int(w*scale))) # the mask to indicate which parts are extracted
count = 0
num_patch = 0
time_slide_reading = 0
time_resize = 0
time_write_image = 0
time_all = 0
for i in tqdm(range(0, w, extract_patch_size)):
for j in range(0, h, extract_patch_size):
t1 = time.time()
patch = slide.read_region((i, j), level=0, size=[extract_patch_size, extract_patch_size])
count += 1
t2 = time.time()
# downsample to target patch size
patch = patch.resize([patch_size, patch_size])
t3 = time.time()
# check if the patch contains tissue
patch_gray = patch.convert('1')
ave_pixel_val = np.array(patch_gray).mean()
if ave_pixel_val < thresh_high:
num_patch += 1
patch_name = '{:s}/{:d}.png'.format(sample_folder, count)
patch.save(patch_name)
c1 = int(i*scale)
c2 = int((i+extract_patch_size)*scale)
r1 = int(j*scale)
r2 = int((j+extract_patch_size)*scale)
patch_mask[r1:r2, c1:c2] = 1
t4 = time.time()
time_slide_reading += t2 - t1
time_resize += t3 - t2
time_write_image += t4 - t3
time_all += t4 - t1
print('\n\tTotal time: {:.2f}'.format(time_all))
print('\tTime to load slides: {:.2f} ({:.2f} %)'.format(time_slide_reading, time_slide_reading/time_all * 100))
print('\tTime to downsample patches: {:.2f} ({:.2f} %)'.format(time_resize, time_resize/time_all * 100))
print('\tTime to write images: {:.2f} ({:.2f} %)'.format(time_write_image, time_write_image/time_all * 100))
print('\tTotal number: {:d}'.format(num_patch))
# create mask image folder if it doesn't exist
mask_folder = '{:s}/mask'.format(img_data_dir)
if not os.path.exists(mask_folder):
os.makedirs(mask_folder)
# save mask images
mask_name = '{:s}/{:s}_mask.png'.format(mask_folder, slide_name)
io.imsave(mask_name, patch_mask)
def rgb2gray(image):
gray = np.zeros((image.shape[0], image.shape[1])) # init 2D numpy array
for i in range(len(image)):
for j in range(len(image[i])):
pixel = image[i][j]
gray[i][j] = 0.299*pixel[0] + 0.587*pixel[1] + 0.114*pixel[2]
return gray
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | jligm-hash.noreply@github.com |
39c8bf640c1072ad082812245b687c2dd833a8e1 | 8e328d53e32abfdcaa0fbc304caba285124243eb | /source/add_loop.py | 8b1043d0f49241c552d09fdf12e4710237e0e3f2 | [] | no_license | labdevgen/3Dpredictor | b15e00bf3b7cea164ec24f03f1ea88c364e8d400 | 9fd0a9e87a46769fb1c160fac6943b762122e828 | refs/heads/master | 2022-06-01T10:59:45.049824 | 2021-07-12T07:34:48 | 2021-07-12T07:34:48 | 144,841,535 | 8 | 9 | null | 2022-05-24T08:04:51 | 2018-08-15T10:52:04 | Python | UTF-8 | Python | false | false | 512 | py | from LoopReader import LoopReader
from VectPredictorGenerators import loopsPredictorGenerator
def add_loop(validation_data,loop_file):
window_size = 25000 #TODO find binsize
loopsReader = LoopReader(loop_file)
loopsReader.read_loops()
loopspg = loopsPredictorGenerator(loopsReader, window_size)
contacts = validation_data[["chr", "contact_st", "contact_en", "contact_count"]].copy()
isLoop_df = loopspg.get_predictors(contacts)
validation_data["IsLoop"] = isLoop_df["IsLoop"].values
| [
"belka2195@mail.ru"
] | belka2195@mail.ru |
d96ea8ea9273607b79d71f22aee77ba011474a2a | eed4c50f5e0ab31a4262f0dd72e2dc0d784e1c35 | /store/migrations/0026_venta_created.py | f7183c7ece2720dc611ce358f8509bfcdaee09ee | [] | no_license | rogerarjona/bmsupplyoficial | 9233c39e81592647474f4f8c5a060fa1150220df | 7d22dc3bea80e7a7b7e1c931bef6869eb1fe88a9 | refs/heads/master | 2020-04-08T03:59:45.353150 | 2018-11-28T16:02:48 | 2018-11-28T16:02:48 | 158,998,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 449 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2018-11-28 09:45
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0025_venta_terminada'),
]
operations = [
migrations.AddField(
model_name='venta',
name='created',
field=models.DateTimeField(auto_now=True),
),
]
| [
"soyrogerarjona99@gmail.com"
] | soyrogerarjona99@gmail.com |
04f212c56f4573803110be66d4b7673850e3cc28 | 0faba8d9b677d12861276fdcfa614ab87926e687 | /exporter.py | eb81d1941b0c67b59d52df95d5d7bcdb4c3c9204 | [] | no_license | tkdwns750/scrapper | 92712065b08ab872809eeb83fe86e90fc19e2e55 | 3ad58bd565648249d87494e4b23bd024667715f4 | refs/heads/master | 2023-07-04T07:35:01.985923 | 2021-08-13T09:44:46 | 2021-08-13T09:44:46 | 395,572,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | py | import csv
def save_to_file(jobs):
file = open("jobs.csv", mode="w", encoding='utf-8',newline='')
writer = csv.writer(file)
writer.writerow(["title", "company", "location", "link"])
for job in jobs:
writer.writerow(list(job.values()))
return
| [
"deungdae84@naver.com"
] | deungdae84@naver.com |
fe06736dd4b00d3dcd6c7f45f5dfc19cd05f34e7 | d01056ed28b12b56b9a387cf46cfa6f30256b154 | /Rush01/rush01/urls.py | bce64c2adced5c267ed02b89875bc5601292b9db | [] | no_license | nihuynh/Piscine_Django_Python | 186f55c27445bc4915742612d11824105dcf20e1 | 1ca9bb90c016b67bfb8465a9f2fda0a36dbef3f9 | refs/heads/master | 2020-08-21T18:48:24.661258 | 2017-06-30T16:45:50 | 2017-06-30T16:45:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | """rush01 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('home.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"ajouanna@student.42.fr"
] | ajouanna@student.42.fr |
5d6001cfacad57d41ef4e23401ddea5e3e4e2f1b | 0084d528dfc4ad92c6b484da22a3ae5517d6642f | /app_1_venv/django_test/blog/migrations/0002_comment.py | f0a369cd32eda6e1b5832803a2535b5c4d755dca | [] | no_license | geonaut/Django-By-Example-Textbook | 728db344a25d07b066fc9ab46cc1fd1b726474c7 | ba928b041fd5aee493dbc988106f7ada49c2f075 | refs/heads/master | 2021-01-20T08:37:29.868769 | 2017-05-03T16:02:58 | 2017-05-03T16:02:58 | 90,167,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 988 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
('name', models.CharField(max_length=80)),
('email', models.EmailField(max_length=254)),
('body', models.TextField()),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('active', models.BooleanField(default=True)),
('post', models.ForeignKey(to='blog.Post', related_name='comments')),
],
options={
'ordering': ('created',),
},
),
]
| [
"oli@ddc.uk.net"
] | oli@ddc.uk.net |
e6bbb97d5a2c9fe230fef1f4a2010fdc79ee7f05 | 0f2dd6abc15453cd083f12334c38782567a96a47 | /Exrcs 4/bottelega.py | ca0841050bd0fcd3f3fac748900e9072b3c58f95 | [] | no_license | ivanable74/my_first_python_project | bc680bd007dfc556775a8ce833ddbbef8ee313a0 | 9b60bf3044ef7478f7fe5456300beefe4afb383c | refs/heads/master | 2023-04-22T04:33:05.592330 | 2021-05-10T11:46:32 | 2021-05-10T11:46:32 | 347,362,701 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,411 | py | import telebot
import requests
import sql
import newspik as na
from api_keys import api_key_telegram
def get_key(d, value):
for k, v in d.items():
if v == value:
return k
bot = telebot.TeleBot(api_key_telegram,parse_mode=None)
selc_cat = False
sett_cat = False
selc_kw = False
sett_kw = False
dict_cat = {'Бизнес':'business', 'Развлечение':'entertainment', 'Здоровье':'health', 'Наука':'science', 'Спорт':'sports', 'Технологии':'technology'}
@bot.message_handler(commands=['start'])
def handle_start_help(message):
user_id = message.from_user.id
if(sql.check_sub(user_id)):
bot.send_message(user_id,"Вы уже подписаны")
else:
sql.add_sub(user_id)
bot.send_message(user_id,"Привет. Мы вас добавили в список подписчиков нашего бота.\n Команды ищите в readme или в списке команд бота")
@bot.message_handler(commands=['categorys'])
def handle_categ(message):
global selc_cat
global sett_cat
user_id = message.from_user.id
if(not sql.check_cat(user_id)):
markup_reply = telebot.types.ReplyKeyboardMarkup(row_width=3)
itembtn1 = telebot.types.KeyboardButton('Бизнес')
itembtn2 = telebot.types.KeyboardButton('Развлечение')
itembtn3 = telebot.types.KeyboardButton('Здоровье')
itembtn4 = telebot.types.KeyboardButton('Наука')
itembtn5 = telebot.types.KeyboardButton('Спорт')
itembtn6 = telebot.types.KeyboardButton('Технологии')
markup_reply.add(itembtn1, itembtn2, itembtn3, itembtn4, itembtn5, itembtn6)
bot.send_message(user_id,"Выберете категорию",reply_markup=markup_reply)
selc_cat = True
else:
markup_reply = telebot.types.ReplyKeyboardMarkup(row_width=2)
itembtn1 = telebot.types.KeyboardButton('Удалить категорию')
itembtn2 = telebot.types.KeyboardButton('Отмена')
markup_reply.add(itembtn1, itembtn2)
bot.send_message(user_id,f"Ваша категория - {get_key(dict_cat,sql.ret_cat(user_id))}",reply_markup=markup_reply)
sett_cat = True
@bot.message_handler(commands=['keywords'])
def handle_kw(message):
global selc_kw
global sett_kw
user_id = message.from_user.id
if(not sql.check_kw(user_id)):
bot.send_message(user_id,"Введите ключевое слово")
selc_kw = True
else:
markup_reply = telebot.types.ReplyKeyboardMarkup(row_width=2)
itembtn1 = telebot.types.KeyboardButton('Удалить ключевое слово')
itembtn2 = telebot.types.KeyboardButton('Отмена')
markup_reply.add(itembtn1, itembtn2)
bot.send_message(user_id,f"Ваше ключевое слово - {sql.ret_kw(user_id)}",reply_markup=markup_reply)
sett_kw = True
@bot.message_handler(commands=['news'])
def handle_news(message):
user_id = message.from_user.id
if(not sql.check_cat(user_id) and not sql.check_kw(user_id)):
for i,j in zip(na.ret_news(),na.ret_news_url()):
bot.send_message(user_id,f"{i} \n {j}")
elif(sql.check_cat(user_id) and not sql.check_kw(user_id)):
for i,j in zip(na.ret_news(cat=sql.ret_cat(user_id)),na.ret_news_url(cat=sql.ret_cat(user_id))):
bot.send_message(user_id,f"{i} \n {j}")
elif(not sql.check_cat(user_id) and sql.check_kw(user_id)):
for i,j in zip(na.ret_news(kw=sql.ret_kw(user_id)),na.ret_news_url(kw=sql.ret_kw(user_id))):
bot.send_message(user_id,f"{i} \n {j}")
else:
for i,j in zip(na.ret_news(kw=sql.ret_kw(user_id),cat=sql.ret_cat(user_id)),na.ret_news_url(kw=sql.ret_kw(user_id),cat=sql.ret_cat(user_id))):
bot.send_message(user_id,f"{i} \n {j}")
@bot.message_handler(func=lambda message: True)
def answer_to_message(message):
global selc_cat
global sett_cat
global selc_kw
global sett_kw
user_id = message.from_user.id
if (message.text in dict_cat and selc_cat):
sql.add_cat(user_id,dict_cat.get(message.text))
bot.send_message(user_id,"Категория добавлена успешно", reply_markup=telebot.types.ReplyKeyboardRemove())
selc_cat = False
elif (message.text == "Удалить категорию" and sett_cat):
sql.del_cat(user_id)
bot.send_message(user_id,"Категория удалена",reply_markup=telebot.types.ReplyKeyboardRemove())
sett_cat = False
elif (message.text == "Отмена" and (sett_cat or sett_kw)):
bot.send_message(user_id,"Отмена",reply_markup=telebot.types.ReplyKeyboardRemove())
sett_cat = False
elif (selc_kw):
sql.add_kw(user_id,message.text)
bot.send_message(user_id,"Ключевое слово добавлено успешно")
selc_kw = False
elif (message.text == "Удалить ключевое слово" and sett_kw):
sql.del_kw(user_id)
bot.send_message(user_id,"Ключевое слово удалено",reply_markup=telebot.types.ReplyKeyboardRemove())
sett_cat = False
else:
bot.send_message(user_id,"Что-то полшло не так. Попробуйте ещё раз")
bot.polling() | [
"noreply@github.com"
] | ivanable74.noreply@github.com |
e9e6a2bfea4fe2f830c72c4ec6491b87c98e164e | 35db3111c3cea6ded0590afb5ffb6d1f5bfae68f | /migrations/versions/55666f6099cd_.py | 7fae08751aa7542558c3ac50b67de98fd376a2f1 | [] | no_license | CavsZhouyou/C2C-Web | d08153c613676437e3e80ed9cd2c036928ecb560 | 983c0af68b0170cf3e896231ff303eb25061e9a2 | refs/heads/master | 2020-03-22T02:39:52.268232 | 2018-07-13T02:13:34 | 2018-07-13T02:13:34 | 139,385,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | """empty message
Revision ID: 55666f6099cd
Revises: 194dc955c10c
Create Date: 2018-07-11 21:24:10.419914
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '55666f6099cd'
down_revision = '194dc955c10c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('reservation', sa.Column('res_price', sa.Integer(), nullable=False))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('reservation', 'res_price')
# ### end Alembic commands ###
| [
"qq672804131@gmail.com"
] | qq672804131@gmail.com |
49ef83378fcd0ea9e5514661358c72f05e5b41ae | d37bac0cca5a3fce2eaeded5ab8262f3ec215b85 | /backend/home/migrations/0002_load_initial_data.py | 7de06fbaafa1d57c815551632a91d73c08613ed1 | [] | no_license | crowdbotics-apps/m-18-nov-dev-15260 | 52ada15c3d64dc0ba8fdc83a0887e830268ff02c | 4e951ccfe3ab16025f995ef8fea500522e0470e0 | refs/heads/master | 2023-01-16T06:03:20.219329 | 2020-11-18T06:47:21 | 2020-11-18T06:47:21 | 313,847,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,290 | py | from django.db import migrations
def create_customtext(apps, schema_editor):
CustomText = apps.get_model("home", "CustomText")
customtext_title = "m 18 nov"
CustomText.objects.create(title=customtext_title)
def create_homepage(apps, schema_editor):
HomePage = apps.get_model("home", "HomePage")
homepage_body = """
<h1 class="display-4 text-center">m 18 nov</h1>
<p class="lead">
This is the sample application created and deployed from the Crowdbotics app.
You can view list of packages selected for this application below.
</p>"""
HomePage.objects.create(body=homepage_body)
def create_site(apps, schema_editor):
Site = apps.get_model("sites", "Site")
custom_domain = "m-18-nov-dev-15260.botics.co"
site_params = {
"name": "m 18 nov",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("home", "0001_initial"),
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_customtext),
migrations.RunPython(create_homepage),
migrations.RunPython(create_site),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
714d9b6f3c80fc5f49cf255d24f018aca283a816 | 38b29daadd2a47514a48ff22677a19f2cf3591b6 | /app.py | cef3c877354e66e086c48432b8b695839d9dc0a4 | [] | no_license | wobblewedge/pitt-potty | 8b991ece950539b1a0598b3748655fc1c6ca15a8 | 6eab1b76b0b47d13e1e3f8f896e4e545f022ee1d | refs/heads/master | 2023-05-04T14:05:42.578166 | 2020-08-26T00:09:02 | 2020-08-26T00:09:02 | 283,590,779 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 753 | py | from request.mountainproject import getRocks
import tweepy
import os
from decouple import config
import requests
from PIL import Image
CONSUMER_KEY=config('CONSUMER_KEY')
CONSUMER_SECRET=config('CONSUMER_SECRET')
ACCESS_TOKEN=config('ACCESS_TOKEN')
ACCESS_TOKEN_SECRET=config('ACCESS_TOKEN_SECRET')
ACCOUNT=config('ACCOUNT')
session = requests.session()
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
try:
redirect_url = auth.get_authorization_url()
auth._get_request_token
user = api.get_user(ACCOUNT)
except tweepy.TweepError:
print('Error! Failed to get request token.')
response = getRocks()
print(response)
# api.update_status(route)
| [
"andrew.d.brown1@gmail.com"
] | andrew.d.brown1@gmail.com |
1789ddf92ae9fde3a71fb684deb33619268b254c | 76efcb05773a79c9bc794a2a3464c4b07b280ddf | /Convolutional Neural Network/cnn.py | df60a10786d3a14d33259ab26e22fdc5371eb5e0 | [] | no_license | AbdulazizAlaa/Machine-Learning-Projects | 0c214ee12c0fda40e7715597dc8e3f96eb3ec8e0 | 9401f72422ff0ca199ebaab8f51a28e84cbb7498 | refs/heads/master | 2021-01-13T09:50:10.143753 | 2016-05-17T03:17:48 | 2016-05-17T03:17:48 | 53,135,822 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,271 | py | import numpy as np
import theano.tensor as T
import theano.tensor.signal.pool as pool
from theano import function
from theano import shared
from six.moves import cPickle
import pylab
from PIL import Image
def unpickle(file):
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def show_image(img, filtered):
pylab.figure(1)
pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(img)
pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered[0], cmap='gray')
pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered[1], cmap='gray')
pylab.figure(2)
pylab.subplot(1, 3, 1); pylab.axis('off'); pylab.imshow(filtered[2], cmap='gray')
pylab.subplot(1, 3, 2); pylab.axis('off'); pylab.imshow(filtered[3], cmap='gray')
pylab.subplot(1, 3, 3); pylab.axis('off'); pylab.imshow(filtered[4], cmap='gray')
# pylab.show()
def get_accuracy(predict, actual):
p = np.argmax(predict, axis=1)
num_good = len(np.where(p==actual)[0])
acc = (float(num_good)/np.shape(actual)[0])*100
return acc
class Convultional_layer:
def __init__(self, X, NUM_FILTERS, NUM_CHANELS, FILTER_DIMS, STRIDE, pool_shape, factor):
self.kernals = shared(
np.random.randn(
NUM_FILTERS,
NUM_CHANELS,
FILTER_DIMS[0],
FILTER_DIMS[1]
).astype(X.dtype)*factor,
name='kernal'
)
self.b = shared(
np.zeros(NUM_FILTERS).astype(X.dtype),
name='b'
)
# OLD Gradients
self.old_kernals = shared(
np.zeros(
(
NUM_FILTERS,
NUM_CHANELS,
FILTER_DIMS[0],
FILTER_DIMS[1]
)
).astype(X.dtype),
name='kernal'
)
self.old_b = shared(
np.zeros(NUM_FILTERS).astype(X.dtype),
name='b'
)
self.output = T.nnet.conv2d(
input=X,
filters=self.kernals,
subsample=(STRIDE, STRIDE),
border_mode='half'
)
self.output = T.nnet.relu( self.output + self.b.dimshuffle('x', 0, 'x', 'x') )
self.output = pool.pool_2d(
self.output,
pool_shape,
ignore_border='false'
)
self.params = [self.kernals, self.b]
self.old_params = [self.old_kernals, self.old_b]
class FullyConnectedLayer:
def __init__(self, X, num_hidden_neurons, num_outputs, num_inputs, factor):
self.num_inputs = num_inputs
self.NUM_HIDDEN_NEURONS = num_hidden_neurons
self.Wji = shared(
np.random.randn(
self.NUM_HIDDEN_NEURONS,
self.num_inputs
).astype(X.dtype)*factor
)
self.bji = shared(
np.zeros(
self.NUM_HIDDEN_NEURONS
).astype(X.dtype)
)
# Old gradients
self.old_Wji = shared(
np.zeros(
(
self.NUM_HIDDEN_NEURONS,
self.num_inputs
)
).astype(X.dtype)
)
self.old_bji = shared(
np.zeros(
self.NUM_HIDDEN_NEURONS
).astype(X.dtype)
)
self.num_outputs = num_outputs
self.Wkj = shared(
np.random.randn(
self.num_outputs,
self.NUM_HIDDEN_NEURONS
).astype(X.dtype)*factor
)
self.bkj = shared(
np.zeros(
self.num_outputs
).astype(X.dtype)*factor
)
# Old gradients
self.old_Wkj = shared(
np.zeros(
(
self.num_outputs,
self.NUM_HIDDEN_NEURONS
)
).astype(X.dtype)
)
self.old_bkj = shared(
np.zeros(
self.num_outputs
).astype(X.dtype)
)
Netji = T.dot(X, self.Wji.T)
Aji = T.nnet.sigmoid(Netji + self.bji)
Netkj = T.dot(Aji, self.Wkj.T)
Akj = T.nnet.softmax(Netkj + self.bkj)
self.output = Akj
self.params = [self.Wkj, self.bkj, self.Wji, self.bji]
self.old_params = [self.old_Wkj, self.old_bkj, self.old_Wji, self.old_bji]
# img = mpimg.imread('3wolfmoon.jpg')
# img = Image.open(open('3wolfmoon.jpg'))
NUM_FILTERS = 16
FILTER_SIZE = (5, 5)
NUM_FILTERS_2 = 20
FILTER_SIZE_2 = (5, 5)
NUM_FILTERS_3 = 20
FILTER_SIZE_3 = (5, 5)
STRIDE = 1
POOL_SHAPE = (2, 2)
NUM_HIDDEN_NEURONS = 1000
epochs = 15000
num_samples = 0
t_num_samples = 0
mini_batch = 200
eta = 0.5
factor = 0.001
momentum = 0.00
# Cifar-10 dataset
IMG_DIMS = (32, 32, 3)
IMG_DIMS_2 = (IMG_DIMS[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS)
IMG_DIMS_3 = (IMG_DIMS_2[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS_2[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS_2)
IMG_DIMS_4 = (IMG_DIMS_3[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS_3[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS_3)
NUM_OUTPUTS = 10
total_num_pixels = IMG_DIMS_2[0] * IMG_DIMS_2[1] * IMG_DIMS_2[2]
total_num_pixels_2 = IMG_DIMS_3[0] * IMG_DIMS_3[1] * IMG_DIMS_3[2]
total_num_pixels_3 = IMG_DIMS_4[0] * IMG_DIMS_4[1] * IMG_DIMS_4[2]
batch_1 = unpickle("../data/cifar-10-batches-py/data_batch_1")
batch_2 = unpickle("../data/cifar-10-batches-py/data_batch_2")
batch_3 = unpickle("../data/cifar-10-batches-py/data_batch_3")
batch_4 = unpickle("../data/cifar-10-batches-py/data_batch_4")
batch_5 = unpickle("../data/cifar-10-batches-py/data_batch_5")
data = []
data.append(batch_1['data'])
data.append(batch_2['data'])
data.append(batch_3['data'])
data.append(batch_4['data'])
data.append(batch_5['data'])
data = np.concatenate(data)
labels = []
labels.append(batch_1['labels'])
labels.append(batch_2['labels'])
labels.append(batch_3['labels'])
labels.append(batch_4['labels'])
labels.append(batch_5['labels'])
labels = np.concatenate(labels)
test_data = unpickle("../data/cifar-10-batches-py/test_batch")
num_samples = np.shape(data)[0]
t_num_samples = np.shape(test_data['data'])[0]
# training data
one_hot = np.zeros( (num_samples, NUM_OUTPUTS) )
one_hot[range(num_samples), labels] = 1
target = one_hot.astype('float32')
img = data.reshape((num_samples, IMG_DIMS[0], IMG_DIMS[1], IMG_DIMS[2]), order='F')
img = img.astype('float32') / 256
data_mean = data.mean()
# data_std = data.std()
data = (data - data_mean)
# data = data / data_std
########
# cov = np.dot(data.T, data) / np.shape(data)[0]
# U,S,V = np.linalg.svd(cov)
# Xrot = np.dot(data, U)
# Xrot_reduced = np.dot(data, U[:, :100])
# Xwhite = Xrot / np.sqrt(S + 1e-1)
# data = Xrot
########
img_ = data.reshape((-1, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[0])).transpose(0, 2, 3, 1).reshape((-1, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[1]))
# testing data
t_data = test_data['data']
t_img = t_data.reshape((t_num_samples, IMG_DIMS[0], IMG_DIMS[1], IMG_DIMS[2]), order='F')
t_img = t_img.astype('float32') / 256
t_data = (t_data - data_mean)
# t_data = t_data / data_std
########
# t_cov = np.dot(t_data.T, t_data) / np.shape(t_data)[0]
#
# t_U,t_S,t_V = np.linalg.svd(t_cov)
# t_Xrot = np.dot(t_data, U)
# t_Xwhite = t_Xrot / np.sqrt(t_S + 1e-1)
# t_data = t_Xrot
########
t_img_ = t_data.reshape((-1, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[0])).transpose(0, 2, 3, 1).reshape((-1, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[1]))
t_target = test_data['labels']
# ###########
# Minst dataset
# IMG_DIMS = (28, 28, 1)
# IMG_DIMS_2 = (IMG_DIMS[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS)
# IMG_DIMS_3 = (IMG_DIMS_2[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS_2[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS_2)
# IMG_DIMS_4 = (IMG_DIMS_3[0]/(POOL_SHAPE[0]*STRIDE), IMG_DIMS_3[1]/(POOL_SHAPE[1]*STRIDE), NUM_FILTERS_3)
# NUM_OUTPUTS = 10
# total_num_pixels = IMG_DIMS_2[0] * IMG_DIMS_2[1] * IMG_DIMS_2[2]
# total_num_pixels_2 = IMG_DIMS_3[0] * IMG_DIMS_3[1] * IMG_DIMS_3[2]
# total_num_pixels_3 = IMG_DIMS_4[0] * IMG_DIMS_4[1] * IMG_DIMS_4[2]
#
# train_data = unpickle("../data/minst-train.pickle")
# test_data = unpickle("../data/minst-test.pickle")
#
# num_samples = np.shape(train_data)[0]
# t_num_samples = np.shape(test_data)[0]
#
# # testing data
# t_data = test_data[:, 1:]
#
# t_img = t_data.reshape((t_num_samples, IMG_DIMS[0], IMG_DIMS[1]), order='F')
#
# t_data = (t_data - t_data.mean())
#
# t_img_ = t_data.reshape((t_num_samples, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[1]))
#
# t_target = test_data[:, 0]
#
# # training data
# data = train_data[:, 1:]
#
# img = data.reshape((num_samples, IMG_DIMS[0], IMG_DIMS[1]), order='F')
#
# data = (data - data.mean())
#
# img_ = data.reshape((num_samples, IMG_DIMS[2], IMG_DIMS[0], IMG_DIMS[1]))
#
# labels = train_data[:, 0]
# one_hot = np.zeros( (num_samples, NUM_OUTPUTS) )
# one_hot[range(num_samples), labels] = 1
# target = one_hot
# ###########
# Theano convolution_nnet Function
X = T.tensor4('X')
t = T.imatrix('t')
batch_size = T.iscalar('batch_size')
params = []
old_params = []
conv_layer = Convultional_layer(X=X, NUM_FILTERS=NUM_FILTERS, NUM_CHANELS=IMG_DIMS[2], FILTER_DIMS=FILTER_SIZE, STRIDE=STRIDE, pool_shape=POOL_SHAPE, factor=factor)
feature_maps = conv_layer.output
params.append(conv_layer.params)
old_params.append(conv_layer.old_params)
conv_layer_2 = Convultional_layer(X=feature_maps, NUM_FILTERS=NUM_FILTERS_2, NUM_CHANELS=IMG_DIMS_2[2], FILTER_DIMS=FILTER_SIZE_2, STRIDE=STRIDE, pool_shape=POOL_SHAPE, factor=factor)
feature_maps_2 = conv_layer_2.output
params.append(conv_layer_2.params)
old_params.append(conv_layer_2.old_params)
conv_layer_3 = Convultional_layer(X=feature_maps_2, NUM_FILTERS=NUM_FILTERS_3, NUM_CHANELS=IMG_DIMS_3[2], FILTER_DIMS=FILTER_SIZE_3, STRIDE=STRIDE, pool_shape=POOL_SHAPE, factor=factor)
feature_maps_3 = conv_layer_3.output
params.append(conv_layer_3.params)
old_params.append(conv_layer_3.old_params)
reshaped_image = T.reshape(feature_maps_3, (batch_size, total_num_pixels_3))
FC = FullyConnectedLayer(X=reshaped_image, num_hidden_neurons=NUM_HIDDEN_NEURONS, num_outputs=NUM_OUTPUTS, num_inputs=total_num_pixels_3, factor=factor)
predict = FC.output
params.append(FC.params)
old_params.append(FC.old_params)
params = np.concatenate(params)
old_params = np.concatenate(old_params)
cost = T.mean(T.nnet.categorical_crossentropy(predict, t))
gradients = []
for param in params:
gradients.append(T.grad(cost, param))
updates = []
for param, grad, old_param in zip(params, gradients, old_params):
updates.append((param, param-eta*grad+momentum*old_param))
for old_param, grad in zip(old_params, gradients):
updates.append((old_param, grad))
convolution_nnet = function(inputs=[X, t, batch_size], outputs=[feature_maps, predict, cost], updates = updates, allow_input_downcast=True, on_unused_input='ignore')
forward_prop = function(inputs=[X, batch_size], outputs=[predict], allow_input_downcast=True, on_unused_input='ignore')
# ###########
cost = []
accu = []
lastAccu = 0
for i in range(epochs):
index = np.random.randint(0, num_samples - mini_batch)
images = img[index:index+mini_batch]
mini_batch_images = img_[index:index+mini_batch]
mini_batch_target = target[index:index+mini_batch]
feature_maps, predict, c = convolution_nnet(mini_batch_images, mini_batch_target, mini_batch)
cost.append(c)
print "Epoch "+str(i)+":"
print "Cost: "+str(c)
print "Last Accuracy: "+str(lastAccu)
if(i%500 == 0 and i>0):
t_predict = forward_prop(t_img_, np.shape(t_img_)[0])
accuracy = get_accuracy(t_predict[0], t_target)
lastAccu = accuracy
accu.append(accuracy)
print "Accuracy : "+str(accuracy)+"%"
# input("insert any letter")
if(i == epochs-1):
print np.shape(feature_maps)
t_predict = forward_prop(t_img_, np.shape(t_img_)[0])
print "Accuracy : "+str(get_accuracy(t_predict[0], t_target))+"%"
show_image(images[2], feature_maps[2])
pylab.figure('3')
pylab.plot(range(epochs), cost, 'k-')
pylab.figure('4')
pylab.plot(range(len(accu)), accu, 'k-')
pylab.show()
| [
"abdulazizalaa@ymail.com"
] | abdulazizalaa@ymail.com |
9eb1b5ffb2bcf1161360fa5cd26066ab675763d1 | c4ae91e31a13865df38994c831cc891833fcb270 | /blog/migrations/0005_auto_20210819_1639.py | 4ba48a8a382a89e3775346b1e5ebe1da1f3d9917 | [] | no_license | Beefy-py/drf-react-youpost | e8eae87fb01b5e2f9e50f048563952f8899d2c17 | 241abed96a9539b42d7cd264caa08e4ee445ff04 | refs/heads/main | 2023-07-11T16:00:48.924071 | 2021-08-31T23:35:26 | 2021-08-31T23:35:26 | 387,951,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # Generated by Django 3.2.6 on 2021-08-19 19:39
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('blog', '0004_alter_post_author'),
]
operations = [
migrations.AlterField(
model_name='post',
name='author',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_post', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='post',
name='content',
field=models.TextField(),
),
migrations.AlterField(
model_name='post',
name='title',
field=models.CharField(max_length=250),
),
]
| [
"hoftkenny@gmail.com"
] | hoftkenny@gmail.com |
e3aa7c373ec864dfd88a37fa653fdd7c94ff470a | d0c0ef992f04dc5b8e01560ee084d0cbf6cb6d17 | /fyp/fyp/store/context_processor.py | 490885e09772a914f19d2178ac1390310f6d1f61 | [] | no_license | mohammadaniel/denfyp | 373d7a6b819e1b015fa730f157fde1bd9429d35b | 126d0d961129434eacb556fe2abd843c9d421449 | refs/heads/main | 2023-02-19T12:30:42.124564 | 2021-01-23T12:15:10 | 2021-01-23T12:15:10 | 332,200,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 625 | py | from .models import Category, Cart, CartItem
from .views import _cart_id
def counter(request):
item_count = 0
if 'admin' in request.path:
return{}
else:
try:
cart = Cart.objects.filter(cart_id=_cart_id(request))
cart_items = CartItem.objects.all().filter(cart=cart[:1])
for cart_item in cart_items:
item_count += cart_item.quantity
except Cart.DoesNotExist:
item_count = 0
return dict(item_count=item_count)
def menu_links(request):
links = Category.objects.all()
return dict(links=links)
| [
"noreply@github.com"
] | mohammadaniel.noreply@github.com |
1b35008cd886ea82e1f74563242e4e06c680d3dd | c0f1254595952aa4dc692f8a00596e24394ed40d | /beacon/__init__.py | 483d8b4baf5c4c0d84ae6da2efb62b973b0f6cb9 | [] | no_license | jasonsbrooks/Parsnip | adfdd3a9d4de4dc1c546bf5f274ba5ee03c784d9 | 8ea2e4adf56c7b7527265bfc17aafafda54be0f8 | refs/heads/master | 2021-01-01T16:51:20.379065 | 2014-06-06T23:03:46 | 2014-06-06T23:03:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,361 | py | def intersection2c(p0, p1):
[x0, y0, r0] = p0
[x1, y1, r1] = p1
dx = x1 - x0
dy = y1 - y0
d = ((x1 - x0)**2 + (y1 - y0)**2)**.5
if (d > (r0 + r1)):
# no solution. circles do not intersect.
return False
if (d < abs(r0 - r1)):
# no solution. one circle is contained in the other
return False
a = ((r0*r0) - (r1*r1) + (d*d)) / (2.0 * d)
x2 = x0 + (dx * a/d)
y2 = y0 + (dy * a/d)
h = ((r0*r0) - (a*a))**.5
rx = -dy * (h/d)
ry = dx * (h/d)
xi = x2 + rx
xi_prime = x2 - rx
yi = y2 + ry
yi_prime = y2 - ry
return [[xi, yi], [xi_prime, yi_prime]]
def intersection3c(p0, p1, pf):
res = intersection2c(p0, p1)
if res:
err0 = abs(((res[0][0] - pf[0])**2 + (res[0][1] - pf[1])**2)**.5 - pf[2])
err1 = abs(((res[1][0] - pf[0])**2 + (res[1][1] - pf[1])**2)**.5 - pf[2])
if err0 < err1:
return res[0]
else:
return res[1]
else:
return False
def intersection(ptdists):
# Takes a list of [x,y,r] lists and returns the most likely [x,y] location of the user.
for x in ptdists:
try:
if len(x) != 3:
return False
except:
return False
if len(ptdists) < 1:
return False
if len(ptdists) == 1:
return ptdists[0][0:2]
if len(ptdists) == 2:
return intersection2c(ptdists[0], ptdists[1])[0]
else:
ptdists = sorted(ptdists, key=lambda x:x[2])[0:3]
return intersection3c(ptdists[0], ptdists[1], ptdists[2])
| [
"jason.brooks@yale.edu"
] | jason.brooks@yale.edu |
fb59c5b2bf3b340fbeebaa17e121b85ef550abdb | daa6bc86b834a9d8d4b090dd1b0bd2762850e244 | /intro/app.py | 87a14f0ad5e41bde6e776785c6faf1762b24abbe | [] | no_license | lgigek/udemy-python-rest-api | 2ffe495109276dc50b20a71589e4130a07f2f63d | a8c801136cbce6a128f57d5d4c6ea25368daa137 | refs/heads/master | 2020-04-19T06:20:17.986038 | 2019-03-26T18:17:29 | 2019-03-26T18:17:29 | 168,014,810 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | from flask import Flask, jsonify, request
app = Flask(__name__)
@app.route('/add_two_numbers', methods=['POST'])
def add_two_numbers():
data_dictionary = request.get_json()
x = data_dictionary['x']
y = data_dictionary['y']
result = x + y
return_json = {
'result': result
}
return jsonify(return_json), 200
if __name__ == '__main__':
app.run(debug=True)
| [
"lucasgigek@gmail.com"
] | lucasgigek@gmail.com |
a85b227b221113c684d0bdf1520dd764534526b4 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/EDataServer/OAuth2ServicesClass.py | 0ce66dc147beaec73e1fb4e0fb2dfd8bde750bc9 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,857 | py | # encoding: utf-8
# module gi.repository.EDataServer
# from /usr/lib64/girepository-1.0/EDataServer-1.2.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Gio as __gi_repository_Gio
import gi.repository.GObject as __gi_repository_GObject
import gi.repository.Soup as __gi_repository_Soup
import gobject as __gobject
class OAuth2ServicesClass(__gi.Struct):
"""
:Constructors:
::
OAuth2ServicesClass()
"""
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self): # real signature unknown; restored from __doc__
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
parent_class = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
reserved = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(OAuth2ServicesClass), '__module__': 'gi.repository.EDataServer', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'OAuth2ServicesClass' objects>, '__weakref__': <attribute '__weakref__' of 'OAuth2ServicesClass' objects>, '__doc__': None, 'parent_class': <property object at 0x7f626e908ef0>, 'reserved': <property object at 0x7f626e909040>})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(OAuth2ServicesClass)
| [
"ttys3@outlook.com"
] | ttys3@outlook.com |
abe98869850825275896a66fc3291857d4606a44 | d328606b57d3d4a6392d0376bc955e20629a1ae8 | /tests/python_example.py | 2644402d1435b0abdbd23e800e9b905ac48fee3c | [
"BSD-3-Clause"
] | permissive | silasxue/garf | bf3c6f15a23f775ea93a399baa712609f0dd70cb | eaf56ab31d398f1453919ea583145978e4ef7295 | refs/heads/master | 2021-01-14T12:45:38.655312 | 2013-10-01T16:23:51 | 2013-10-01T16:23:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | #!/usr/bin/env ipython --pylab -i
import numpy as np
import garf
from sys import getrefcount
def foo():
    """Train a small garf regression forest on a toy 1-D quadratic problem.

    Builds 100 two-feature datapoints, fits labels of the form
    f1**2 + f2, then predicts back on the training set.

    Returns: (forest, features, labels, pred_mu, pred_var, leaf_indices,
    importances).  NOTE: this file is Python 2 (print statements).
    """
    rf = garf.RegForest_D_D_2D()
    rf.forest_options.max_num_trees = 10
    rf.tree_options.max_depth = 2
    num_datapoints = 100
    # Two linearly spaced feature columns, stacked into an (N, 2) matrix.
    f1 = np.linspace(-3, 3, num_datapoints).reshape(num_datapoints, 1)
    f2 = np.linspace(1, -2, num_datapoints).reshape(num_datapoints, 1)
    features = np.hstack([f1, f2])
    # Target: quadratic in the first feature plus the second feature.
    labels = ((features[:, 0] ** 2) + (features[:, 1])).reshape(num_datapoints, 1)
    print "features:\n", features
    print "labels:\n", labels
    rf.train(features, labels, calc_importance=True)
    importances = rf.feature_importance(features, labels)
    # Pre-filled output buffers with sentinel values (0 / -1 / -2) so it is
    # visible whether predict() actually overwrote them.
    pred_mu = np.zeros(labels.shape, dtype=labels.dtype)
    pred_mu[:, :] = 0
    pred_var = np.zeros(labels.shape, dtype=labels.dtype)
    pred_var[:, :] = -1
    leaf_indices = np.zeros((features.shape[0], rf.stats.num_trees), dtype=np.long)
    leaf_indices[:, :] = -2
    # predict() returns fresh arrays, so the pre-filled buffers above are
    # actually discarded here — presumably kept from an older in-place API.
    pred_mu, pred_var, leaf_indices = rf.predict(features, output_leaf_indices=True)
    # print "vals after:", pred_mu
    # print "var after:", pred_var
    # print "leaf_indices:", leaf_indices
    return rf, features, labels, pred_mu, pred_var, leaf_indices, importances
if __name__ == "__main__":
    # Run the demo and keep the results bound at module level for
    # interactive inspection (script is launched under ipython --pylab -i).
    rf, features, labels, pred_mu, pred_var, leaf_indices, importances = foo()
| [
"malcolm.reynolds@gmail.com"
] | malcolm.reynolds@gmail.com |
11ce53d3ab52f206576889fe41cfb8d888671626 | d4e002bc615b412a41adb62711943a2ef978cfca | /Luna16PreprocessUtils.py | 64dd90c43f7757cc9eeb317821aa90d31afef1be | [] | no_license | kenkoooo/data-science-bowl-2017 | c4c5338c8aecde2724d75214e37eb4f40301c87b | d01378957eeba440ceceb9b95f645894d2c1bbdf | refs/heads/master | 2021-01-20T13:23:05.850222 | 2017-05-08T10:09:35 | 2017-05-08T10:09:35 | 90,485,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | import SimpleITK
import numpy as np
import os
from pandas.core.frame import DataFrame
from scipy.ndimage.interpolation import zoom
def load_itk(mhd_filepath: str) -> (np.ndarray, np.ndarray, np.ndarray):
"""
This function reads a '.mhd' file using SimpleITK and return the image array,
origin and spacing of the image.
:param mhd_filepath:
:return:
"""
# Reads the image using SimpleITK
itk_image = SimpleITK.ReadImage(mhd_filepath)
# Convert the image to a numpy array first and then shuffle the
# dimensions to get axis in the order z,y,x
ct_scan = SimpleITK.GetArrayFromImage(itk_image)
# Read the origin of the ct_scan, will be used to convert the coordinates
# from world to voxel and vice versa.
origin = np.array(list(reversed(itk_image.GetOrigin())))
# Read the spacing along each dimension
spacing = np.array(list(reversed(itk_image.GetSpacing())))
return ct_scan, origin, spacing
def seq(start, stop, step=1):
    """Return the inclusive arithmetic progression start, start+step, ... stop.

    The number of steps is rounded from (stop - start) / step; when that
    count is 1 or less an empty list is returned (same contract as the
    original implementation).
    """
    step_count = int(round((stop - start) / float(step)))
    if step_count <= 1:
        return []
    values = []
    for k in range(step_count + 1):
        values.append(start + step * k)
    return values
def world_2_voxel(world_coordinates, origin, spacing):
    """Convert world (mm) coordinates to voxel indices.

    The absolute offset from the scan origin is divided element-wise by
    the per-axis voxel spacing.
    """
    offset_from_origin = np.abs(world_coordinates - origin)
    return offset_from_origin / spacing
def draw_circles(image: np.ndarray, candidates: DataFrame, origin: np.ndarray, spacing: np.ndarray) -> np.ndarray:
    """
    This function is used to create spherical regions in binary masks
    at the given locations and radius.

    image:      resampled scan volume in (z, y, x) order; only its shape is used.
    candidates: annotation rows; columns are accessed positionally —
                [1]=coordX, [2]=coordY, [3]=coordZ, [4]=diameter
                (presumably the LUNA16 annotations.csv layout — TODO confirm).
    origin/spacing: scan metadata in (z, y, x) order, as produced by load_itk.

    Returns a float mask of image.shape with 1.0 inside each nodule sphere.
    """
    # make empty matrix, which will be filled with the mask
    # resize_spacing assumes the volume was already resampled to 1 mm voxels.
    resize_spacing = [1, 1, 1]
    image_mask = np.zeros(image.shape)
    # run over all the nodules in the lungs
    for candidate in candidates.values:
        # get middle x-,y-, and z-world_coordinate of the nodule
        # radius = half of the (ceiled) diameter in column 4
        radius = np.ceil(candidate[4]) / 2
        coord_x = candidate[1]
        coord_y = candidate[2]
        coord_z = candidate[3]
        image_coord = np.array((coord_z, coord_y, coord_x))
        # determine voxel coordinate given the world_coordinate
        image_coord = world_2_voxel(image_coord, origin, spacing)
        # determine the range of the nodule
        # seq() returns [] when the diameter spans <= 1 step, so very small
        # nodules produce an empty mask — NOTE(review): verify this is intended.
        nodule_range = seq(-radius, radius, resize_spacing[0])
        # create the mask: scan a cube around the centre and keep every voxel
        # whose world-space distance from the centre is inside the radius.
        for x in nodule_range:
            for y in nodule_range:
                for z in nodule_range:
                    world_coordinates = np.array((coord_z + z, coord_y + y, coord_x + x))
                    coordinates = world_2_voxel(world_coordinates, origin, spacing)
                    if (np.linalg.norm(image_coord - coordinates) * resize_spacing[0]) < radius:
                        rx = int(np.round(coordinates[0]))
                        ry = int(np.round(coordinates[1]))
                        rz = int(np.round(coordinates[2]))
                        image_mask[rx, ry, rz] = int(1)
    return image_mask
def resample(img: np.ndarray, spacing: np.ndarray) -> (np.ndarray, np.ndarray):
    """Resample a volume to (approximately) 1 mm isotropic voxels.

    Because the target shape is rounded to whole voxels, the achieved
    spacing can differ slightly from exactly 1 mm; the actually achieved
    spacing is returned alongside the resampled volume.
    """
    target_spacing = np.array([1, 1, 1])
    # Nominal zoom factor per axis, then the factor that is actually
    # realisable once the output shape is rounded to integers.
    nominal_scale = spacing / target_spacing
    rounded_shape = np.round(img.shape * nominal_scale)
    achieved_scale = rounded_shape / img.shape
    resampled = zoom(img, achieved_scale, mode='nearest')
    return resampled, spacing / achieved_scale
def images_and_nodules(patient_id: str, annotations: DataFrame, image_dir: str) -> (np.ndarray, np.ndarray):
    """
    extract images from mhd/raw images and create nodule masks from the annotations
    :param patient_id: unique id which is called seriesuid
    :param annotations: nodule annotation DataFrame
    :param image_dir: directory which has CT scan images
    :return: 3D image array and mask of nodules

    NOTE(review): returns a bare None (not a (None, None) tuple) when the
    .mhd file is missing — callers that unpack the result must check first.
    """
    mhd_filepath = "{}/{}.mhd".format(image_dir, patient_id)
    if not os.path.exists(mhd_filepath):
        return None
    # Load scan and its (z, y, x) metadata, then resample to ~1 mm voxels
    # before rasterising the annotated nodules into a binary mask.
    img, origin, spacing = load_itk(mhd_filepath)
    candidates = annotations[annotations["seriesuid"] == patient_id]
    lung_img, new_spacing = resample(img, spacing)
    nodule_mask = draw_circles(lung_img, candidates, origin, new_spacing)
    return lung_img, nodule_mask
| [
"kenkou.n@gmail.com"
] | kenkou.n@gmail.com |
ee8d2fd52222b344e1e476c6d216e94219d21068 | ded9755ce320888c70d85228ee74781ccb22d49e | /env/lib/python3.7/posixpath.py | d5a9e50d9229cf9283af4171348447228d6fea9a | [] | no_license | drifftingcanoncuddle/szawiola-zadanie | 31a6fa58592e7d74cb30f3a640ddcfb7c01f142e | 26bbab19c609c9c3bdf6f8ec994b999b4e428574 | refs/heads/master | 2022-07-08T09:37:02.618354 | 2019-06-28T12:26:00 | 2019-06-28T12:26:00 | 187,694,597 | 0 | 0 | null | 2022-06-21T22:01:04 | 2019-05-20T18:37:57 | Python | UTF-8 | Python | false | false | 60 | py | /home/piotr/.pyenv/versions/3.7.3/lib/python3.7/posixpath.py | [
"ptl99t@gmail.com"
] | ptl99t@gmail.com |
1f8466a0cad59fdffa6cdc15aeff26b4f38cbf94 | d73728c650589ce41c42a10e5753b31dd9cbdbfa | /shuttle_detection/data/__init__.py | 56064bdfd3b9b5199f37e67eaf76b63d826008e4 | [] | no_license | EricYang5922/badminton | 566b46303b6353a4beec8d572d0d54ba47934644 | 75131f127d9817a33b771d05def8047d7a22591c | refs/heads/master | 2022-01-15T16:32:04.188782 | 2019-05-16T09:47:49 | 2019-05-16T09:47:49 | 186,759,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | from .dataset import shuttle | [
"wkom@LAPTOP-5QB0EKBV"
] | wkom@LAPTOP-5QB0EKBV |
a20ffd93c0dcbfea4dfc93f1a9c4a64f1c8d25aa | 36de14c6b188886df6a284ee9ce4a464a5ded433 | /Solutions/0838/0838.py | ca1246e69387c77941ed2610ee370d69c953d1e0 | [] | no_license | washing1127/LeetCode | 0dca0f3caa5fddd72b299e6e8f59b5f2bf76ddd8 | b910ddf32c7e727373449266c9e3167c21485167 | refs/heads/main | 2023-03-04T23:46:40.617866 | 2023-02-21T03:00:04 | 2023-02-21T03:00:04 | 319,191,720 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | # -*- coding:utf-8 -*-
# Author: washing
# DateTime: 2022/2/21 11:16
# File: 0838.py
# Desc:
class Solution:
def pushDominoes(self, dominoes: str) -> str:
l = list(dominoes)
status_c = l[0]
status_id = 0
for i in range(1,len(l)):
c = l[i]
if c == '.': continue
elif c == 'L':
if status_c == 'R': # 之前和当前相对,向中间靠拢
idl = status_id; idr = i
while idl < idr:
l[idl] = 'R'
l[idr] = 'L'
idl += 1
idr -= 1
status_id = i
status_c = 'L'
else: # 当前向左,之前为空或向左,中间全向左
for idx in range(status_id,i): l[idx] = 'L'
status_id = i
else:
if status_c == 'R': # 之前向右,当前向右,中间全向右
for idx in range(status_id,i): l[idx] = 'R'
status_id = i
else: # 之前向左或为空,当前向右,中间不变
status_c = 'R'
status_id = i
if l[-1] == '.' and status_c == 'R':
| [
"1014585392@qq.com"
] | 1014585392@qq.com |
2c75bd52eb9d1ca348fbc57ef0e8f848f67f6dcd | 214a34e936e5519c3e0c7ecadccb22c8e68dfcb2 | /lesson_010/01_fifth_element.py | 2b6ae440d24a2bb085d8b6132f884cbc4da821e4 | [] | no_license | Vndanilchenko/python_developer | 20d4340334fa7a1ba852920472ed6647e2d4e5fa | 4d964e7d67c3e57338b1e5f87102b35eb1914717 | refs/heads/master | 2022-04-20T21:18:26.096472 | 2020-04-11T18:36:18 | 2020-04-11T18:36:18 | 254,926,147 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,211 | py | # -*- coding: utf-8 -*-
# Умножить константу BRUCE_WILLIS на пятый элемент строки, введенный пользователем
BRUCE_WILLIS = 42
try:
input_data = input('Если хочешь что-нибудь сделать, сделай это сам: ')
print(input_data[4])
leeloo = int(input_data[4])
except ValueError as e:
print(f'символ {(e.args[0].split()[-1])} невозможно преобразовать к числу ')
except IndexError as e:
print(f'выход за границы списка ')
except:
print('что-то пошло не так')
result = BRUCE_WILLIS * leeloo
print(f"- Leeloo Dallas! Multi-pass № {result}!")
# Ообернуть код и обработать исключительные ситуации для произвольных входных параметров
# - ValueError - невозможно преобразовать к числу
# - IndexError - выход за границы списка
# - остальные исключения
# для каждого типа исключений написать на консоль соотв. сообщение
# зачет! | [
"vndanilchenko@gmail.com"
] | vndanilchenko@gmail.com |
af5e890ed0bb583636307a1cf2b0d3b8d7d1c779 | 6b66e499e7c2c6246c114029b83ae6ed3a4daa27 | /barista/kinematicplots_Bu.py | 01e0803f97f8df228c08c070882104a36adfa5fd | [] | no_license | DryRun/boffea | d837723eee13650306ede501a6e9fe1c5a9c610b | 433fdb92f3b60b6f140c0a0a3b2761d812b7044e | refs/heads/master | 2023-09-01T17:41:04.451388 | 2023-08-24T21:55:18 | 2023-08-24T21:55:18 | 232,651,843 | 1 | 0 | null | 2022-06-22T04:50:57 | 2020-01-08T20:24:05 | Python | UTF-8 | Python | false | false | 6,047 | py | import sys
import numpy as np
import matplotlib.pyplot as plt
from cycler import cycler
from coffea import hist, util
from pprint import pprint
import glob
import mplhep
plt.style.use(mplhep.style.ROOT)
plt.tight_layout()
from brazil.aguapreta import *
figure_directory = "/home/dryu/BFrag/data/kinematic/"
input_files = {
"data": glob.glob("/home/dryu/BFrag/data/histograms/Run*coffea"),
"Bu": ["/home/dryu/BFrag/boffea/barista/Bu2KJpsi2KMuMu/MCEfficiencyHistograms.coffea"],
"Bd": ["/home/dryu/BFrag/boffea/barista/Bd2KsJpsi2KPiMuMu/MCEfficiencyHistograms.coffea"],
"Bs": ["/home/dryu/BFrag/boffea/barista/Bs2PhiJpsi2KKMuMu/MCEfficiencyHistograms.coffea"],
}
coffea = {}
for what in input_files.keys():
for f in input_files[what]:
coffea_tmp = util.load(f)
# Delete Bcands trees
for key in list(coffea_tmp.keys()):
if "Bcands" in key or "cutflow" in key:
del coffea_tmp[key]
# For data, combine subjobs
#if what == "data":
# subjobs = [x.name for x in coffea_tmp["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers()]
# print(subjobs)
# for key in list(coffea_tmp.keys()):
# if type(coffea_tmp[key]).__name__ == "Hist":
# if "dataset" in [x.name for x in coffea_tmp[key].axes()]:
# print("DEBUG : Attempting to group axes.")
# print("DEBUG : Input identifiers = ")
# print(coffea_tmp[key].axis("dataset").identifiers())
# print("DEBUG : attempt to group")
# print(subjobs)
# coffea_tmp[key] = coffea_tmp[key].group("dataset",
# hist.Cat("dataset", "Primary dataset"),
# {"Run2018": subjobs})
# Persistify
if not what in coffea:
coffea[what] = coffea_tmp
else:
coffea[what].add(coffea_tmp)
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axes())
print(coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"].axis("dataset").identifiers())
plot_index = {
"Bu": {
"fit_pt":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_mass")\
.integrate("fit_absy", slice(0., 2.25))\
.rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_mass")\
.integrate("fit_absy", slice(0., 2.25))\
.rebin("fit_pt", hist.Bin("pt", r"$p_{T}$ [GeV]", 50, 0., 50.)),
"xlim": [0., 50.],
"xscale": "linear",
"xlabel": r"$p_{T}$ [GeV]",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
},
"fit_absy":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_mass")\
.integrate("fit_pt", slice(0., 30.))\
.rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_mass")\
.integrate("fit_pt", slice(0., 30.))\
.rebin("fit_absy", hist.Bin("absy", r"|y|$", 10, 0., 2.5)),
"xlim": [0., 3.0],
"xscale": "linear",
"xlabel": r"$|y|$",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
},
"fit_mass":{
"hist_mc": coffea["Bu"]["BuToKMuMu_fit_pt_absy_mass"]\
.group("dataset", hist.Cat("dataset", "Primary dataset"), {"mc": ["Bu2KJpsi2KMuMu_probefilter"]})\
.integrate("fit_absy", slice(0., 2.25))\
.integrate("fit_pt", slice(0., 30.)),
"hist_data": coffea["data"]["BuToKMuMu_fit_pt_absy_mass"]\
.integrate("fit_absy", slice(0., 2.25))\
.integrate("fit_pt", slice(0., 30.)),
"xlim": [5.05, 5.5],
"xscale": "linear",
"xlabel": r"Fitted $B_{u}$ mass [GeV]",
"ylim": "auto",
"yscale": "log",
"ylabel": "Events",
}
}
}
figure_directory = "/home/dryu/BFrag/data/kinematic"
def plot(hist_mc=None, hist_data=None, xlim=[], xscale="", xlabel="", ylim=[], yscale="", ylabel="", data_selection="", mc_selection="", savetag=""):
    """Overlay the data and MC distributions and draw a data/MC ratio panel.

    hist_mc / hist_data: coffea histograms with a "selection" and a
        "dataset" axis; the requested selection is integrated out of each.
    xlim/xscale/xlabel, ylim/yscale/ylabel: axis cosmetics for the top panel
        (xlim/xscale/xlabel are re-applied to the ratio panel).
    savetag: basename of the PNG written into `figure_directory`.
    """
    import copy  # local import: `copy` is not imported at module level

    hist_mc = hist_mc.integrate("selection", mc_selection)
    print(hist_data.axis("selection").identifiers())
    hist_data = hist_data.integrate("selection", data_selection)
    print(hist_data)
    print(hist_data.values())
    # NOTE(review): the original computed `data_norm = hist_data.values().sum()`
    # here ("Normalize MC to data") — it crashed (coffea .values() returns a
    # dict) and the value was never used, so the MC-to-data normalisation is
    # still missing. TODO: implement the normalisation properly.
    hist_all = copy.deepcopy(hist_data).add(hist_mc)
    # Two stacked panels: distributions on top, data/MC ratio below.
    # (The original called plt.subplots(1, 1) and then indexed ax[0]/ax[1],
    # which raises TypeError on a single Axes object.)
    fig, ax = plt.subplots(2, 1, figsize=(10, 12))
    hist.plot1d(hist_all, overlay="dataset", ax=ax[0])
    ax[0].set_xlim(xlim)
    ax[0].set_xscale(xscale)
    ax[0].set_xlabel(xlabel)
    ax[0].set_ylim(ylim)
    ax[0].set_yscale(yscale)
    ax[0].set_ylabel(ylabel)
    # NOTE(review): the MC dataset was regrouped under the name "mc" in
    # plot_index, yet the ratio integrates "Bu2KJpsi2KMuMu_probefilter" —
    # verify the dataset names actually present in hist_all.
    hist.plotratio(
        num=hist_all.integrate("dataset", "Run2018"),
        den=hist_all.integrate("dataset", "Bu2KJpsi2KMuMu_probefilter"),
        unc='num',
        ax=ax[1])
    ax[1].set_xlim(xlim)
    ax[1].set_xscale(xscale)
    ax[1].set_xlabel(xlabel)
    ax[1].set_ylabel("Data / MC")
    fig.savefig(f"{figure_directory}/{savetag}.png")
if __name__ == "__main__":
    # Trigger selections applied to data and MC respectively.
    data_selection = "recotrig_HLT_Mu9_IP5"
    mc_selection = "recomatch_HLT_Mu9_IP5"
    for btype in ["Bu"]:
        for plot_name, metadata in plot_index[btype].items():
            plot(
                savetag=f"{plot_name}_reco",
                mc_selection=mc_selection,
                data_selection=data_selection,
                **metadata,
            )
"david.renhwa.yu@gmail.com"
] | david.renhwa.yu@gmail.com |
e378f7f2b6398423bad8dbe4ace71806adf83cb1 | 29f88163a99a31eea4d2322abf584ac251983800 | /sample/text_analyzer.py | d359cc6158b0efb89a382fd7c5a2b28f42db9751 | [] | no_license | Pitenager/P2-PabloRuiz_MiguelMayoral-TDD | d3514ba6c08bbd788eedacf59ed0da2e55bdd80c | 1a5db01aaed87fd939a3efc7ee12fd03b64c7bca | refs/heads/master | 2021-09-14T07:08:37.855360 | 2018-03-19T00:00:37 | 2018-03-19T00:00:37 | 125,774,635 | 1 | 1 | null | 2018-05-09T09:12:27 | 2018-03-18T22:47:20 | Python | UTF-8 | Python | false | false | 3,959 | py | from urllib.request import urlopen
from urllib.error import URLError
import operator
#http://websitetips.com/articles/copy/lorem/ipsum.txt
class TextAnalyzer(object):
    """Fetch a text from a URL and print a word-frequency report.

    NOTE(review): every method is decorated @staticmethod yet (except
    get_text) takes a `self` parameter; run() compensates by passing the
    analyzer instance explicitly (e.g. analyzer.prepare_text(analyzer, ...)).
    This unusual calling convention is load-bearing — do not "fix" one side
    without the other.
    """
    def __init__(self):
        # word -> occurrence count; sortDict() later REPLACES this dict
        # with a sorted list of (word, count) tuples.
        self.palabras_leidas = {}
        # Stop words are matched with surrounding spaces, so they are only
        # removed when they appear as whole words inside the text.
        self.stopwords = [" about "," above "," after "," among "," at "," before "," behind "," below "," beneath "," beside "," between "," but "," by "," down "," except "
        ," for "," from "," in "," into "," like "," near "," of "," off "," on "," over "," since "," through "," throughout "," till "," until "," to "," under ",
        " up "," upon "," with "," without "," across "," also "," anyone "," anything "," back "," bill "," both "," could "," can't "," cannot "," down ",
        " due "," during "," either "," ever "," every "," few "," for "," former "," has "," hence "," hereby "," inc "," if "," it "," its "," latter ",
        " less "," ltd "," many "," may "," might "," mill "," namely "," never "," nevertheless "," nobody "," noone "," often "," once "," only "," onto ",
        " other "," others "," otherwise "," our "," ours "," ourselves "," own "," perhaps "," please "," per "," rather "," same "," several "," should ",
        " some "," something "," somewhere "," sometimes "," somehow "," someone "," sometime "," still "," such "," than "," that "," the "," them "," then ",
        " there "," their "," thereafter "," therefore "," thereby "," therein "," thereupon "," these "," those "," to "," too "," together "," towards ",
        " towards "," under "," until "," very "," via "," well "," were "," what "," whatever "," when "," who "," whenever "," where "," whereby "," whereafter ",
        " wherein "," wherever "," which "," while "," whom "," why "," will "," with "," within "," without "," yet "," yourself "," yourselves "]
        # Characters deleted outright (digits, punctuation, newlines, ...).
        self.symbols = ["-","_","?","!","0","1","2","3","4","5","6","7","8","9","0","/","$",".",",",";",":","%","(",")","{","}","[","]",">","<","\n",
        "@", "#","+","&","~"]
    @staticmethod
    def prepare_text (self, text):
        """Lowercase the text, strip stop words and symbols, split on spaces.

        Returns a list of tokens (may contain empty strings, which
        analyze() skips).
        """
        text = text.lower()
        for line in self.stopwords:
            text = text.replace(line," ")
        for symbol in self.symbols:
            text = text.replace(symbol,"")
        text = text.split(" ")
        return text
    @staticmethod
    def analyze (self,text):
        """Accumulate occurrence counts of each non-empty token into
        self.palabras_leidas."""
        for i in text:
            claves = self.palabras_leidas.keys()
            if i == "":
                continue
            elif i not in claves:
                self.palabras_leidas[i]=1
            else:
                valor = self.palabras_leidas[i]
                valor = valor +1
                self.palabras_leidas[i] = valor
    @staticmethod
    def sortDict(self):
        """Replace the counts dict with a list of (word, count) tuples
        sorted by count, most frequent first, and return it."""
        self.palabras_leidas = sorted(self.palabras_leidas.items(), key=operator.itemgetter(1))
        self.palabras_leidas.reverse()
        return self.palabras_leidas
    @staticmethod
    def printResult(self):
        """Print 'word: count' lines; assumes sortDict() already ran, so
        palabras_leidas is a list of tuples."""
        tamaño = len(self.palabras_leidas)
        for clave in range(tamaño):
            print(self.palabras_leidas[clave][0]+": ",self.palabras_leidas[clave][1])
    @staticmethod
    def get_text(url):
        """Download *url* and return its body decoded as UTF-8.

        Raises URLError for a non-string argument or an unreachable URL
        (error messages are intentionally kept in Spanish — they are
        runtime strings, not comments).
        """
        if type(url)!= str:
            raise URLError("La URL no es válida")
        else:
            try:
                response = urlopen(url)
            except:
                raise URLError("La URL no existe")
            full_text = response.read().decode("utf-8")
            return full_text
    @staticmethod
    def run(url):
        """Full pipeline: fetch, clean, count, sort and print."""
        analyzer = TextAnalyzer()
        text = analyzer.get_text(url)
        # The instance is passed explicitly because the methods are
        # @staticmethod with a `self` parameter (see class docstring).
        text = analyzer.prepare_text(analyzer,text)
        analyzer.analyze(analyzer,text)
        analyzer.sortDict(analyzer)
        analyzer.printResult(analyzer)
| [
"pablo.ruizen@gmail.com"
] | pablo.ruizen@gmail.com |
79017f587321eaa44587098d8b578b9113ccb5f9 | 47124d93dc7a66752d9a4a5c16626bf6a8393641 | /kids-code/umbrella-stand.py | 3818895302aba7047c2b3fed1952960cfd821bcf | [] | no_license | SmokinClove/microbit-hackathon | 453f7bd2c3c23b0192a671bf4f3b58cf31c0ea30 | e44180ab78065c655d4bc642629bed5dd23b0ec4 | refs/heads/master | 2020-12-03T02:05:31.378259 | 2017-07-14T12:23:39 | 2017-07-14T12:23:39 | 95,903,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 581 | py | from microbit import *
import radio
# Running credit held by this stand; increased by incoming radio payments,
# decreased when a refund is sent.
balance = 0
# Amount returned per refund (units defined by the paired device's protocol).
send_amt = 400
while True:
    received = radio.receive() #constantly checks if there's a message
    if received is not None:
        # Incoming message is the payment amount as a decimal string.
        balance = balance + int(received)
        #TODO: Unlock servo
        sleep(1000)
    else:
        pass # Do nothing if we receive nothing
    # When A and B are pressed, return some money back
    if button_a.is_pressed() and button_b.is_pressed():
        radio.send(str(send_amt))
        balance = balance - send_amt
        # TODO: Lock servo
        sleep(1000)
    else:
        pass
| [
"lieuzhenghong@gmail.com"
] | lieuzhenghong@gmail.com |
26b1b6231d7e7bc080f2b94af71e7c0ac8357b12 | 2b21b29fbcd8b20d5e6780e3e2ded809408ac6f1 | /MARCS_Recognition_v2016.py | ff9b33ee385e86760eee31e9d349035b92d6916e | [] | no_license | xiexnot/Adaptive-Context-Discovery-and-Recognition-System-in-NTUCSIE | 28b8b62645ec02c1ea0c3c56a0d45da19f5353a8 | 3b7fd822ec908fdf821ed92127a11b52fb1ba3ce | refs/heads/master | 2021-01-17T13:03:33.628764 | 2016-07-16T15:58:24 | 2016-07-16T15:58:24 | 58,325,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,539 | py | # encoding: utf-8
# 源文件是MARCS_Recognition.py
# 写完硕论初稿之后发现增加了多层novelty detection的部分
# 实在不想去改原来的MARCS_Recognition.py(其实是已经改不动了)
# 所以重写了Recognition
import os
import sys
import math
import json
from sklearn import tree, svm, mixture
from sklearn.ensemble import AdaBoostClassifier
from sklearn.neighbors.nearest_centroid import NearestCentroid
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
from sklearn.mixture import DPGMM, GMM, VBGMM
import numpy as np
from tools import *
#----------------------------------------------
# find_max / find_min
# Description: find the max/min value in array.
#----------------------------------------------
def find_max(X):
    """Return the largest value in X.

    Delegates to the built-in max() instead of the original hand-rolled
    scan. An explicit empty-input check preserves the original behaviour
    of raising IndexError (the old code indexed X[0] first), rather than
    the ValueError that bare max() would raise.
    """
    if len(X) == 0:
        raise IndexError("list index out of range")
    return max(X)
def find_min(X):
    """Return the smallest value in X.

    Mirror of find_max(): uses the built-in min(), with an explicit
    empty-input check to keep the original IndexError behaviour.
    """
    if len(X) == 0:
        raise IndexError("list index out of range")
    return min(X)
#----------------------------------------------
# Read Feature
# Description: read feature information from external file
# Hints: Feature file's format
# <Index Number for total> <Feature Name>
# <Index Number for total> <Feature Name>
# <Index Number for total> <Feature Name>....
#----------------------------------------------
def read_feature(filename):
    """Read feature descriptions from an external file.

    File format (tab-separated, one feature per line):
        <index into the full feature vector>\t<feature name>

    Returns a list of rows, each [index(int), name(str), ...]; a trailing
    newline in the file is ignored.
    """
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11);
    # universal-newline translation is the default in text mode. The
    # context manager guarantees the file is closed even on parse errors.
    with open(filename, 'r') as fh:
        lines = fh.read().split('\n')
    if lines and lines[-1] == "":
        lines = lines[:-1]
    feature = []
    for line in lines:
        fields = line.split("\t")
        fields[0] = int(fields[0])
        feature.append(fields)
    return feature
#----------------------------------------------
# Read Clustering
# Description: read clustering results from external file
#----------------------------------------------
def read_clustering(filename):
    """Read a clustering assignment from an external file.

    The file holds one integer cluster id per line; a trailing newline is
    ignored. Returns a list of ints.
    """
    # 'r' replaces the deprecated 'rU' mode (removed in Python 3.11);
    # the context manager guarantees the file handle is closed.
    with open(filename, 'r') as fh:
        lines = fh.read().split("\n")
    if lines and lines[-1] == "":
        lines = lines[:-1]
    return [int(line) for line in lines]
def Initialization(filename):
    """Load the system configuration (a JSON file) and all datasets it names.

    The JSON is expected to provide: Initial_Instance_filename,
    Initial_Clustering_filename, feature_filename, log_filename,
    WL_filename, Semantic_filename, and a "sub" list whose entries each
    name a feature/clustering/instance file for one sub-context.

    Returns (log_filename, WL_filename, Semantic_filename, Instance,
    Clustering, Feature, Sub_Instance, Sub_Clustering, Sub_Feature).
    NOTE: this module is Python 2 (print statements).
    """
    FILE = open(filename,'rU')
    rawdata = FILE.read()
    FILE.close()
    decoded = json.loads(rawdata)
    print "Initial_Instance_filename = ", decoded['Initial_Instance_filename']
    print "Initial_Clustering_filename = ", decoded['Initial_Clustering_filename']
    #print "metric_filename = ", decoded['metric_filename']
    print "feature filename = ", decoded["feature_filename"]
    # The clustering metric is hardly used in the master thesis, so it is
    # left commented out here.
    print "sub context = ",decoded["sub"]
    Sub_Feature =[]
    Sub_Clustering = []
    Sub_Instance = []
    # Load each sub-context's feature list, clustering result and instances;
    # the three parallel lists are indexed by sub-context number.
    for i in range(len(decoded["sub"])):
        print "#",i," sub processing..."
        print decoded["sub"][i]
        print decoded["sub"][i]["feature_filename"]
        print decoded["sub"][i]["clustering_filename"]
        print decoded["sub"][i]["instance_filename"]
        Sub_Feature_item = read_feature(decoded["sub"][i]["feature_filename"])
        Sub_Clustering_item = read_clustering(decoded["sub"][i]['clustering_filename'])
        Sub_Instance_item, tmp = read_dataset(decoded["sub"][i]["instance_filename"],'\t')
        Sub_Instance_item = Convert2FloatArray(Sub_Instance_item,2)
        Sub_Feature.append(Sub_Feature_item)
        Sub_Clustering.append(Sub_Clustering_item)
        Sub_Instance.append(Sub_Instance_item)
    print "end of sub information"
    # Top-level (full-feature) instances, feature list and clustering.
    Instance, tmp = read_dataset(decoded['Initial_Instance_filename'],'\t')
    Instance = Convert2FloatArray(Instance,2)
    Feature = read_feature(decoded['feature_filename'])
    #read the clustering result from initial instances
    Clustering = read_clustering(decoded['Initial_Clustering_filename'])
    #Clustering_Metric = read_dataset(decoded['metric_filename'],'\t')
    #Clustering_Metric = Convert2FloatArray(Clustering_Metric)
    #return decoded["log_filename"], decoded["WL_filename"], decoded["Semantic_filename"], Instance , Clustering, Clustering_Metric
    print "Initialization()...Completed..."
    return decoded['log_filename'], decoded['WL_filename'], decoded['Semantic_filename'], Instance, Clustering, Feature, Sub_Instance, Sub_Clustering, Sub_Feature
"""
def ActivityRecognition(AR_filename, WL_filename, Semantic_filename, Instance, Clustering):
pass
#read the file from AR_filename
AR_instance = read_dataset(AR_filename,'\t')
AR_instance = Convert2FloatArray(AR_instance)
AR_instance_ARFF = ConvertInstance2ARFF(AR_instance, Clustering)
#read the semantic meaning from extrenal file
Semantic_Meaning = read_json(Semantic_filename)
#build classifier for the next step's processing
clf = BuildClassifier(Instance, Clustering, Clustering_Metric)
print "type of Semantic_Meaning = ", type(Semantic_Meaning)
is_unfamilar_pattern = -1
new_semantic_meaning = False
#for index, inst in enumerate(AR_instance_ARFF):
for index, inst in enumerate(AR_instance):
Distribution = ModelPossibilityDistribution(clf, inst)
is_familar_pattern = isFamilarPattern(Distribution, Semantic_Meaning)
print "is_familar_pattern = ", is_familar_pattern
if is_familar_pattern < 0:
print "Add a new instance into WaitingList..."
PrintInstanceWL(AR_instance[index],WL_filename)
else:
if Semantic_Meaning.has_key((is_familar_pattern)) == True:
#find propable semantic meaning
print "AR Result: " + Semantic_Meaning[str(is_familar_pattern)]
else:
#cannot find proper semantic mearning
new_semantic_meaning = True
semantic_label = raw_input('please enter the Semantic Meaning for the context')
Semantic_Meaning[str(is_familar_pattern)] = semantic_label
if new_semantic_meaning == True:
print_json_to_file(Semantic_filename, Semantic_Meaning)
return 0
"""
#----------------------------------------------
# read_json
# Description: read json data from external file
#----------------------------------------------
def read_json(filename):
    """Load JSON data from an external file.

    Returns the decoded object, or an empty dict when the file content is
    not valid JSON.
    """
    # 'r' replaces the deprecated 'rU' mode; the context manager closes
    # the handle on every path (the original leaked it if read() raised).
    with open(filename, 'r') as fh:
        rawdata = fh.read()
    try:
        decoded = json.loads(rawdata)
    except ValueError:
        # Narrowed from the original bare `except:`; json.loads signals
        # malformed input with ValueError (JSONDecodeError subclasses it).
        decoded = {}
    return decoded
#----------------------------------------------
# Novelty Detecting
# Description: Detect the potential novelty by using ND Classifier
#----------------------------------------------
def all_potential_novelty(Distribution):
    """Return True if every one-class prediction flags a potential novelty.

    OneClassSVM.predict yields +1 (inlier) or -1 (outlier); a value below
    -0.5 therefore means "outlier". Like the original count-based loop,
    an empty Distribution yields True (0 of 0 satisfied, vacuously).
    """
    return all(d < -0.5 for d in Distribution)
def Novelty_Detecting(instance, ND_Classifier, Sub_Feature):
    """Decide whether *instance* is a novelty.

    For each sub-context i, the sub-feature indices (Sub_Feature[i][j][0])
    project the full instance down to that context's feature subspace; the
    projection is then scored by every one-class classifier of that context.
    Returns True as soon as ALL classifiers of some sub-context flag the
    instance as an outlier, False otherwise.
    """
    result = False
    print "Novelty Detecting..."
    print Sub_Feature
    print instance
    for i in range(0,ND_Classifier.__len__()):
        sub_instance = []
        print "# of feature = ", len(Sub_Feature[i])
        # Project the instance onto this sub-context's feature indices.
        for j in range(len(Sub_Feature[i])):
            sub_instance.append(instance[Sub_Feature[i][j][0]])
        print sub_instance
        sub_instance = np.array(sub_instance)
        Distribution = []
        # Collect the +1/-1 outlier verdict of every classifier in context i.
        for j in range(len(ND_Classifier[i])):
            distribution = ND_Classifier[i][j].predict(sub_instance)[0]
            print i,"#",j," = ", distribution
            Distribution.append(distribution)
        print "Distribution for ",i,"-th ND = ", Distribution
        if all_potential_novelty(Distribution) == True:
            return True
    return result
#----------------------------------------------
# ActivityRecognition
# Description: Check whether Novelty Detection occurs.
# If novelty occurs, add the related instance into WL
#----------------------------------------------
def ActivityRecognition(AR_filename, Feature, WL_filename, Semantic_filename, ND_Classifier, Sub_Feature):
    """Run novelty detection on the first instance read from AR_filename.

    Only prints whether a novelty or an existing context was detected;
    Feature and WL_filename are currently unused in this version, and the
    loaded Semantic_Meaning is read but not consulted. Always returns 0.
    """
    AR_instance, line = read_dataset(AR_filename,'\t')
    print "AR_instance = ",AR_instance
    AR_instance = Convert2FloatArray(AR_instance,2)
    Semantic_Meaning = read_json(Semantic_filename)
    print "AR_instance = ",AR_instance
    # Only the first instance of the file is classified here.
    if Novelty_Detecting(AR_instance[0], ND_Classifier, Sub_Feature) == True:
        print "Novelty...Detected!..."
    else:
        print "Existing Context...Detected..."
    return 0
#----------------------------------------------
# one_class_ND
# Description: Build up one class classifier with given instances
#----------------------------------------------
def one_class_ND(index, instance):
    """Fit a one-class SVM on the instances of cluster *index*.

    A label array filled with *index* is built for API symmetry only —
    OneClassSVM is unsupervised, so fit() accepts but ignores the labels.
    Returns the fitted classifier.
    """
    clustering = []
    for i in range(instance.__len__()):
        clustering.append(index)
    print "one_class_ND ", len(instance)," ",len(clustering)
    instance = np.array(instance)
    clustering = np.array(clustering)
    clf = svm.OneClassSVM()
    clf = clf.fit(instance, clustering)
    return clf
#----------------------------------------------
# ND_Classifier_BUILD
# Description: Build up single Sub or Micro Novelty Detection Classifier
# Hints:
# ND_Classifier_Sub[i]: one-class Novelty Detection for i-th context
#----------------------------------------------
def ND_Classifier_BUILD(Instance, Clustering):
    """Build one one-class novelty detector per cluster id.

    Cluster ids are assumed to be 0..max(Clustering); the returned list is
    indexed by cluster id: ND_Classifier_Sub[i] is the detector trained on
    exactly the instances assigned to cluster i.
    """
    pass
    ND_Classifier_Sub = []
    M = find_max(Clustering)
    print "ND Classifier # = ", M
    for i in range(M+1):
        instance = []
        #create instances which are only related to corresponding context
        for x in range(Clustering.__len__()):
            if Clustering[x] == i:
                instance.append(Instance[x])
        clf = one_class_ND(i, instance)
        ND_Classifier_Sub.append(clf)
    return ND_Classifier_Sub
#----------------------------------------------
# NoveltyDetection_BUILD
# Description: Build up All Sub or Micro Novelty Detection Classifier
# Hints:
# Sub_Instance[i]: i-th sub dataset
# Sub_Clustering[i]: i-th sub clustering result
# Sub_Instance[i][j]: j-th instance in i-th sub dataset
#----------------------------------------------
def Small_Test(ND_Classifier):
    """Smoke-test the detectors of the first sub-context with a fixed
    5-dimensional probe vector, printing each prediction.

    NOTE(review): assumes the first sub-context uses exactly 5 features —
    verify against the configuration. Always returns 0.
    """
    instance = [1.0,0.0,0.0,0.0,0.0]
    for i in range(len(ND_Classifier[0])):
        print "Small Test #",i
        print ND_Classifier[0][i].predict(instance)
    return 0
def NoveltyDetection_BUILD(Sub_Instance, Sub_Clustering):
    """Build all sub-context novelty detectors.

    Sub_Instance[i] / Sub_Clustering[i] are the dataset and clustering of
    the i-th sub-context; the result is a parallel list where
    ND_Classifier[i][c] is the one-class detector for cluster c of
    sub-context i. A Small_Test() smoke test is run before returning.
    """
    print "NoveltyDetection_BUILD()..."
    ND_Classifier = []
    N = Sub_Instance.__len__()
    for i in range(N):
        print "#",i," Novelty Detection Classifier"
        ND_Classifier_item = ND_Classifier_BUILD(Sub_Instance[i],Sub_Clustering[i])
        ND_Classifier.append(ND_Classifier_item)
    print "NoveltyDetection_BUILD()...Completed..."
    Small_Test(ND_Classifier)
    return ND_Classifier
#----------------------------------------------
# Halt_For_Test
# Description: Halt the program for testing
#----------------------------------------------
def Halt_For_Test():
    """Busy-wait forever, deliberately halting the program for testing.

    WARNING: spins at 100% CPU and never returns; the trailing pass is
    unreachable.
    """
    while (1):
        pass
    pass
#----------------------------------------------
# Main
#----------------------------------------------
def main():
    """Interactive driver: load the configuration named by argv[1], build
    the novelty detectors, then loop on console commands.

    Commands:
        ADD <json file> - merge a new model produced by the Adaptation
                          Component into the recognition model
        AR <filename>   - run activity recognition on an instance file
    Loops forever; the final `return 0` is unreachable.
    """
    log_filename, WL_filename, Semantic_filename, Instance, Clustering, Feature, Sub_Instance, Sub_Clustering, Sub_Feature = Initialization(sys.argv[1])
    ND_Classifier = NoveltyDetection_BUILD(Sub_Instance, Sub_Clustering)
    print "===================================="
    print 'log_filename = ',log_filename
    #log file: print the Activity Recognition Component's Running Log for further processing
    print 'WaitingList_filename = ', WL_filename
    #Waiting List: instance which will be deleted
    print 'Semantic_filename = ', Semantic_filename
    #Semantic file: save semantic meaning in json format
    while True:
        command = raw_input('\nADD:ADD [json file]\nAR:AR [filename]\nplease enter the command:')
        print 'command: '+command
        command = command.split(' ')
        if command[0] == 'ADD' or command[0] == 'Add':
            print "MARCS Update Recognition Model...loading..."
            FILE = open(command[1],'rU')
            rawdata = FILE.read()
            decoded = json.loads(rawdata)
            FILE.close()
            #update the model which is generated by Adaptation Component
            #print "Instance's filename = ", command[1]
            print "Instance's filename = ", decoded["instance_filename"]
            #print "Instance clustering result's filename = ", command[2]
            print "Instance clustering result's filename = ", decoded["clustering_filename"]
            #print "Clustering Metric's filename = ", decoded["metric_filename"]
            # NOTE(review): AddModelInstance is not defined in this module, and
            # Clustering_Metric is referenced before any assignment — the ADD
            # branch will raise NameError at runtime; verify against the
            # original MARCS_Recognition.py.
            Instance, Clustering, Clustering_Metric = AddModelInstance(decoded["instance_filename"], decoded["clustering_filename"], decoded["metric_filename"], Instance, Clustering, Clustering_Metric)
            print "MARCS Update Recognition Model...finished."
        if command[0] == 'AR' or command[0] == 'ar':
            print "MARCS Activity Recognition...loading..."
            #ActivityRecognition(command[1], WL_filename, Semantic_filename, Instance, Clustering, Clustering_Metric)
            ActivityRecognition(command[1], Feature, WL_filename, Semantic_filename, ND_Classifier, Sub_Feature)
            print "MARCS Activity Recognition...finished."
            pass
        pass
    return 0
if __name__=="__main__":
try:
#jvm.start()
main()
except Exception, e:
#print(traceback.format_exc())
pass
finally:
#jvm.stop()
pass
| [
"xiexnot@gmail.com"
] | xiexnot@gmail.com |
88c304f224ab60062582abbfa1146a651e1233e6 | f21814f3b4c8217e830af48b427de0b24dc398d4 | /missing_value_count_and_percent.py | aed47ea11574bbab9b091a7ff7b5448c8d28d997 | [] | no_license | CaraFJ/Utility | 2d1dbc3f09c33d9d92bf1e602f1a01b0f3ba656e | f032e6b376d65a05fe9d25fca31794c1302ec7ed | refs/heads/master | 2021-09-08T16:47:26.173366 | 2021-09-08T04:51:04 | 2021-09-08T04:52:05 | 248,438,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 496 | py | def missing_value_count_and_percent(df):
"""
Return the number and percent of missing values for each column.
Args:
df (Dataframe): A dataframe with many columns
Return:
df (Dataframe): A dataframe with one column showing number of missing values, one column showing percentage of missing values with 4 digits
"""
df = pd.concat({'num_missing_values':df.isnull().sum(), 'pct_missing_values':df.isnull().mean().round(4)}, axis=1)
)
return df | [
"cc4167@columbia.edu"
] | cc4167@columbia.edu |
0842338e3f7c2d55baa4eaa3250871d1248a4abd | 697d01d1a887c5aef4f7ca7a54309c92caa02956 | /async_dropbox/setup.py | cc0d5e3cb77ea30f2227e47e63e85dcc5227641f | [] | no_license | blickly/dropbox-ftp | 5db87f3bd776f78dad4445858340cb583fe1d790 | d62467a68ff7ad780fc6098b5875d5967ae4b6fc | refs/heads/master | 2021-01-10T19:09:13.648838 | 2012-06-12T03:20:33 | 2012-06-12T03:20:33 | 4,081,778 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | #!/usr/bin/env python
import distutils.core
# Package version; keep in sync with release tags.
version = "0.1"
# Minimal distutils metadata: ships the single module async_dropbox.py.
distutils.core.setup(
    name="async_dropbox",
    version=version,
    py_modules=["async_dropbox"],
)
| [
"blickly@eecs.berkeley.edu"
] | blickly@eecs.berkeley.edu |
040c1c2ce4a43b0e12e31fbfae3cdadc6d3eaece | 2221003abe21e60383bd7547104571e78d857a1d | /tic.py | f48f1399b338c91b52166b8751457b7db2822858 | [] | no_license | Manish-sain-tech/Tic_tac | 1f7cd99b95eef8fd0d624837229704974f84f802 | b2b7310ae358ea43e02c5e9ca48f3d6c99d466d1 | refs/heads/master | 2022-11-28T20:55:08.273830 | 2020-08-11T04:31:05 | 2020-08-11T04:31:05 | 286,645,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,140 | py | Board=[1,2,3,4,5,6,7,8,9]
# Whose turn it is ('X' starts); Board above holds the marks, with unplayed
# cells still showing their 1-9 field number.
pieceToMove='X'
def DisplayBoard():
    """Print the 3x3 board between dashed separators."""
    print("\n----------")
    print("|",Board[0],"|",Board[1],"|",Board[2],"|")
    print("|",Board[3],"|",Board[4],"|",Board[5],"|")
    print("|",Board[6],"|",Board[7],"|",Board[8],"|")
    print("\n----------")
def makemove():
    """Read a field number (1-9) and place the current player's mark there.

    Choosing an occupied or invalid field costs the player the turn,
    matching the original "you lose your turn" rule for occupied cells.
    """
    print("Press the number of the field :")
    position=int(input())
    # BUG FIX: the original indexed Board[position-1] unchecked, so entering
    # 0 silently targeted cell 9 (negative index) and anything above 9
    # crashed the game with IndexError.
    if position < 1 or position > 9:
        print("Player ",pieceToMove," chose an invalid field (1-9) and loses the turn")
    elif ((Board[position-1]=='X') or (Board[position-1]=='O')):
        print("Player ",pieceToMove," choose already occupied field you lose your turn")
    else:
        Board[position-1]=pieceToMove
def checkBoard():
    """Return 'X' or 'O' if that player has three in a row, else '/'.

    All of X's lines are checked before any of O's, preserving the
    original precedence of the hand-written if-chains.
    """
    # The eight winning index triples (rows, columns, diagonals).  This
    # replaces sixteen duplicated if-statements and drops the dead
    # "win=1;" assignment the original left behind.
    lines = ((0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (0, 4, 8), (2, 4, 6))
    for mark in ('X', 'O'):
        for a, b, c in lines:
            if Board[a] == mark and Board[b] == mark and Board[c] == mark:
                return mark
    return '/'
#main
# Greet the players; any number starts the game, 0 exits immediately.
print("Welcome to the TicTacToe game!\nTake turns entering the position (1..9)\ninto which your piece will be placed.\nPress any number to start\nenter '0' for exit");
k=int(input())
if k==0:
    exit()
else:
    DisplayBoard();
# Game loop: alternate marks until one player completes a line.
while (1):
    makemove()
    DisplayBoard()
    if (checkBoard() == 'X'):
        print( "X wins!" );
        break;
    elif (checkBoard() == 'O'):
        print( "O wins!" );
        break;
    # NOTE(review): a drawn (full) board is never detected, so the loop
    # would keep prompting forever in that case.
    if pieceToMove=='X':
        pieceToMove='O'
    else:
        pieceToMove='X'
| [
"noreply@github.com"
] | Manish-sain-tech.noreply@github.com |
a6e07d6df2af9961b2811bf18cceadb594a09b55 | c2f8b58eed5cd86f3170d334f20f96e6ec4347a2 | /assetel_holidays/models/hr_leave_table.py | 09217d069da12600f6d753252d4fc76d761da658 | [] | no_license | marcoanc95/recursos-humanos | fb34496f1bbe3e4f144d602993af08e748cb29d6 | 9dc04dad06167c9dec2fdb0dd6845ae7c61850e2 | refs/heads/main | 2023-07-04T20:49:22.845367 | 2021-08-12T22:23:30 | 2021-08-12T22:23:30 | 395,365,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | # -*- coding: utf-8 -*-
from odoo import models, api, fields, exceptions
class HrLeaveCalculation(models.Model):
    """Lookup table mapping seniority (years) to holiday days per leave type."""
    _name = 'hr.leave.table'
    # Purely additive metadata: Odoo logs a warning for models that define
    # no _description, and uses it as the model's display name.
    _description = 'Holiday Entitlement by Seniority'

    # Leave type this row applies to.
    hr_leave_type_id = fields.Many2one('hr.leave.type', string='Tipo de ausencia')
    # Seniority threshold, in years.
    years = fields.Float('Antigüedad / Años')
    # Holiday days granted at that seniority.
    holidays = fields.Float('Vacaciones / Días')
| [
"marcoanc95@gmail.com"
] | marcoanc95@gmail.com |
e80fec56cc5ce8d2dc64e1e008e4957a6efed2d4 | 564cc02902ea29d4bf2f29767d5b616ebabe85ec | /panic2.py | 890c6bac3c6c6399904b5d4d044e8df2b62f27d2 | [
"MIT"
] | permissive | melvin10001/pythonStuff | db40fbb99664448f7ef9fba5bfde63f54d643673 | 3d2851884b398cb2a0a24a7758710ce0beadecd7 | refs/heads/master | 2021-01-01T18:50:05.686025 | 2018-05-06T16:38:29 | 2018-05-06T16:38:29 | 98,442,337 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | phrase = "Don't panic!"
plist = list(phrase)
print(phrase)
print(plist)
# Slice out "on't pa" and shuffle the pieces: first ends up as
# ['o','n',' ','t','p','a'], and the final print reassembles "on t" plus the
# last two characters reversed ("ap"), producing "on tap".
plist = plist[1:8]
first = plist[0:2]
second = plist[3:7]
first.extend(second[1])
first.extend(second[0])
first.extend(second[2:])
print(''.join(first[0:4]) + ''.join(first[-1:-3:-1]))
| [
"mtrojanowski@networkedassets.com"
] | mtrojanowski@networkedassets.com |
cd77f89cfdd51067f94362fccd39ce7853994842 | a391d8e1eead39991c067fab334ea532634e2724 | /test/conftest.py | 3b11148b1a7c27ff2401333a4e185a6598119c2f | [
"MIT"
] | permissive | CSS-Electronics/canedge_manager | 08c38448c65d841847167798f8c7297a2a04f6c2 | 5a78cc72d58e2b38911e8ec63b57ee49ed40d476 | refs/heads/master | 2023-02-07T07:16:35.518044 | 2023-01-24T09:41:30 | 2023-01-24T09:41:30 | 212,292,612 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 731 | py | import pytest
from time import sleep
from test.s3Client import S3Client
from test.s3ServerMinio import S3ServerMinio
@pytest.fixture
def bucket_name():
    """Name of the scratch bucket used by the S3 tests."""
    return 'testbucket'
@pytest.fixture
def s3_server(tmpdir):
    """Start a throwaway MinIO server for the test; stop it on teardown."""
    server = S3ServerMinio(tmpdir)
    server.start()
    # NOTE(review): fixed sleeps assume the server is ready within 5s and
    # fully stopped within 1s -- a readiness/shutdown poll would be sturdier.
    sleep(5)
    yield server
    server.stop()
    sleep(1)
@pytest.fixture
def s3_client(s3_server, bucket_name):
    """Client connected to the test server, with a freshly emptied bucket."""
    client = S3Client(endpoint=s3_server.endpoint_ip + ":" + str(s3_server.port),
                      access_key=s3_server.accesskey,
                      secret_key=s3_server.secretkey,
                      secure=False)
    # Clear bucket
    client.remove_bucket_r(bucket_name)
    client.make_bucket(bucket_name)
    return client
| [
"css@csselectronics.com"
] | css@csselectronics.com |
49975c06849167c6e24ad5be9fe79b31421d92d1 | 3823184746aafe5b66a6f574f2793cd80decc9a5 | /app/forms.py | b1e5a4869115f3a657153957a39a26f3ef6af8d4 | [] | no_license | sunillucky033/todo | 15b8bb1b42530047803696a2e973c3b78e702f4f | b230770826fe6523f9fa6cfe84e139d09d9eb4f3 | refs/heads/master | 2022-06-23T14:27:53.358774 | 2020-05-03T12:58:58 | 2020-05-03T12:58:58 | 260,913,305 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from django import forms
from django.contrib.auth.models import User
from app.models import userinfo
class userform(forms.ModelForm):
    """Registration form for Django's built-in User (username, email, password)."""
    # Masked password input.  NOTE(review): a plain ModelForm.save() would
    # store this value unhashed -- the view must call user.set_password().
    password=forms.CharField(widget=forms.PasswordInput())
    class Meta:
        model=User
        fields=('username','email','password')
class userinfoform(forms.ModelForm):
    """Extra profile fields stored on the app's userinfo model."""
    class Meta:
        model=userinfo
fields=('info','portfolio_site') | [
"sunillucky033@gmail.com"
] | sunillucky033@gmail.com |
22aa6514e869702a198caed550e4d96f263b0800 | 8bfab2455f989bf091ba45181a4b4248226974c2 | /timeDiff.py | 1906cc5212fa4d700a91d675b6366c174ddeb5d2 | [] | no_license | itsmesiva80/PythonLearnings | 7f6f83abcdc775d2302e34ae52f02affdc9db494 | dd3564dcf2c46320fb7eb35b53f12a2bccc83ac3 | refs/heads/master | 2020-12-01T08:48:41.660576 | 2018-02-13T15:12:19 | 2018-02-13T15:12:19 | 67,794,885 | 2 | 0 | null | 2016-09-14T10:08:56 | 2016-09-09T11:52:00 | Python | UTF-8 | Python | false | false | 703 | py | import datetime
from datetime import timedelta
from openpyxl import Workbook, load_workbook
# Workbook with build timestamps: columns 3 and 4 hold times as
# "YYYY-MM-DD HH:MM" strings; column 5 receives the computed duration.
wb = load_workbook('/users/sthammana/Documents/CI_Jenkis_List.xlsx')
ws = wb.active
# Rows 2-85 (header in row 1); the upper bound is hard-coded to this sheet.
for i in range(2,86):
    cellVal = str(ws.cell(row=i,column = 3).value)
    cellVal1 = ws.cell(row=i,column = 3).value
    cellVal2 = ws.cell(row=i,column = 4).value
    # Skip rows where column 3 is empty (str(None) == "None").
    if cellVal != "None":
        # Duration = col3 - col4, expressed in minutes via timedelta division.
        timeDiff = datetime.datetime.strptime(cellVal1, "%Y-%m-%d %H:%M")-datetime.datetime.strptime(cellVal2, "%Y-%m-%d %H:%M")
        minDiff=timeDiff/timedelta(minutes=1)
        # Cosmetic cleanup: "90.0" -> "90 minutes"; replace("-","") strips the
        # sign of negative durations.  NOTE(review): that hides swapped
        # start/end columns -- confirm this is intended.
        minDiff=str(minDiff).replace(".0"," minutes").replace("-","")
        ws.cell(row=i,column = 5).value = minDiff
        print(minDiff)
wb.save('/users/sthammana/Documents/CI_Jenkis_List.xlsx')
| [
"itsmesiva80@gmail.com"
] | itsmesiva80@gmail.com |
724c2486cf036cf7a32b21e379fb8c80dca44296 | c489b42e8184f7e12aa549742056e5cf5ce5995f | /random_control/__init__.py | 825c4f13f96142717a3de9939e55a00a578e4b80 | [
"BSD-3-Clause-LBNL"
] | permissive | gonzalorodrigo/ScSFWorkload | 7958352b0f982e7dce98d78b32b64a7fc067747e | 2301dacf486df8ed783c0ba33cbbde6e9978c17e | refs/heads/master | 2020-04-26T10:47:18.942170 | 2019-03-02T20:51:27 | 2019-03-02T20:51:27 | 173,496,458 | 1 | 1 | NOASSERTION | 2019-03-02T20:51:28 | 2019-03-02T20:38:24 | Python | UTF-8 | Python | false | false | 1,341 | py | """ This package provides a global random generator to be able to run
experiments with determinsim.
All code using get_rand_gen will use the same random generator if
previously set_globral_random_ge has been used with one of its parameters
not None.
To repeat experiments, use set_global_random_gen before the experiment
with the same "seed" value.
"""
import random
global_random_gen = None
def set_global_random_gen(seed=None,random_gen=None):
    """Install the module-wide random generator.

    When *seed* is given (any hashable value), a fresh ``random.Random``
    seeded with it becomes the global generator and *random_gen* is
    ignored.  Otherwise *random_gen* itself (possibly ``None``) is
    installed.  Re-running an experiment after setting the same seed
    reproduces its random stream.
    """
    global global_random_gen
    if seed is None:
        global_random_gen = random_gen
    else:
        fresh = random.Random()
        fresh.seed(seed)
        global_random_gen = fresh
def get_random_gen(seed=None):
    """Return the shared global generator when one is installed.

    Falls back to building a fresh ``random.Random`` seeded with *seed*
    when no global generator has been set.
    """
    global global_random_gen
    if global_random_gen is not None:
        return global_random_gen
    r = random.Random()
    r.seed(a=seed)
return r | [
"grodrigo@apple.com"
] | grodrigo@apple.com |
cca009fb5e8ede0c9e13e2e0380ed2c2d39b40df | 1a990ade17103e4b5c259bffbb4143c33c6abaf7 | /resources/migrations/0032_auto_20171024_0841.py | 2ee362c58aaf0b7b20070a45943701dbc251c53e | [] | no_license | mindwaveventures/good-thinking | 538fc4ae6dcc9e9cf1a80605f20dc5175849ffa2 | ab2ee645a5ec22268808f12a9a844b3c07e3a3b3 | refs/heads/master | 2021-06-26T20:31:08.972260 | 2019-01-31T10:31:26 | 2019-01-31T10:31:26 | 88,872,298 | 1 | 3 | null | 2019-01-01T17:00:25 | 2017-04-20T14:02:49 | HTML | UTF-8 | Python | false | false | 612 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-24 08:41
from __future__ import unicode_literals
from django.db import migrations
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations (see header); alters the
    # help_text/max_length of Home.description.  Do not edit by hand
    # except to squash.
    dependencies = [
        ('resources', '0031_resourcepagebuttons'),
    ]
    operations = [
        migrations.AlterField(
            model_name='home',
            name='description',
            field=wagtail.wagtailcore.fields.RichTextField(blank=True, help_text='\n    A short description of the page that will show on the homepage\n    ', max_length=206),
        ),
    ]
| [
"katherine.bowler@gmail.com"
] | katherine.bowler@gmail.com |
03054773cef5f4e14df549a43a70cc82f3b67c06 | 2371ae6248132276409733d31e1f9d181fa3d3dd | /netcat_python/nc.py | b9388ea59f8b9134413f3c8d7bbdd7e6b8b34250 | [] | no_license | mcardacci/tools_of_the_dark_arts | 859f6ad663cbc76939bf8de47a8a9bb317c7ff57 | 3197b9c0ea39285f420d6ee03366e3fddfccadef | refs/heads/master | 2021-01-20T06:12:46.117021 | 2017-10-20T14:44:47 | 2017-10-20T14:44:47 | 89,854,570 | 0 | 1 | null | 2017-10-20T14:44:48 | 2017-04-30T14:08:31 | Python | UTF-8 | Python | false | false | 712 | py | #!/usr/bin/python
import socket
import subprocess
def netcat(hostname, port,command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((hostname, port))
op = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
if op:
output=str(op.stdout.read())
print "Output:",output
s.sendall(output)
else:
error=str(op.stderr.read())
print "Error:",error
s.sendall(error)
s.shutdown(socket.SHUT_WR)
while 1:
data = s.recv(1024)
if data == "":
break
print "Received:", repr(data)
print "Connection closed."
s.close()
netcat('127.0.0.1', 4444, 'pwd')
| [
"marco.cardacci@gmail.com"
] | marco.cardacci@gmail.com |
a9c59d6072d9aa0da5ae5339a28361d16a0b0e79 | 3595ad597f6314aeb8d40b7c516bd5fd510ded3c | /setup.py | 035b1cbcbfb8bb73b92f9619a161fa9b34388627 | [] | no_license | l34marr/shiji.content | 47a4ec38396fde3180e39ea8d04ebcecf3c5848b | d6a106337145ab385a9713cb81267ea5d57dd2a5 | refs/heads/master | 2021-01-01T19:42:34.536306 | 2014-07-28T09:03:00 | 2014-07-28T09:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | from setuptools import setup, find_packages
import os
version = '0.1'
setup(name='shiji.content',
version=version,
description="ShiJi Content Types",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.rst")).read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='',
author_email='',
url='http://github.com/l34marr/shiji.content',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['shiji'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.dexterity [grok]',
'plone.namedfile [blobs]',
'collective.dexteritytextindexer'
],
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
# The next two lines may be deleted after you no longer need
# addcontent support from paster and before you distribute
# your package.
# setup_requires=["PasteScript"],
# paster_plugins = ["ZopeSkel"],
)
| [
"marr.tw@gmail.com"
] | marr.tw@gmail.com |
b9515ac414e5682d45d712d99903e92145a5210f | 72319a157507386e7ef9210de3b1e1ab673dcd8c | /activities/jawbone/__init__.py | e5aa8a881b2b208cab7cfcd8a09df8c1327e542b | [
"MIT"
] | permissive | mcescalante/open-humans | 9ddd2b9c89094f05492cb10eebfd9994ecffbc95 | fe474530c8492ad9925e91f72e7736406b7e42e6 | refs/heads/master | 2021-04-29T18:00:54.049868 | 2018-03-05T06:02:20 | 2018-03-05T06:02:20 | 121,683,595 | 0 | 0 | null | 2018-02-15T21:14:39 | 2018-02-15T21:14:38 | null | UTF-8 | Python | false | false | 93 | py | default_app_config = '{}.apps.JawboneConfig'.format(__name__)
label = __name__.split('.')[1]
| [
"beau@beaugunderson.com"
] | beau@beaugunderson.com |
a386f6d6650824820e3f94af51c88e231351858e | 93ada1900fd2864dea285597080eac21dce652ca | /deploy/app.py | 0e9ddfdf47d6c1f2f721f8b37dcc52c7476e5039 | [] | no_license | elcolie/katacoda-k8s | a0c679705dbeb5d8138e32e781914849708f6dfd | 95ea939d50e2dc44530f0fec7b83b02547413a78 | refs/heads/master | 2020-04-26T09:19:25.022680 | 2019-03-15T03:36:48 | 2019-03-15T03:36:48 | 173,451,670 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,040 | py | #!/usr/bin/python
import time, requests, os
from flask import Flask
app = Flask(__name__)
# Process start time, captured once at import.
START = time.time()

def elapsed():
    """Return the uptime since module import, formatted as H:MM:SS."""
    total_seconds = int(time.time() - START)
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def update():
    """Send a Google Analytics 'tutorial' event for this instance.

    Reads a client id from uuid.txt (falling back to '100'), and reports
    event action 'step5' when the KUBERNETES_SERVICE_HOST env var is
    present (i.e. running inside a Kubernetes pod), 'step2' otherwise.
    """
    try:
        file = open("uuid.txt","r")
        cid = file.read()
    # NOTE(review): bare except hides real errors, and the file handle is
    # never closed on the success path.
    except:
        cid = '100'
    if "KUBERNETES_SERVICE_HOST" in os.environ:
        ea = 'step5'
    else:
        ea = 'step2'
    # Fire-and-forget measurement-protocol hit; the response is ignored.
    r = requests.post('http://www.google-analytics.com/collect',
                      data = {'v': 1,
                              'tid': 'UA-57322503-11',
                              'cid': cid,
                              't': 'event',
                              'ec': 'tutorial',
                              'ea': ea
                              })
@app.route('/')
def root():
    """Landing page: fire the analytics event and report uptime."""
    update()
    return "Hello World! (up %s)\n" % elapsed()
# Dev-server entry point (container platforms typically run this directly).
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080) | [
"sarit.r@hbot.io"
] | sarit.r@hbot.io |
b0d956a4ca3ec1ac31bf5cad07abae11dbaf66c2 | 3e22a1c2ea0c7004a87d899bb4081880d5f00a1e | /app/app/main.py | e16b7f64fb8b44c6ed449846a8838fe16d27c174 | [] | no_license | pariazeynali/example-flask-package-python3.8 | d0ad4ba70fe531b4868ebf608d2a30af4be69e63 | 2602a7a3f269d06dccaf593e7f229945e40415ec | refs/heads/master | 2023-07-16T15:30:32.878157 | 2021-08-09T12:06:28 | 2021-08-09T12:06:28 | 394,275,849 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,427 | py | import os
import sys
from flask import Flask
from flask_jwt_extended import JWTManager
from flask_restful import Api
from dotenv import load_dotenv, find_dotenv
from db.db_session import engine
from db.modelbase import SQLAlchemyBase
from resourse.user_resourse import UserRegister, User, UsersList, UserLogin
from resourse.package_resourse import Package, PackageList, AddPackage
folder = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, folder)
app = Flask(__name__)
app.config['PROPAGATE_EXCEPTION'] = True
app.secret_key = os.getenv('SECRET_KEY')
api = Api(app)
def main():
    """Create any missing tables, then start the development server."""
    setup_db()
    # NOTE(review): debug=True must not ship to production.
    app.run(host="0.0.0.0", debug=True, port=81)
# NOTE(review): the .env file is loaded here, AFTER app.secret_key was read
# from os.getenv('SECRET_KEY') above -- a secret defined only in .env is
# therefore missed.  Moving this call before the app configuration fixes it.
load_dotenv(find_dotenv())
# noinspection PyUnresolvedReferences
def setup_db():
    """Import every model module so SQLAlchemy sees all tables, then create them."""
    import data.__all_models
    SQLAlchemyBase.metadata.create_all(engine)
jwt = JWTManager(app)
@jwt.user_claims_loader
def add_claims_to_jwt(identity):
    # Grant the admin claim only to the username from ADMIN_USERNAME.
    admin_username = os.getenv('ADMIN_USERNAME')
    if identity == admin_username:
        return {'is_admin': True}
    return {'is_admin': False}
# REST route registrations.
api.add_resource(UserRegister, '/register')
api.add_resource(Package, '/package/<string:name>')
api.add_resource(AddPackage, '/add-package')
api.add_resource(PackageList, '/packages')
api.add_resource(User, '/user/<string:username>')
api.add_resource(UsersList, '/users')
api.add_resource(UserLogin, '/login')
if __name__ == '__main__':
    main()
| [
"pariazeynali1999@gmail.com"
] | pariazeynali1999@gmail.com |
2be3a8b7890facff2cf6c87368ccbe0da31909e0 | ca57827a11dc243f3a173b444b3e7feed1b99316 | /tpot_iris_pipeline_g10_ps40.py | c9bc88b1cff5a7a18f75c84fd669a6364cf33e00 | [] | no_license | haroonsaeed82096/AutoML | 41e4d92ab82ece16867e2dc6445e41ac4b14ccab | 7ebdbba1271164ea89a2d5e358258abe82e22c56 | refs/heads/master | 2020-03-27T03:43:16.029196 | 2018-08-23T20:31:27 | 2018-08-23T20:31:27 | 145,846,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 974 | py | import numpy as np
import pandas as pd
from sklearn.kernel_approximation import Nystroem
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.tree import DecisionTreeClassifier
# NOTE: Make sure that the class is labeled 'target' in the data file
# (TPOT-exported pipeline: fill in the data path and separator before running.)
tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR', dtype=np.float64)
features = tpot_data.drop('target', axis=1).values
training_features, testing_features, training_target, testing_target = \
    train_test_split(features, tpot_data['target'].values, random_state=42)
# Score on the training set was:0.9738801054018446
# Pipeline: cosine-kernel Nystroem feature map feeding a depth-2 decision tree.
exported_pipeline = make_pipeline(
    Nystroem(gamma=0.35000000000000003, kernel="cosine", n_components=2),
    DecisionTreeClassifier(criterion="gini", max_depth=2, min_samples_leaf=1, min_samples_split=8)
)
exported_pipeline.fit(training_features, training_target)
results = exported_pipeline.predict(testing_features)
| [
"noreply@github.com"
] | haroonsaeed82096.noreply@github.com |
64b0a2ec24709df055ec141ef2b359a29030c8cd | fa9e05a132ab03a1c266ba811c645820123cf161 | /Bounds_classifier_comparison/Test_ML/MLPC_skl_module.py | 968eca01f717dbca4e23b637d2a513d35b1ccc02 | [] | no_license | Edgar-La/Machine_Learning | b1f9dd72f1de916882b0bee79707a9a62d664fdf | cfc7c6fe158a4a49be7e9727c286b98fd9d6756d | refs/heads/main | 2023-05-22T15:28:27.838947 | 2021-06-12T19:29:13 | 2021-06-12T19:29:13 | 334,308,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | import numpy as np
from sklearn.neural_network import MLPClassifier
#This method is call by the MAIN SCRIPT
def run_MLPC_skl(X, y_label, xx, yy, Epochs=1):
    """Fit an sklearn MLP on (X, y_label) and predict over a mesh grid.

    Predicts every point of the (xx, yy) grid and returns the predictions
    reshaped to the grid's shape (ready for contour plotting).
    """
    model = MLPClassifier(random_state=1, max_iter=Epochs)
    model.fit(X, y_label)
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    A = model.predict(grid_points)
    Z = A.reshape(xx.shape)
return Z | [
"laraae2018@licifug.ugto.mx"
] | laraae2018@licifug.ugto.mx |
27b94c9d7849b71176bca1cb1da910235230ce4d | c087e0bbeeac080335240c05255bd682cfea100e | /remap_reads_consensus.py | f0decb7ac4e13e3cab2add1986e43a77371c997a | [] | no_license | ifiddes/notch2nl_10x | f537481da544ec5e3c62a2899b713b4cb68e7285 | 35cfd95b0e7563bad0c5d2354fd7be526bc3a39d | refs/heads/master | 2021-01-10T10:18:59.098115 | 2016-03-24T17:43:03 | 2016-03-24T17:43:03 | 50,366,711 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,718 | py | """
Run the traditional WGS-SUN based pipeline on 10x data to compare to the results
"""
import pysam
import sys
import vcf
import string
import itertools
import numpy as np
import argparse
import tempfile
import os
import subprocess
from pyfasta import Fasta
from operator import itemgetter
from itertools import groupby
from collections import Counter, defaultdict
sys.path.append("/hive/users/ifiddes/pycbio")
from pycbio.sys.procOps import runProc, callProc
from pycbio.sys.fileOps import tmpFileGet
from pycbio.sys.mathOps import format_ratio
import matplotlib
matplotlib.use('Agg')
matplotlib.rcParams['pdf.fonttype'] = 42
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
def parse_args():
    """CLI: input 10x BAM, output plot PDF, optional consensus-aligned BAM
    path and consensus reference/VCF overrides."""
    parser = argparse.ArgumentParser()
    parser.add_argument('inBam', help='(10x) bamfile to remap')
    parser.add_argument('outPdf', help='path to write plot to')
    parser.add_argument('--outBam', default=None, help='path to write consensus aligned bam to')
    parser.add_argument('--consensusVcf', default='/hive/users/ifiddes/notch2nl_suns/Notch2NL_SUN_UniqueIndels_ConsensusRef.vcf.gz')
    parser.add_argument('--consensusRef', default='/hive/users/ifiddes/notch2nl_suns/notch2_aligned_consensus.fasta')
    return parser.parse_args()
# [chrom, start, stop, name] windows for NOTCH2 and its paralogs
# (reference genome build not recorded here -- confirm before reuse).
regions = [['chr1', 119990189, 120163923, 'Notch2'],
           ['chr1', 146149601, 146329308, 'Notch2NL-A'],
           ['chr1', 148597945, 148786127, 'Notch2NL-B'],
           ['chr1', 149349476, 149477855, 'Notch2NL-C'],
           ['chr1', 120706154, 120801963, 'Notch2NL-D']]
def extract_reads(bam, offset=50000):
    """Extract reads overlapping the Notch2 regions (padded by *offset* bp)
    from *bam* into a shuffled, interleaved FASTQ file.

    Pipeline: samtools view (region subset) | bamshuf (randomize order)
    | bam2fq.  Returns the path of the FASTQ file.
    """
    tmp_reads = tmpFileGet(suffix='reads.fq')
    tmp_shuf = tmpFileGet()
    region_strs = ['{}:{}-{}'.format(chrom, start - offset, stop + offset) for chrom, start, stop, para in regions]
    view_cmd = ['samtools', 'view', '-b', bam]
    view_cmd.extend(region_strs)
    cmd = [view_cmd,
           ['samtools', 'bamshuf', '-Ou', '-', tmp_shuf],
           ['samtools', 'bam2fq', '-']]
    # NOTE(review): the opened handle tmp_paired_h is unused -- runProc is
    # given the *path* here (remap_reads passes the handle instead), and the
    # tmp_shuf scratch prefix is never cleaned up.  Confirm intent.
    with open(tmp_reads, 'w') as tmp_paired_h:
        runProc(cmd, stdout=tmp_reads)
    return tmp_reads
def remap_reads(tmp_reads, index, out_bam):
    """Align interleaved FASTQ *tmp_reads* to *index* with `bwa mem -p`,
    writing a coordinate-sorted BAM to *out_bam*, then index it."""
    sort_tmp = tmpFileGet()
    cmd = [['bwa', 'mem', '-p', index, tmp_reads],
           ['samtools', 'view', '-b', '-'],
           ['samtools', 'sort', '-T', sort_tmp, '-O', 'bam', '-']]
    with open(out_bam, 'w') as f_h:
        runProc(cmd, stdout=f_h)
    # Create the .bai index next to the output BAM.
    cmd = ['samtools', 'index', out_bam]
    runProc(cmd)
def build_remapped_bam(in_bam, consensus_ref, out_bam):
    """Extract reads overlapping the Notch2 regions from *in_bam* and
    realign them against *consensus_ref*, producing a sorted, indexed BAM
    at *out_bam*.
    """
    tmp_reads = extract_reads(in_bam)
    try:
        remap_reads(tmp_reads, consensus_ref, out_bam)
    finally:
        # Robustness fix: previously the temporary FASTQ leaked whenever
        # remapping raised; always clean it up.
        os.remove(tmp_reads)
def pileup(out_bam, vcf_path):
    """For every SNV in the SUN VCF, compute each sample's SUN read fraction.

    For each non-indel record, runs `samtools mpileup` at that position and,
    for every sample (paralog) whose diagnostic base is unique among the
    samples, records [position, weighted fraction of reads with that base].

    Returns a dict: sample name -> list of [pos, fraction].
    """
    bases = {"A", "T", "G", "C", "a", "t", "g", "c"}
    vcf_handle = vcf.Reader(open(vcf_path))
    wgs_results = defaultdict(list)
    for vcf_rec in vcf_handle:
        if vcf_rec.is_indel:
            continue
        # Single-base region string, e.g. "chr1:123-123".
        pos_str = "{0}:{1}-{1}".format(vcf_rec.CHROM, vcf_rec.POS)
        cmd = ['samtools', 'mpileup', '-r', pos_str, out_bam]
        mpileup_rec = callProc(cmd).split()
        # Field 4 of mpileup output is the read-bases string at this site.
        pile_up_result = Counter(x.upper() for x in mpileup_rec[4] if x in bases)
        sample_dict = {s.sample: s.gt_bases for s in vcf_rec.samples}
        for s in vcf_rec.samples:
            # Keep only bases unique to exactly one sample (true SUNs).
            if len([x for x in sample_dict.itervalues() if x == s.gt_bases]) != 1:
                continue
            if s.gt_bases is None:
                continue
            # NOTE(review): raises IndexError/ZeroDivisionError when mpileup
            # reports no coverage at a site -- confirm inputs guarantee
            # coverage everywhere.
            c = 1.0 * pile_up_result[s.gt_bases] / len(mpileup_rec[4])
            # Down-weight by the fraction of samples with a called base.
            c *= 1.0 * len([x for x in sample_dict.itervalues() if x is not None]) / len(sample_dict)
            wgs_results[s.sample].append([vcf_rec.POS, c])
    return wgs_results
def plot_results(wgs_results, out_pdf, aln_size):
    """Plot per-paralog SUN fractions along the consensus alignment.

    One subplot per paralog; vlines show the SUN read fraction at each
    position, with zero-support SUNs drawn in a contrasting palette color.
    Saves the figure to *out_pdf*.
    """
    paralogs = ['Notch2', 'Notch2NL-A', 'Notch2NL-B', 'Notch2NL-C', 'Notch2NL-D']
    fig, plots = plt.subplots(5, sharey=True, sharex=True)
    plt.yticks((0, 0.1, 0.2, 0.3, 0.4))
    plt.ylim((0, 0.4))
    # Tick every 10 kb, out to the alignment length rounded to 10 kb.
    xticks = range(0, int(round(aln_size / 10000.0) * 10000.0), 10000)
    plt.xticks(xticks, rotation='vertical')
    plt.xlim((0, aln_size))
    plt.xlabel("Alignment position")
    for i, (p, para) in enumerate(zip(plots, paralogs)):
        p.set_title(para)
        wgs = wgs_results[para]
        xvals, yvals = zip(*wgs)
        p.vlines(xvals, np.zeros(len(xvals)), yvals, color=sns.color_palette()[0], alpha=0.7, linewidth=0.8)
        # mark the zeros
        # (zero-fraction SUNs get a fixed 0.02 height so they stay visible)
        zero_wgs = [[x, y + 0.02] for x, y in wgs if y == 0]
        if len(zero_wgs) > 0:
            z_xvals, z_yvals = zip(*zero_wgs)
            p.vlines(z_xvals, np.zeros(len(z_xvals)), z_yvals, color=sns.color_palette()[2], alpha=0.7, linewidth=0.8)
    plt.tight_layout(pad=2.5, h_pad=0.25)
    # Proxy artists for the legend (vlines do not auto-generate handles).
    zero_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[2])
    reg_line = matplotlib.lines.Line2D([], [], color=sns.color_palette()[0])
    fig.legend(handles=(reg_line, zero_line), labels=["WGS SUN Fraction", "WGS Missing SUN"], loc="upper right")
    fig.text(0.01, 0.5, 'SUN fraction of reads', va='center', rotation='vertical')
    plt.savefig(out_pdf, format="pdf")
    plt.close()
def get_aln_size(consensus_ref):
    """Return the length of the single sequence in the consensus FASTA."""
    fasta = Fasta(consensus_ref)
    assert len(fasta) == 1
    only_name = list(fasta.keys())[0]
    return len(fasta[only_name])
def main():
    """End-to-end: remap reads to the consensus, pile up SUNs, plot results."""
    args = parse_args()
    # Keep the remapped BAM only when the caller asked for it.
    if args.outBam is None:
        out_bam = tmpFileGet(suffix='merged.sorted.bam')
    else:
        out_bam = args.outBam
    build_remapped_bam(args.inBam, args.consensusRef, out_bam)
    wgs_results = pileup(out_bam, args.consensusVcf)
    aln_size = get_aln_size(args.consensusRef)
    plot_results(wgs_results, args.outPdf, aln_size)
    if args.outBam is None:
        os.remove(out_bam)
        # NOTE(review): the .bai index created by remap_reads for the
        # temporary BAM is left behind -- confirm whether it should go too.
if __name__ == '__main__':
    main()
| [
"ian.t.fiddes@gmail.com"
] | ian.t.fiddes@gmail.com |
ba30cd7ebf016f1a46e3282de564f20f6562d69f | 0612985c5fb348e62b31ce51da9d28264f82a6fb | /wedapp/apps.py | 96ff9f3304255d5dc9203ebbf8390c48410555e8 | [] | no_license | ericyoary/wedplanner_project | 0fcf30bdee7b13fe9b43e7a961dda85598f55e9c | 83af35abd33a0db84a359b9aed69950bd4a6641a | refs/heads/master | 2022-12-25T08:25:18.608476 | 2020-09-26T19:36:09 | 2020-09-26T19:36:09 | 298,885,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | from django.apps import AppConfig
class WedappConfig(AppConfig):
    """App configuration for the 'wedapp' Django application."""
    name = 'wedapp'
| [
"ericyoary@gmail.com"
] | ericyoary@gmail.com |
a6dcdfd8af06b62255f6da6c0506963344aa4643 | 413ba764a64cea4da09b51c426c3715fd7c79f60 | /contacts/views.py | 62ef94778802154fccbd7d4d20b073d74b1af0ec | [] | no_license | luqmanshof/carzone-app | 1a308f77dbde6b8ab80cd4458afb31cff6479d2f | d7e4e283a55ce7c1eb3cd74766c482b8ae773bf6 | refs/heads/master | 2022-12-25T03:04:01.539645 | 2020-10-04T06:42:51 | 2020-10-04T06:42:51 | 295,472,107 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | from django.shortcuts import render
# Create your views here.
def inquiry(request):
    # NOTE(review): stub view -- returns None, which Django rejects (a view
    # must return an HttpResponse).  Implement before routing to it.
    return
| [
"luqmanshof@gmail.com"
] | luqmanshof@gmail.com |
f59db1371af75f94b82190561a99278bcd02b079 | 551b75f52d28c0b5c8944d808a361470e2602654 | /huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/balancer_active_window.py | aeea9247f02f3b36a9f8fd0019a8e52731f28dcd | [
"Apache-2.0"
] | permissive | wuchen-huawei/huaweicloud-sdk-python-v3 | 9d6597ce8ab666a9a297b3d936aeb85c55cf5877 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | refs/heads/master | 2023-05-08T21:32:31.920300 | 2021-05-26T08:54:18 | 2021-05-26T08:54:18 | 370,898,764 | 0 | 0 | NOASSERTION | 2021-05-26T03:50:07 | 2021-05-26T03:50:07 | null | UTF-8 | Python | false | false | 3,518 | py | # coding: utf-8
import pprint
import re
import six
class BalancerActiveWindow:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'start_time': 'str',
'stop_time': 'str'
}
attribute_map = {
'start_time': 'start_time',
'stop_time': 'stop_time'
}
def __init__(self, start_time=None, stop_time=None):
"""BalancerActiveWindow - a model defined in huaweicloud sdk"""
self._start_time = None
self._stop_time = None
self.discriminator = None
self.start_time = start_time
self.stop_time = stop_time
@property
def start_time(self):
"""Gets the start_time of this BalancerActiveWindow.
活动时间窗开始时间。
:return: The start_time of this BalancerActiveWindow.
:rtype: str
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this BalancerActiveWindow.
活动时间窗开始时间。
:param start_time: The start_time of this BalancerActiveWindow.
:type: str
"""
self._start_time = start_time
@property
def stop_time(self):
"""Gets the stop_time of this BalancerActiveWindow.
活动时间窗结束时间。
:return: The stop_time of this BalancerActiveWindow.
:rtype: str
"""
return self._stop_time
@stop_time.setter
def stop_time(self, stop_time):
"""Sets the stop_time of this BalancerActiveWindow.
活动时间窗结束时间。
:param stop_time: The stop_time of this BalancerActiveWindow.
:type: str
"""
self._stop_time = stop_time
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
    def to_str(self):
        """Returns the string representation of the model"""
        # Pretty-printed dump of the serialised dict.
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BalancerActiveWindow):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"hwcloudsdk@huawei.com"
] | hwcloudsdk@huawei.com |
af24e5ccf9f09045985f9aa0dea79df89ac85ffc | 5bd61abada0114a7e9ebfb94b76128a6f3ea39f7 | /ic_uber/interface/ic_uber/selecting/selecting/urls.py | 5fc353da9886f41455a8d7165cb55d03a37cf36d | [] | no_license | karolinefirmino/django_code | 9606a2938f5631094b6ef16c405a9d74943f2902 | ce51d77941c178b853b1d9db2c66ebfd8d5c06ef | refs/heads/master | 2023-03-19T01:08:26.387127 | 2021-03-11T04:38:27 | 2021-03-11T04:38:27 | 346,559,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | """selecting URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Only the auto-generated admin route is exposed for this project.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"krodrigues.firmino2@gmail.com"
] | krodrigues.firmino2@gmail.com |
3e7d9c7e6635b42e3e10f0df9f483df2f81f36b1 | 8638cea2e2763059ae58f8ed38b2e19a68a298e4 | /interrupt_test.py | de04eec0cf43cd02ee42df46a24a9a11365380be | [] | no_license | yjyGo/Adversarial-dialogue-generation-practice | 6d61f22cb9468d8ddecd53a3c2e25d344e57057f | cabea346dd3c0f9700b2fea2331a79b9f08dc2f3 | refs/heads/master | 2020-04-28T20:03:49.711779 | 2018-06-29T12:04:46 | 2018-06-29T12:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import time
def train():
    """Simulate one training step: report progress, then block for a while."""
    step_seconds = 5
    print("training...")
    time.sleep(step_seconds)
def update_params():
    """Pretend to persist the model parameters (only reports it)."""
    print("Params updated.")
def test():
    """Run train/update cycles until the user interrupts with Ctrl-C."""
    stop_requested = False
    while not stop_requested:
        try:
            train()
            update_params()
        except KeyboardInterrupt:
            # Finish the current iteration's flag check and exit cleanly.
            stop_requested = True
    print("Stop training.")
# Entry point: starts the interruptible training loop.
test()
| [
"noreply@github.com"
] | yjyGo.noreply@github.com |
07cfd1607796d3ca94ad028d3b8c573a0d32cc3b | 4f998e9798b5d72a508a62013d8179e58d94b8bb | /home/migrations/0001_load_initial_data.py | 592ecd278b5ad39a6095474d97880b4060026301 | [] | no_license | crowdbotics-apps/testcerabc-27781 | 72437420dc97964cfd2c882f723f6e8dc4177fe8 | a58dc42415d0c2c7a523a8b9566f3a64b20a6164 | refs/heads/master | 2023-05-12T14:34:46.264425 | 2021-06-06T18:47:08 | 2021-06-06T18:47:08 | 374,438,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 538 | py | from django.db import migrations
def create_site(apps, schema_editor):
    """Point the default django.contrib.sites record (pk=1) at our domain."""
    Site = apps.get_model("sites", "Site")
    custom_domain = "testcerabc-27781.botics.co"
    site_params = {"name": "testcerabc"}
    # Only attach a domain when one is configured.
    if custom_domain:
        site_params["domain"] = custom_domain
    Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):

    # Requires the unique-domain constraint from the sites app first.
    dependencies = [
        ("sites", "0002_alter_domain_unique"),
    ]

    # Data migration only: seed/update the default Site record.
    operations = [
        migrations.RunPython(create_site),
    ]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
5b1852f62edd872800d72e216076ab51522488bb | c2895dab3daebdfe4b784a54acb7018b13fd26d2 | /desktop/python/components/gui.py | fd4b2c60ca1943bb59ca6e61c8a7b69544322091 | [
"MIT",
"CC-BY-4.0"
] | permissive | zhibirc/stihi-ru-backupper | bcaacf0118ae94499e6df769038dc34cd67cbe2d | 5917c4daabeaa0fa5ff73d3b093412fd94aa7e24 | refs/heads/master | 2023-03-05T23:55:14.894830 | 2021-07-07T18:26:03 | 2021-07-07T18:26:03 | 245,217,481 | 1 | 0 | MIT | 2023-03-04T07:30:27 | 2020-03-05T16:46:41 | JavaScript | UTF-8 | Python | false | false | 20 | py | class Gui:
pass
| [
"ysurilov@lohika.com"
] | ysurilov@lohika.com |
a01c00efe0655a44e52dd6b20bc4f983e908aa2f | 2695c818b0e1f9fc8f2d09d0bd29614352a7b306 | /edge_detection.py | f232419173f634a55a1b0d14e98d4c7cf9a85944 | [] | no_license | singhamritanshu/open_cv_python_tutorial | 6b9407f811d410c442797f179f02bbf1e9c7f53c | 18e1e0845c690f1988da2af6c7237bb3cdbe2013 | refs/heads/master | 2023-08-19T14:26:01.041380 | 2021-10-08T18:06:45 | 2021-10-08T18:06:45 | 336,986,832 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 874 | py | import cv2 as cv
import numpy as np
img = cv.imread("test_images/boston.png")
cv.imshow("Original",img)
gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
cv.imshow("Gray", gray)
# Canny
canny = cv.Canny(gray,155,255)
cv.imshow("Canny",canny)
# Laplacian
lap = cv.Laplacian(gray,cv.CV_64F) # CV_64F is the datadepth(ddepth) value.
lap = np.uint8(np.absolute(lap))# Image pixel cannot have a negative value so we calculate absolute value and then we convert it to image specific data type i.e. uint8.
cv.imshow("Laplacian",lap)
# Sobel: It computes the gradient in two direction i.e x & y.
sobel_x = cv.Sobel(gray,cv.CV_64F,1,0) # dx=1,dy=0
cv.imshow("Sobel X",sobel_x)
sobel_y = cv.Sobel(gray,cv.CV_64F,0,1)
cv.imshow("Sobel Y",sobel_y)
# Combining both sobel_x & sobel_y
combined_sobel = cv.bitwise_or(sobel_x,sobel_y)
cv.imshow("Combined Sobel", combined_sobel)
cv.waitKey(0) | [
"64220796+singhamritanshu@users.noreply.github.com"
] | 64220796+singhamritanshu@users.noreply.github.com |
bba0e79bf7baee38028134d639dce9bb7633f07b | 8cb0d8bc728464ac7ef71494ce3e2d6891ed0133 | /venv/Scripts/pip3-script.py | 9ffe9df0b05579f9a65a1e306e36558a28c79031 | [] | no_license | angelaTv/Luminar-Python | a54f7e4766581e312a61a4064774a7303e1bea07 | 4093146e18fa66d329691bca9b2f087fdb63bc90 | refs/heads/master | 2020-12-08T11:46:41.803350 | 2020-01-22T09:59:50 | 2020-01-22T09:59:50 | 232,973,735 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | #!C:\Luminar-Python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"angelavrghs76@gmail.com"
] | angelavrghs76@gmail.com |
1e75bd9772cf9d30a85f55795d7beb12a4624398 | a04ce9bfac3cb2a2ce5f32c4ed7c975d255e50af | /libs/tasks.py | 6047a6077efb9739d17f456c8269b7333e57066a | [] | no_license | RakeshMallesh123/flask-starter | 2d898b872db994f19a415c6e7ccdc1e36c7ebc34 | 19204f21d30d5427ae979a702488643810562cf6 | refs/heads/master | 2022-09-29T07:20:52.507026 | 2019-07-19T16:47:43 | 2019-07-19T16:47:43 | 197,140,638 | 1 | 1 | null | 2022-09-16T18:11:22 | 2019-07-16T07:12:17 | Python | UTF-8 | Python | false | false | 717 | py | import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from celery_background import celery
@celery.task
def send_async_email(data):
    """Send an email through SendGrid from a Celery worker.

    :param data: dict with ``email`` (recipient), ``subject`` and ``html`` keys.
    :return: True on success, False when sending is disabled or fails.
             (Previously the failure/disabled paths fell through returning
             None, hiding the outcome from callers; both are falsy, so the
             change is backward-compatible.)
    """
    if not os.environ.get("SEND_EMAIL"):
        # Feature flag: allow environments (tests, dev) to skip real sends.
        print("Email sending is disabled")
        return False
    message = Mail(
        from_email=os.environ.get('SENDGRID_DEFAULT_FROM'),
        to_emails=data['email'],
        subject=data['subject'],
        html_content=data['html'])
    try:
        sendgrid_client = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        sendgrid_client.send(message)
        print('Email sent')
        return True
    except Exception as e:
        # Best-effort task: report the error and signal failure instead of
        # raising inside the worker.
        print(e)
        return False
| [
"rakesh@codetoart.com"
] | rakesh@codetoart.com |
49c88c9831700d7151ac0409c10f08e36d2cc1f8 | 3655a9cb779a288d153559e34d413f8566158f36 | /ex1_5_3.py | 26d52642cd876f69cc0da0317a747565a316facb | [] | no_license | GonMazzini/MachineLearning_DTU | e588fd3873503e44dbd349998ee8ea9181889467 | f08cca46929bcd4b63d08bb8d468f812910129ba | refs/heads/main | 2023-08-05T19:28:26.221216 | 2021-10-05T15:06:04 | 2021-10-05T15:06:04 | 399,391,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,481 | py | # exercise 1.5.3
import numpy as np
from scipy.io import loadmat
# You can load the matlab data (matlab's m-file) to Python environment with
# 'loadmat()' function imported from 'scipy.io' module.
# The matlab workspace is loaded as a dictionary, with keys corresponding to
# matlab variable names, and values to arrays representing matlab matrices.
# Load Matlab data file to python dict structure
iris_mat = loadmat('../Data/iris.mat', squeeze_me=True)
# The argument squeeze_me ensures that there the variables we get from the
# MATLAB filed are not stored within "unneeded" array dimensions.
# You can check which variables are in the loaded dict by calling
# the function keys() for the dict:
#mat_data.keys()
# this will tell you that X, y, M, N and C are stored in the dictionary,
# as well as some extra information about e.g. the used MATLAB version.
# We'll extract the needed variables by using these keys:
X = iris_mat['X']
y = iris_mat['y']
M = iris_mat['M']
N = iris_mat['N']
C = iris_mat['C']
attributeNames = iris_mat['attributeNames']
classNames = iris_mat['classNames']
# Loading the Iris data from the .mat-file was quite easy, because all the work
# of putting it into the correct format was already done. This is of course
# likely not the case for your own data, where you'll need to do something
# similar to the two previous exercises. We will, however, sometimes in the
# course use .mat-files in the exercises. | [
"noreply@github.com"
] | GonMazzini.noreply@github.com |
4d905cd191f636da17e610812a9398e3eae689d3 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-4255.py | e4efe63d4df69c3a17f2b7294ac199c649e5d2fb | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,757 | py | # A resizable list of integers
class Vector(object):
    items: [int] = None
    size: int = 0
    def __init__(self:"Vector"):
        self.items = [0]
    # Number of slots currently allocated in the backing list
    def capacity(self:"Vector") -> int:
        return len(self.items)
    # Grow the backing list by a single slot
    def increase_capacity(self:"Vector") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Store one value after the last element, growing first if full
    def append(self:"Vector", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Append every value of new_items, in order
    def append_all(self:"Vector", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Drop the element at position idx by shifting later ones left
    def remove_at(self:"Vector", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Element stored at position idx
    def get(self:"Vector", idx: int) -> int:
        return self.items[idx]
    # Count of elements currently stored
    def length(self:"Vector") -> int:
        return self.size
# A resizable list of integers (generated variant with a duplicated "2" API)
class Vector2(object):
    items: [int] = None
    items2: [int] = None
    size: int = 0
    size2: int = 0
    def __init__(self:"Vector2"):
        self.items = [0]
    # Number of slots currently allocated in the backing list
    def capacity(self:"Vector2") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity2(self:"Vector2") -> int:
        return len(self.items)
    # Grow the backing list by a single slot
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Store one value after the last element, growing first if full
    def append(self:"Vector2", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; the extra argument is ignored
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Append every value of new_items, in order
    def append_all(self:"Vector2", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; the second list is ignored
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Drop the element at position idx by shifting later ones left
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; the extra index is ignored
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Element stored at position idx
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]
    # Same as get; the extra index is ignored
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Count of elements currently stored
    def length(self:"Vector2") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers (generated variant with duplicated "2"/"3" APIs)
class Vector3(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    def __init__(self:"Vector3"):
        self.items = [0]
    # Number of slots currently allocated in the backing list
    def capacity(self:"Vector3") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity2(self:"Vector3") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity3(self:"Vector3") -> int:
        return len(self.items)
    # Grow the backing list by a single slot
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Store one value after the last element, growing first if full
    def append(self:"Vector3", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; extra arguments are ignored
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; extra arguments are ignored
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Append every value of new_items, in order
    def append_all(self:"Vector3", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; extra lists are ignored
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; extra lists are ignored
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        for value in new_items:
            self.append(value)
    # Drop the element at position idx by shifting later ones left
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; extra indices are ignored
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; extra indices are ignored
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Element stored at position idx
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]
    # Same as get; extra indices are ignored
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Same as get; extra indices are ignored
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Count of elements currently stored
    def length(self:"Vector3") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length2(self:"Vector3") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers (generated variant with duplicated "2".."4" APIs)
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        self.items = [0]
    # Number of slots currently allocated in the backing list
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Duplicate of capacity kept for the generated API
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Grow the backing list by a single slot
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Store one value after the last element, growing first if full
    def append(self:"Vector4", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; extra arguments are ignored
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; extra arguments are ignored
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Same as append; extra arguments are ignored
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Append every value of new_items, in order
    def append_all(self:"Vector4", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; extra lists are ignored
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        value2:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; extra lists are ignored
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        for value in new_items:
            self.append(value)
    # Same as append_all; extra lists are ignored
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        value:int = 0
        value2:int = 0
        value3:int = 0
        value4:int = 0
        for value in new_items:
            self.append(value)
    # Drop the element at position idx by shifting later ones left
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; extra indices are ignored
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; extra indices are ignored
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Same as remove_at; extra indices are ignored
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1
    # Element stored at position idx
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Same as get; extra indices are ignored
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Same as get; extra indices are ignored
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Same as get; extra indices are ignored
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Count of elements currently stored
    def length(self:"Vector4") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length2(self:"Vector4") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length3(self:"Vector4") -> int:
        return self.size
    # Duplicate of length kept for the generated API
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
# FIX: `items` was annotated `[$Type]` -- an unexpanded code-generation
# placeholder and a syntax error; it is `[int]` here, matching Vector..Vector4.
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector (extra arguments are ignored)
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector (extra lists are ignored)
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra indices are ignored)
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra indices are ignored)
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra indices are ignored)
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector (extra indices are ignored)
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra indices are ignored)
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra indices are ignored)
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra indices are ignored)
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index (extra indices are ignored)
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000
    # Double the backing list while small; past the limit grow one slot at a time
    def increase_capacity(self:"DoublingVector") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling limit reached: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Double the backing list while small; past the limit grow one slot at a time
    def increase_capacity(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling limit reached: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"DoublingVector2") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Double the backing list while small; past the limit grow one slot at a time
    def increase_capacity(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling limit reached: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity3(self:"DoublingVector3") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Double the backing list while small; past the limit grow one slot at a time
    def increase_capacity(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling limit reached: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity3(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity4(self:"DoublingVector4") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Double the backing list while small; past the limit grow one slot at a time
    def increase_capacity(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            # Doubling limit reached: fall back to single-slot growth.
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity2(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity3(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity4(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
    # Duplicate of increase_capacity kept for the generated API
    def increase_capacity5(self:"DoublingVector5") -> int:
        if self.capacity() > self.doubling_limit // 2:
            self.items = self.items + [0]
        else:
            self.items = self.items + self.items
        return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
v:Vector = None
v2:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve2(v:Vector, v2:Vector) -> object:
i:int = 0
i2:int = 0
j:int = 0
j2:int = 0
k:int = 0
k2:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
j:int = 0
j2:int = 0
j3:int = 0
k:int = 0
k2:int = 0
k3:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
j:int = 0
j2:int = 0
j3:int = 0
j4:int = 0
j5:int = 0
k:int = 0
k2:int = 0
k3:int = 0
k4:int = 0
k5:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
01aa0cb5b3fc74abb677dc0ee9eb917630e512c3 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/v20210301/security_partner_provider.py | 4e4a82226720393cf0603bb544fee7d22d602c3a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,291 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SecurityPartnerProviderArgs', 'SecurityPartnerProvider']
@pulumi.input_type
class SecurityPartnerProviderArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input['SubResourceArgs']] = None):
"""
The set of arguments for constructing a SecurityPartnerProvider resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] security_partner_provider_name: The name of the Security Partner Provider.
:param pulumi.Input[Union[str, 'SecurityProviderName']] security_provider_name: The security provider name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_hub: The virtualHub to which the Security Partner Provider belongs.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if security_partner_provider_name is not None:
pulumi.set(__self__, "security_partner_provider_name", security_partner_provider_name)
if security_provider_name is not None:
pulumi.set(__self__, "security_provider_name", security_provider_name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_hub is not None:
pulumi.set(__self__, "virtual_hub", virtual_hub)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="securityPartnerProviderName")
def security_partner_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Security Partner Provider.
"""
return pulumi.get(self, "security_partner_provider_name")
@security_partner_provider_name.setter
def security_partner_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "security_partner_provider_name", value)
@property
@pulumi.getter(name="securityProviderName")
def security_provider_name(self) -> Optional[pulumi.Input[Union[str, 'SecurityProviderName']]]:
"""
The security provider name.
"""
return pulumi.get(self, "security_provider_name")
@security_provider_name.setter
def security_provider_name(self, value: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]]):
pulumi.set(self, "security_provider_name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The virtualHub to which the Security Partner Provider belongs.
"""
return pulumi.get(self, "virtual_hub")
@virtual_hub.setter
def virtual_hub(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_hub", value)
class SecurityPartnerProvider(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
"""
Security Partner Provider resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] security_partner_provider_name: The name of the Security Partner Provider.
:param pulumi.Input[Union[str, 'SecurityProviderName']] security_provider_name: The security provider name.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_hub: The virtualHub to which the Security Partner Provider belongs.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SecurityPartnerProviderArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Security Partner Provider resource.
:param str resource_name: The name of the resource.
:param SecurityPartnerProviderArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SecurityPartnerProviderArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
security_partner_provider_name: Optional[pulumi.Input[str]] = None,
security_provider_name: Optional[pulumi.Input[Union[str, 'SecurityProviderName']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_hub: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SecurityPartnerProviderArgs.__new__(SecurityPartnerProviderArgs)
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["security_partner_provider_name"] = security_partner_provider_name
__props__.__dict__["security_provider_name"] = security_provider_name
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_hub"] = virtual_hub
__props__.__dict__["connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:network:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200301:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200401:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200501:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200601:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200701:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20200801:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20201101:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20210201:SecurityPartnerProvider"), pulumi.Alias(type_="azure-native:network/v20210501:SecurityPartnerProvider")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SecurityPartnerProvider, __self__).__init__(
'azure-native:network/v20210301:SecurityPartnerProvider',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SecurityPartnerProvider':
"""
Get an existing SecurityPartnerProvider resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SecurityPartnerProviderArgs.__new__(SecurityPartnerProviderArgs)
__props__.__dict__["connection_status"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["security_provider_name"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_hub"] = None
return SecurityPartnerProvider(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="connectionStatus")
def connection_status(self) -> pulumi.Output[str]:
"""
The connection status with the Security Partner Provider.
"""
return pulumi.get(self, "connection_status")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the Security Partner Provider resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="securityProviderName")
def security_provider_name(self) -> pulumi.Output[Optional[str]]:
"""
The security provider name.
"""
return pulumi.get(self, "security_provider_name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The virtualHub to which the Security Partner Provider belongs.
"""
return pulumi.get(self, "virtual_hub")
| [
"noreply@github.com"
] | bpkgoud.noreply@github.com |
b3b8eb91fa66a2775490954f8c3ff2b4d06a219f | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_critics.py | daa20c16b7ffcfd31864b2f9e82bd272a677bdae | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.nouns._critic import _CRITIC
#calss header
class _CRITICS(_CRITIC, ):
def __init__(self,):
_CRITIC.__init__(self)
self.name = "CRITICS"
self.specie = 'nouns'
self.basic = "critic"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.