blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fb2284f845ce19ebcf45f5dabcddb4a4493f11d8 | bfcbc9d2e294d331ab79d088f8e99a33660db838 | /e10/wikipedia_popular.py | 535ec284020f1c5a86bada02db4b8a6dfa2992dd | [] | no_license | aauutthh/data-science | f852cef84d0cef7bcc1edb7bbb5dde5de9391190 | 20ab084e4c3f8429603b2fb6711b9ecc779be089 | refs/heads/master | 2020-03-22T20:55:39.004103 | 2017-11-21T07:09:07 | 2017-11-21T07:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,806 | py | #!/usr/bin/python3
# wikipedia_popular.py
# CMPT 318 Exercise 10 - Popular Wikipedia Pages
# Alex Macdonald
# ID#301272281
# November 17, 2017
import sys
from pyspark.sql import SparkSession, functions, types
spark = SparkSession.builder.appName('popular wikipedia pages').getOrCreate()
assert sys.version_info >= (3, 4) # make sure we have Python 3.4+
assert spark.version >= '2.1' # make sure we have Spark 2.1+
schema = types.StructType([
types.StructField('language', types.StringType(), False),
types.StructField('pagename', types.StringType(), False),
types.StructField('viewcount', types.LongType(), False)
])
def get_filename_from_path(path):
filename = path[path.rfind('/'):] # After this, the string will be /pagecounts-(what we want)0000
filename = filename[12:23] # By means of voodoo hack-fu, here's a substring that returns the string we want
return filename
def main(in_directory, out_directory):
# Thanks to ggbaker for the hint to use sep & input_file_name & udf
pages = spark.read.csv(in_directory, sep=' ', schema=schema).withColumn('filename', functions.input_file_name()).cache()
# from the filename column, get the hour and then remove filename because we don't need it anymore
path_to_hour = functions.udf(get_filename_from_path, returnType=types.StringType())
pages = pages.withColumn('hour', path_to_hour(pages.filename))
pages = pages.drop('filename')
# We're interested in:
# 1. English Wikipedia pages (i.e., language is 'en')
pages = pages.filter("language = 'en'")
pages = pages.drop('language')
# 2. Exclude 'Main_Page'
pages = pages.filter("pagename != 'Main_Page'")
# 3. Exclude 'Special:'
# Learned to use the tilde for negation from:
# https://stackoverflow.com/questions/40743915/filtering-string-in-pyspark
pages = pages.filter(~pages.pagename.contains('Special:'))
# .. need to find the largest number of page views in each hour
# Create a new dataframe to hold the aggregated hours & max views
max_views = pages.groupby('hour').agg(functions.max('viewcount').alias('max'))
# .. then join that back to the collection of all page pagecounts
pages = pages.join(max_views, 'hour')
# .. only keep those with count == max(count) for that hour
pages = pages.filter(pages.viewcount == pages.max)
# sort by date/hour and page name in the event of a tie
pages = pages.sort('hour', 'pagename')
# make sure the results are in the order as shown in the assignment sample output
pages = pages.select('hour', 'pagename', 'viewcount')
# Write results to directory
pages.write.csv(out_directory, mode='overwrite')
if __name__=='__main__':
in_directory = sys.argv[1]
out_directory = sys.argv[2]
main(in_directory, out_directory) | [
"apmacdon@sfu.ca"
] | apmacdon@sfu.ca |
531867291a2f847a938e5a694218f1e32d44c886 | 7502c3ac3b9b6815d585b7812030c897fe75c56c | /blog/form.py | ebb2e8448c2b5e7f9d5715641626b93d5de2a843 | [] | no_license | Alexleslie/First-Blog-at-Life | cbc2c5499bc913712b4419640f2b43f2152553ef | e02b3ee0dedecc03cb764875dd4c81f4642c4f4b | refs/heads/master | 2023-01-04T18:41:55.359556 | 2017-11-03T08:27:12 | 2017-11-03T08:27:12 | 108,284,432 | 0 | 0 | null | 2020-11-04T05:57:23 | 2017-10-25T14:50:23 | JavaScript | UTF-8 | Python | false | false | 448 | py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
class SearchForm(forms.Form):
body = forms.TextInput()
class PostForm(forms.Form):
body = forms.TextInput()
class CommentForm(forms.Form):
body = forms.CharField()
class RegisterForm(UserCreationForm):
class Meta:
model = User
fields = ('username', 'email', 'password1','password2')
| [
"577045156@qq.com"
] | 577045156@qq.com |
0adebccf114feaf7d54ef0db353bf64f5bd716b7 | 6d26869b0668a4ce70665099717463c549064a3e | /hackerhank/security/security-message-space-and-ciphertext-space.py | c01990271959d31abe227eee62dcf89bc836ad0a | [] | no_license | wesckeley/competitive-programming | 3b8f1ea8089d82bcb28765230114b93eb7534560 | c93d478b775d6d735741e4149a6ddbd8b51da218 | refs/heads/master | 2020-03-07T12:35:59.233646 | 2018-06-13T15:52:07 | 2018-06-13T15:52:07 | 127,479,051 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | line = input()
answer = ""
for i in range(0,len(line)):
answer += chr(((int(line[i]) + 1) % 10) + ord('0'))
print(answer) | [
"wesckeley.martins@medpej.local"
] | wesckeley.martins@medpej.local |
36dad26e1bf89e1f0c9698c64e31fcf54f3fc7c0 | 37d9bb2869fe491a67c97de6adc3e0e1693ff82a | /StringMethods.py | 6d8af7c493506723fa8c73866f566114291010f2 | [] | no_license | qinyanjuidavid/Self-Learning | ffbcb62f2204c5dedd0cde3a116b653da77b3702 | 8c6a5b3a7f250af99538e9f23d01d8a09839b702 | refs/heads/master | 2023-03-31T10:20:21.225642 | 2021-04-03T08:04:27 | 2021-04-03T08:04:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #strip(), len(),lower(),upper(),split()
name=" Hello "
print(name.strip())#It removes all the spaces in a string
nums=" 1 2 3 1 4 "
print(nums.strip())#The spaces removed are before and after
print(len(name)) #Checks the length of the string
name="JOHN DOE"
print(name.lower()) #changes the strings to be in lower case
name="jane Doe"
print(name.upper()) #Changes the string to upper case
name="JOHN DOE"
print(name.split()) #Changes the string to a string
print(type(name.split()))
#Count() and find() methodss
s="Hello"
print(s.find('o'))
print(s.find('l'))
print(s.find('s'))#Python does not find s
print(s.count('h'))
print(s.count('l'))
print(s.count('z'))#Zero 'z'
| [
"davidkinyanjui052@gmail.com"
] | davidkinyanjui052@gmail.com |
3c523190371a13698b667d7787e278397db29d43 | e837cc9a201a8bd88f3ef31817406930861ff99b | /GUI/RibbonTab.py | cc0cdd1e8f1ec5c1a686c9571396f7d09b26debb | [
"MIT"
] | permissive | fivecountry/QupyRibbon | f1a36a89d0061cc6fcba081156698ba0a7543a4e | c5ed71ca94e07a243e758f3980aedb5fa4f3e3b1 | refs/heads/master | 2022-12-10T08:31:25.322325 | 2020-09-08T08:46:25 | 2020-09-08T08:46:25 | 293,543,055 | 0 | 0 | MIT | 2020-09-07T13:58:00 | 2020-09-07T13:57:59 | null | UTF-8 | Python | false | false | 710 | py | from PyQt5.QtWidgets import *
from PyQt5.QtCore import Qt
from GUI.RibbonPane import RibbonPane
class RibbonTab(QWidget):
def __init__(self, parent, name):
QWidget.__init__(self, parent)
layout = QHBoxLayout()
self.setLayout(layout)
layout.setContentsMargins(0, 0, 0, 0)
layout.setSpacing(0)
layout.setAlignment(Qt.AlignLeft)
def add_ribbon_pane(self, name):
ribbon_pane = RibbonPane(self, name)
self.layout().addWidget(ribbon_pane)
return ribbon_pane
def add_spacer(self):
self.layout().addSpacerItem(QSpacerItem(1, 1, QSizePolicy.MinimumExpanding))
self.layout().setStretch(self.layout().count() - 1, 1)
| [
"magnusmj@gmail.com"
] | magnusmj@gmail.com |
50e67afae23cc712e3eb4ee47d120b1ea3ac0c1c | 13e89f39f95b42305311b5a8db04f748d504e5d7 | /1C repeat_divergence/LTR/ltr_insert_time/0523_stat_kindom_insert_time_species_max.py | 4a3a6b6db3a6dc79fa73e472d8672d20c316968e | [] | no_license | 352166594/PlantRep | 3c9d2f9819a0cbadebe62cc50ce882d5b6eb3f96 | 41be5253300d00d37bf93c2de6eb07a6f90d4c2e | refs/heads/master | 2022-12-07T10:08:56.070887 | 2020-09-02T06:22:21 | 2020-09-02T06:22:21 | 276,773,682 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,219 | py | f1=open("merge_0522_count_insert_time.xls")
#f1=open("merge_0317_count_insert_time.xls")
#f1=open("merge_0317_72_count_insert_time.xls")
f2=open("0523type_count_max_ltr_inserttime.xls","w")
dic = {}
list_Interval= []
list_Species = []
for lin in f1:
lis=lin.strip().split("\t")
if "Species_type" in lin:
# f2.write("Interval\tTemporal\tLTR_nummber\tFigure_order\tSpecies_type\n")
#1-2 Ten_thousand 1 1 07_Magnoliids
# for i in lis[5:]:
# list_Interval.append(i)
list_Interval=lis[5:]
else:
Species_type = lis[0]
if Species_type not in dic:
dic[Species_type] = {}
list_Species.append(Species_type)
# total_stat = "total"
# dic[total_stat] = {}
time_lis = lis[5:]
time_type = list_Interval[time_lis.index(max(time_lis))]
# for j in range(len(time_lis)):
# time_type = list_Interval[j]
if time_type not in dic[Species_type]:
dic[Species_type][time_type] = 1#max(time_lis)#time_lis[time_type]
elif time_type in dic[Species_type]:
dic[Species_type][time_type] = dic[Species_type][time_type] + 1#max(time_lis)#time_lis[time_type]
f2.write("Interval\tTemporal\tLTR_nummber\tFigure_order\tSpecies_type\n")
for n in list_Species:
# if n == '01_Alage':
# print(n)
# file_name = n+"_max_ltr_inserttime.xls"
# fn=open(file_name,"w")
#f2.write("Interval\tTemporal\tLTR_nummber\tFigure_order\tSpecies_type\n")
# fn.write("Interval\tTemporal\tLTR_nummber\tFigure_order\tSpecies_type\n")
for m in range(len(list_Interval)):
if "Million" in list_Interval[m]:
Interval=list_Interval[m].split("Million")[0]
Temporal="Million"
elif "Hundred_thousand" in list_Interval[m]:
Interval=list_Interval[m].split("Hundred_thousand")[0]
Temporal="Hundred_thousand"
# elif "Ten_thousand" in list_Interval[m]:
# Interval=list_Interval[m].split("Ten_thousand")[0]
# Temporal="Ten_thousand"
if list_Interval[m] in dic[n]:
LTR_nummber=dic[n][list_Interval[m]]
else:
LTR_nummber=0
Figure_order = m+1
Species_type = n
f2.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(Interval,Temporal,LTR_nummber,Figure_order,Species_type))
# fn.write("{0}\t{1}\t{2}\t{3}\t{4}\n".format(Interval,Temporal,LTR_nummber,Figure_order,Species_type))
#fn.close()
| [
"352166594@qq.com"
] | 352166594@qq.com |
dddfaca9ee48680f390c8e6ceafece535b9459ab | ab3267945da8792d02014501e3652fe1f6c7e3da | /setup.py | f4ae9d6de70a5746493ab954d1655544e67cb952 | [
"MIT"
] | permissive | tomnaumann/awesomedoc | f7c1383cff115c594f8888b40b5c15f50218a2fe | 52ab4ce6e54a8f472a64bd9a75f861d1a9972cd1 | refs/heads/master | 2021-04-07T02:47:06.277255 | 2020-03-29T22:51:30 | 2020-03-29T22:54:23 | 248,637,697 | 0 | 0 | MIT | 2020-03-29T22:54:24 | 2020-03-20T01:01:53 | Python | UTF-8 | Python | false | false | 629 | py | from distutils.core import setup
setup(
name='awesomedoc',
packages=['awesomedoc'],
version='0.0.4.alpha',
license='MIT',
description='Generate simple markdown from python scripts',
author='Tom Naumann',
author_email='tom.naumann.95@gmail.com',
url="https://github.com/tomnaumann/awesomedoc",
keywords=['markdown', 'awesomedoc', 'documentation'],
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
]
)
| [
"54036949+tomnaumann@users.noreply.github.com"
] | 54036949+tomnaumann@users.noreply.github.com |
463770f0a9e2f6a0f60f0d712cc6d785165f5196 | 07da9531043420a762107d1cd4c939e2fd62e633 | /citpayui.py | 5c50090233c90d5a680c4a9de2085d7c94155459 | [] | no_license | voroge/citpay | 34127bd4c983b68fded006eb8333b91dc3f18e18 | 37cf5c9158795aa6cbc36709b0a6037c19f9b61f | refs/heads/master | 2020-03-28T01:27:40.807986 | 2018-09-07T08:46:37 | 2018-09-07T08:46:37 | 147,506,614 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,091 | py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'citpayui.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(1117, 816)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
MainWindow.setFont(font)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8("user32.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setMinimumSize(QtCore.QSize(0, 70))
self.groupBox.setBaseSize(QtCore.QSize(0, 0))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Verdana"))
self.groupBox.setFont(font)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.verticalLayout_3 = QtGui.QVBoxLayout(self.groupBox)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.label = QtGui.QLabel(self.groupBox)
self.label.setMinimumSize(QtCore.QSize(147, 0))
self.label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label.setObjectName(_fromUtf8("label"))
self.horizontalLayout.addWidget(self.label)
self.cbmonth = QtGui.QComboBox(self.groupBox)
self.cbmonth.setObjectName(_fromUtf8("cbmonth"))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.cbmonth.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.cbmonth)
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.horizontalLayout.addWidget(self.label_2)
self.cbyear = QtGui.QComboBox(self.groupBox)
self.cbyear.setObjectName(_fromUtf8("cbyear"))
self.cbyear.addItem(_fromUtf8(""))
self.cbyear.addItem(_fromUtf8(""))
self.cbyear.addItem(_fromUtf8(""))
self.cbyear.addItem(_fromUtf8(""))
self.cbyear.addItem(_fromUtf8(""))
self.horizontalLayout.addWidget(self.cbyear)
self.label_3 = QtGui.QLabel(self.groupBox)
self.label_3.setText(_fromUtf8(""))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.horizontalLayout.addWidget(self.label_3)
self.verticalLayout_3.addLayout(self.horizontalLayout)
self.verticalLayout.addWidget(self.groupBox)
self.btnstart = QtGui.QPushButton(self.centralwidget)
self.btnstart.setObjectName(_fromUtf8("btnstart"))
self.verticalLayout.addWidget(self.btnstart)
self.tvdata = QtGui.QTableView(self.centralwidget)
self.tvdata.setObjectName(_fromUtf8("tvdata"))
self.verticalLayout.addWidget(self.tvdata)
self.progressBar = QtGui.QProgressBar(self.centralwidget)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.verticalLayout.addWidget(self.progressBar)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 1117, 31))
self.menubar.setObjectName(_fromUtf8("menubar"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.cbmonth.setCurrentIndex(6)
self.cbyear.setCurrentIndex(2)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Дисциплина предоставления платы граждан за КУ", None))
self.groupBox.setTitle(_translate("MainWindow", "Отчетный период", None))
self.label.setText(_translate("MainWindow", "Месяц :", None))
self.cbmonth.setItemText(0, _translate("MainWindow", "январь", None))
self.cbmonth.setItemText(1, _translate("MainWindow", "февраль", None))
self.cbmonth.setItemText(2, _translate("MainWindow", "март", None))
self.cbmonth.setItemText(3, _translate("MainWindow", "апрель", None))
self.cbmonth.setItemText(4, _translate("MainWindow", "май", None))
self.cbmonth.setItemText(5, _translate("MainWindow", "июнь", None))
self.cbmonth.setItemText(6, _translate("MainWindow", "июль", None))
self.cbmonth.setItemText(7, _translate("MainWindow", "август", None))
self.cbmonth.setItemText(8, _translate("MainWindow", "сентябрь", None))
self.cbmonth.setItemText(9, _translate("MainWindow", "октябрь", None))
self.cbmonth.setItemText(10, _translate("MainWindow", "ноябрь", None))
self.cbmonth.setItemText(11, _translate("MainWindow", "декабрь", None))
self.label_2.setText(_translate("MainWindow", "Год :", None))
self.cbyear.setItemText(0, _translate("MainWindow", "2016", None))
self.cbyear.setItemText(1, _translate("MainWindow", "2017", None))
self.cbyear.setItemText(2, _translate("MainWindow", "2018", None))
self.cbyear.setItemText(3, _translate("MainWindow", "2019", None))
self.cbyear.setItemText(4, _translate("MainWindow", "2020", None))
self.btnstart.setText(_translate("MainWindow", "Сформировать отчет", None))
| [
"voroge@yandex.ru"
] | voroge@yandex.ru |
1179ed1a0a4a8b465f26500da471f61dec3bfdb5 | 5251a6be594dff7e56bbe6b4f968ea43c3315471 | /atoll/config.py | 5dd83a5e843ac4f91ecf18cf5ba16b102eadb80f | [
"Apache-2.0"
] | permissive | coralproject/atoll | aec2e529fd7c5164864c4a2e9a501a8477fc3872 | 2b62b37d3a320480264c4a0242532aad99c338ec | refs/heads/master | 2021-07-14T03:39:09.761086 | 2016-07-26T18:57:16 | 2016-07-26T18:57:16 | 43,079,410 | 12 | 1 | NOASSERTION | 2021-03-19T21:53:15 | 2015-09-24T16:37:32 | Python | UTF-8 | Python | false | false | 437 | py |
"""
Loads the service configuration.
"""
import os
import yaml
conf = {
'worker_broker': 'amqp://guest:guest@localhost/',
'worker_backend': 'amqp',
'executor_host': '127.0.0.1:8786'
}
user_conf_path = os.environ.get('ATOLL_CONF', None)
if user_conf_path is not None:
with open(user_conf_path, 'r') as f:
conf.update(yaml.load(f))
namespace = globals()
for k, v in conf.items():
namespace[k.upper()] = v
| [
"f+accounts@frnsys.com"
] | f+accounts@frnsys.com |
b56441936dac3b31ed4d7d74d4eeaee5a4198c57 | 773d360967b59d1b32a80d9fe7951807310cc1a4 | /WEB APPLICATION/KurdActivite.py | 9e41d139407b9f7eed84a3e305c725e1a637c530 | [] | no_license | KurdActivite/KurdActivite | 51ef764a09f42f505cb440461c9c56810e0bee9f | 2cf5852d267a1bcd259daa1e7d11cd34f37fca96 | refs/heads/master | 2020-04-07T21:35:37.570703 | 2018-11-22T18:01:21 | 2018-11-22T18:01:21 | 158,733,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 492 | py | from flask import Flask, render_template
app = Flask(__name__, template_folder='KuTE', static_folder='KuST')
@app.route('/')
@app.route('/Home')
def home():
return render_template('HHome.html', name='Home')
@app.route('/Contact')
def contact():
return render_template('CContact.html', name='Contact')
@app.route('/About')
def about():
return render_template('AAbout.html', name='About')
if __name__ == '__main__':
app.run(debug=True ,port=12323) | [
"noreply@github.com"
] | KurdActivite.noreply@github.com |
390cba67ac35bf192a1f7e1dfaecfd4db708e018 | 77ffad31eff3fc06643b64aa09956e5160864b10 | /תרגילים המהצגת/5.2.py | 0f55a9ea7af715734ad00aac2c59ca45d8fc7801 | [] | no_license | roeisavion/roeisproject2 | c2835d1c59da13bf7fcc7f6c36cad958e27fd319 | 92356e5d1807fb2034f943746ec955665e37759c | refs/heads/master | 2022-11-27T23:14:17.346595 | 2020-08-05T06:57:39 | 2020-08-05T06:57:39 | 279,581,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43 | py | a=input("enter long string")
print(a[::-1]) | [
"roeisavion@gmail.com"
] | roeisavion@gmail.com |
cd41d985c603ed0a4723965bfa70df8a138d1f06 | f95d2646f8428cceed98681f8ed2407d4f044941 | /FlaskDemo04/run01.py | 97f6b559d276571f0ef32a6508c258ebd248ba6e | [] | no_license | q2806060/python-note | 014e1458dcfa896f2749c7ebce68b2bbe31a3bf8 | fbe107d668b44b78ae0094dbcc7e8ff8a4f8c983 | refs/heads/master | 2020-08-18T01:12:31.227654 | 2019-10-17T07:40:40 | 2019-10-17T07:40:40 | 215,731,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
#导入pymysql用来替代MySQLdb
import pymysql
# pymysql.install_as_MySQLdb()
app = Flask(__name__)
#为app指定连库字符
app.config['SQLALCHEMY_DATABASE_URI']="mysql+pymysql://root:123456@localhost:3306/flask"
#取消SQLAlchemy的信号追踪
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
#创建SQLAlchemy程序实例
db = SQLAlchemy(app)
if __name__ == "__main__":
app.run(debug=True)
| [
"C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn"
] | C8916BA958F57D5A740E38E94644A3F8@i-search.com.cn |
d4d2e6a769a474a32be20f7fc3ca5feb69c8f19c | 4c87cb1ffb78dec4bc1cb0d73d9d525e0c847e97 | /doc2vec/revlab_getvect60.py | a96f86185f0f66f0337572f95b8d084f7bb0d37a | [] | no_license | hangandrew/yelp_review_images | 2e88b28c1e853fafed571860bf42d5e37cce3f7e | d04f2cdb70540df78feebc12702f511acca998ea | refs/heads/master | 2021-02-26T13:14:12.087265 | 2020-09-28T03:36:17 | 2020-09-28T03:36:17 | 245,527,798 | 0 | 1 | null | 2020-07-16T01:24:41 | 2020-03-06T22:26:44 | Python | UTF-8 | Python | false | false | 2,212 | py | #!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
import pandas as pd
import numpy
import logging
import gensim
import random
import sklearn
import collections
import os
from multiprocessing import Pool
import itertools
from gensim.models.doc2vec import Doc2Vec
import random
class generate_vect:
def __init__(self):
self.count = 0
self.model60= Doc2Vec.load("d2v60/d2v60.model")
print('Model with Vector size 60 loaded')
self.model60.random.seed(0)
result1 = pd.read_pickle('results1.pkl')
chunks = [result1[0:20000],result1[20000:40000],result1[40000:60000],result1[60000:80000],result1[80000:100000],
result1[100000:120000],result1[120000:140000],result1[140000:160000],result1[160000:180000],result1[180000:200000],
result1[200000:220000],result1[220000:240000],result1[240000:260000],result1[260000:280000],result1[280000:300000],
result1[300000:320000],result1[320000:340000],result1[340000:360000],result1[360000:380000],result1[380000:400000],
result1[400000:420000],result1[420000:440000],result1[440000:460000],result1[460000:480000],result1[480000:500000],
result1[500000:520000],result1[520000:540000],result1[540000:560000],result1[560000:580000],result1[580000:600000],
result1[600000:640000],result1[640000:681526]]
# number of reviews = 681526
pool = Pool(processes = 32) # number of processes = 32, since there are 32 chunks
self.results = pool.map(self.parse_reviews, chunks) # pass these 32 chunks to parse_reviews to parallelly process the chunks
self.result_flat = list(itertools.chain(*self.results))
def parse_reviews(self, test_subjects):
result = []
for id_, review in zip(test_subjects.rid, test_subjects.text):
print(self.count,id_)
self.count = self.count + 1
result.append([id_, self.model60.infer_vector(review)])
return result
def print_vect(self):
df = pd.DataFrame(self.result_flat, columns=('review_id','vectors'))
df.to_pickle('revlab_vect_reviews.pkl')
df.to_csv('revlab_vect_reviews.csv')
if __name__ == "__main__":
obj = generate_vect()
obj.print_vect()
print('Done')
| [
"noreply@github.com"
] | hangandrew.noreply@github.com |
9f8c4569973317f1d4012e8eb9299a38aae2c3ac | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/sieve-big-2820.py | 40ba804c3b66a2d92304552027939b5501509d12 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31,740 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A resizable list of integers
class Vector2(object):
    # Backing storage; its allocated length may exceed `size`.
    items: [int] = None
    # Duplicate field declared by the generator; never written by any method here.
    items2: [int] = None
    # Count of elements currently in use.
    size: int = 0
    # Duplicate field declared by the generator; never written by any method here.
    size2: int = 0

    def __init__(self:"Vector2"):
        self.items = [0]

    # Number of slots currently allocated (not the element count).
    def capacity(self:"Vector2") -> int:
        return len(self.items)

    # Identical to capacity; note it also measures self.items, not items2.
    def capacity2(self:"Vector2") -> int:
        return len(self.items)

    # Extend the backing list by one empty slot; returns the new capacity.
    def increase_capacity(self:"Vector2") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Identical to increase_capacity; also grows self.items.
    def increase_capacity2(self:"Vector2") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Store one value after the last used slot, growing first when full.
    def append(self:"Vector2", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Identical to append; item2 is accepted but never used.
    def append2(self:"Vector2", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector2", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)

    # Identical to append_all; new_items2 is accepted but never used.
    def append_all2(self:"Vector2", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        unused2:int = 0
        for value in new_items:
            self.append(value)

    # Delete the element at idx by shifting later elements one slot left.
    # Negative indices are ignored.
    def remove_at(self:"Vector2", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1

    # Identical to remove_at; idx2 is accepted but never used.
    def remove_at2(self:"Vector2", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1

    # Value stored at position idx.
    def get(self:"Vector2", idx: int) -> int:
        return self.items[idx]

    # Identical to get; idx2 is accepted but never used.
    def get2(self:"Vector2", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Number of elements in use.
    def length(self:"Vector2") -> int:
        return self.size

    # Identical to length.
    def length2(self:"Vector2") -> int:
        return self.size
# A resizable list of integers
class Vector3(object):
    # Backing storage; its allocated length may exceed `size`.
    items: [int] = None
    # Duplicate fields declared by the generator; never written by any method here.
    items2: [int] = None
    items3: [int] = None
    # Count of elements currently in use.
    size: int = 0
    # Duplicate fields declared by the generator; never written by any method here.
    size2: int = 0
    size3: int = 0

    def __init__(self:"Vector3"):
        self.items = [0]

    # Number of slots currently allocated (not the element count).
    def capacity(self:"Vector3") -> int:
        return len(self.items)

    # Identical to capacity; note it also measures self.items, not items2.
    def capacity2(self:"Vector3") -> int:
        return len(self.items)

    # Identical to capacity; note it also measures self.items, not items3.
    def capacity3(self:"Vector3") -> int:
        return len(self.items)

    # Extend the backing list by one empty slot; returns the new capacity.
    def increase_capacity(self:"Vector3") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Identical to increase_capacity; also grows self.items.
    def increase_capacity2(self:"Vector3") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Identical to increase_capacity; also grows self.items.
    def increase_capacity3(self:"Vector3") -> int:
        self.items = self.items + [0]
        return len(self.items)

    # Store one value after the last used slot, growing first when full.
    def append(self:"Vector3", item: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Identical to append; item2 is accepted but never used.
    def append2(self:"Vector3", item: int, item2: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Identical to append; item2 and item3 are accepted but never used.
    def append3(self:"Vector3", item: int, item2: int, item3: int) -> object:
        if self.capacity() == self.size:
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1

    # Append every value from new_items, in order.
    def append_all(self:"Vector3", new_items: [int]) -> object:
        value:int = 0
        for value in new_items:
            self.append(value)

    # Identical to append_all; new_items2 is accepted but never used.
    def append_all2(self:"Vector3", new_items: [int], new_items2: [int]) -> object:
        value:int = 0
        unused2:int = 0
        for value in new_items:
            self.append(value)

    # Identical to append_all; new_items2 and new_items3 are never used.
    def append_all3(self:"Vector3", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        value:int = 0
        unused2:int = 0
        unused3:int = 0
        for value in new_items:
            self.append(value)

    # Delete the element at idx by shifting later elements one slot left.
    # Negative indices are ignored.
    def remove_at(self:"Vector3", idx: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1

    # Identical to remove_at; idx2 is accepted but never used.
    def remove_at2(self:"Vector3", idx: int, idx2: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1

    # Identical to remove_at; idx2 and idx3 are accepted but never used.
    def remove_at3(self:"Vector3", idx: int, idx2: int, idx3: int) -> object:
        if idx >= 0:
            while idx < self.size - 1:
                self.items[idx] = self.items[idx + 1]
                idx = idx + 1
            self.size = self.size - 1

    # Value stored at position idx.
    def get(self:"Vector3", idx: int) -> int:
        return self.items[idx]

    # Identical to get; idx2 is accepted but never used.
    def get2(self:"Vector3", idx: int, idx2: int) -> int:
        return self.items[idx]

    # Identical to get; idx2 and idx3 are accepted but never used.
    def get3(self:"Vector3", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]

    # Number of elements in use.
    def length(self:"Vector3") -> int:
        return self.size

    # Identical to length.
    def length2(self:"Vector3") -> int:
        return self.size

    # Identical to length.
    def length3(self:"Vector3") -> int:
        return self.size
# A resizable list of integers
# Vector4 is the 4-way padded variant of this generated benchmark: the numbered
# fields/parameters are declared but never used; only items/size carry state.
# FIX: the third capacity-increase method was named `$ID` -- an unexpanded
# template placeholder and a syntax error -- restored to `increase_capacity3`
# to match the naming pattern of every other numbered variant.
class Vector4(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    def __init__(self:"Vector4"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector4") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector4") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    # (was `def $ID(...)` -- see class-level FIX note)
    def increase_capacity3(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector4") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector4", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector4", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector4", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector4", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector4", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector4", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector4", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    def remove_at(self:"Vector4", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector4", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector4", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector4", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector4", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector4", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector4", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector4") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector4") -> int:
        return self.size
# A resizable list of integers
# Vector5 is the 5-way padded variant of this generated benchmark: the numbered
# fields/parameters below are declared but never read -- only items/size carry
# state, and the numbered methods duplicate the unnumbered ones.
class Vector5(object):
    items: [int] = None
    items2: [int] = None
    items3: [int] = None
    items4: [int] = None
    items5: [int] = None
    size: int = 0
    size2: int = 0
    size3: int = 0
    size4: int = 0
    size5: int = 0
    def __init__(self:"Vector5"):
        self.items = [0]
    # Returns current capacity
    def capacity(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity2(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity3(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity4(self:"Vector5") -> int:
        return len(self.items)
    # Returns current capacity
    def capacity5(self:"Vector5") -> int:
        return len(self.items)
    # Increases capacity of vector by one element
    def increase_capacity(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity2(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity3(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity4(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Increases capacity of vector by one element
    def increase_capacity5(self:"Vector5") -> int:
        self.items = self.items + [0]
        return self.capacity()
    # Appends one item to end of vector
    def append(self:"Vector5", item: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append2(self:"Vector5", item: int, item2: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append3(self:"Vector5", item: int, item2: int, item3: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append4(self:"Vector5", item: int, item2: int, item3: int, item4: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends one item to end of vector
    def append5(self:"Vector5", item: int, item2: int, item3: int, item4: int, item5: int) -> object:
        if self.size == self.capacity():
            self.increase_capacity()
        self.items[self.size] = item
        self.size = self.size + 1
    # Appends many items to end of vector
    def append_all(self:"Vector5", new_items: [int]) -> object:
        item:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all2(self:"Vector5", new_items: [int], new_items2: [int]) -> object:
        item:int = 0
        item2:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all3(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all4(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        for item in new_items:
            self.append(item)
    # Appends many items to end of vector
    def append_all5(self:"Vector5", new_items: [int], new_items2: [int], new_items3: [int], new_items4: [int], new_items5: [int]) -> object:
        item:int = 0
        item2:int = 0
        item3:int = 0
        item4:int = 0
        item5:int = 0
        for item in new_items:
            self.append(item)
    # Removes an item from the middle of vector
    # Negative idx is rejected; idx >= size still decrements size (existing quirk).
    def remove_at(self:"Vector5", idx: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at2(self:"Vector5", idx: int, idx2: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at3(self:"Vector5", idx: int, idx2: int, idx3: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Removes an item from the middle of vector
    def remove_at5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> object:
        if idx < 0:
            return
        while idx < self.size - 1:
            self.items[idx] = self.items[idx + 1]
            idx = idx + 1
        self.size = self.size - 1
    # Retrieves an item at a given index
    def get(self:"Vector5", idx: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get2(self:"Vector5", idx: int, idx2: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get3(self:"Vector5", idx: int, idx2: int, idx3: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get4(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int) -> int:
        return self.items[idx]
    # Retrieves an item at a given index
    def get5(self:"Vector5", idx: int, idx2: int, idx3: int, idx4: int, idx5: int) -> int:
        return self.items[idx]
    # Retrieves the current size of the vector
    def length(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length2(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length3(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length4(self:"Vector5") -> int:
        return self.size
    # Retrieves the current size of the vector
    def length5(self:"Vector5") -> int:
        return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
    doubling_limit:int = 1000
    # Overriding to do fewer resizes
    # NOTE: doubling concatenates the list with itself, so the new slots hold
    # copies of the existing values rather than zeros; presumably only slots
    # below the logical size are ever read -- confirm against Vector.
    def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (2-way padded variant; increase_capacity2 and doubling_limit2 duplicate the
#  unnumbered members and are generated padding)
class DoublingVector2(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector2") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (3-way padded variant; the numbered members are generated padding)
class DoublingVector3(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector3") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (4-way padded variant; the numbered members are generated padding)
class DoublingVector4(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector4") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# A faster (but more memory-consuming) implementation of vector
# (5-way padded variant; the numbered members are generated padding)
class DoublingVector5(Vector):
    doubling_limit:int = 1000
    doubling_limit2:int = 1000
    doubling_limit3:int = 1000
    doubling_limit4:int = 1000
    doubling_limit5:int = 1000
    # Overriding to do fewer resizes
    def increase_capacity(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity2(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity3(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity4(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
    # Overriding to do fewer resizes
    def increase_capacity5(self:"DoublingVector5") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
            self.items = self.items + self.items
        else:
            # If doubling limit has been reached, fall back to
            # standard capacity increases
            self.items = self.items + [0]
        return self.capacity()
# Makes a vector in the range [i, j)
# Returns a DoublingVector holding i, i+1, ..., j-1 (empty when i >= j).
def vrange(i:int, j:int) -> Vector:
    v:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Same as vrange; i2/j2 and v2 are declared but never used (generated padding).
def vrange2(i:int, j:int, i2:int, j2:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Same as vrange; the numbered parameters/locals are unused generated padding.
def vrange3(i:int, j:int, i2:int, j2:int, i3:int, j3:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Same as vrange; the numbered parameters/locals are unused generated padding.
def vrange4(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Same as vrange; the numbered parameters/locals are unused generated padding.
def vrange5(i:int, j:int, i2:int, j2:int, i3:int, j3:int, i4:int, j4:int, i5:int, j5:int) -> Vector:
    v:Vector = None
    v2:Vector = None
    v3:Vector = None
    v4:Vector = None
    v5:Vector = None
    v = DoublingVector()
    while i < j:
        v.append(i)
        i = i + 1
    return v
# Sieve of Eratosthenes (not really)
# Trial-division filter: walks v in place, and for each retained element k
# removes every later element divisible by k.  For v = [2..n) this leaves
# exactly the primes.  Quadratic on purpose -- this is a benchmark workload.
def sieve(v:Vector) -> object:
    i:int = 0
    j:int = 0
    k:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same filter as sieve, applied only to v; v2 and the numbered locals are
# unused generated padding.
def sieve2(v:Vector, v2:Vector) -> object:
    i:int = 0
    i2:int = 0
    j:int = 0
    j2:int = 0
    k:int = 0
    k2:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same filter as sieve, applied only to v; the numbered parameters/locals are
# unused generated padding.
def sieve3(v:Vector, v2:Vector, v3:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same filter as sieve, applied only to v; the numbered parameters/locals are
# unused generated padding.
def sieve4(v:Vector, v2:Vector, v3:Vector, v4:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Same filter as sieve, applied only to v; the numbered parameters/locals are
# unused generated padding.
def sieve5(v:Vector, v2:Vector, v3:Vector, v4:Vector, v5:Vector) -> object:
    i:int = 0
    i2:int = 0
    i3:int = 0
    i4:int = 0
    i5:int = 0
    j:int = 0
    j2:int = 0
    j3:int = 0
    j4:int = 0
    j5:int = 0
    k:int = 0
    k2:int = 0
    k3:int = 0
    k4:int = 0
    k5:int = 0
    while i < v.length():
        k = v.get(i)
        j = i + 1
        while j < v.length():
            if v.get(j) % k == 0:
                v.remove_at(j)
            else:
                j = j + 1
        i = i + 1
# Input parameter
n:int = 50
# n2..n5, v2..v5 and i2..i5 below mirror the numbered padding used throughout
# this generated benchmark; only n, v and i take part in the computation.
n2:int = 50
n3:int = 50
n4:int = 50
n5:int = 50
# Data
v:Vector = None
v2:Vector = None
v3:Vector = None
v4:Vector = None
v5:Vector = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
# Crunch
v = vrange(2, n)
# v2..v5 are built identically but never sieved or printed (workload padding).
v2 = vrange(2, n)
v3 = vrange(2, n)
v4 = vrange(2, n)
v5 = vrange(2, n)
sieve(v)
# Print: the primes in [2, 50), one per line.
while i < v.length():
    print(v.get(i))
    i = i + 1
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
a469c0cfaa0b2ca34636bfd13dd96b1bcd5a6afc | e49a25c82d019234f0139fa24a8b72fd3adbefd6 | /Q.py | 21b38696caea91cd01b30a2c55a17eac38906359 | [] | no_license | Aggrron/SanyaProject | aeb8ff66f194959803c55bed3c27325675aff179 | e5f2e4e726112d8000f1a0fad608d3efd2867e65 | refs/heads/master | 2020-04-11T16:57:00.158341 | 2018-12-15T20:53:42 | 2018-12-15T20:53:42 | 161,942,572 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,805 | py | import codecs
# Question bank and answer-key files, read as UTF-8.
# NOTE(review): `list` shadows the builtin list type for the rest of this
# module, and neither handle is ever closed in the visible code.
list = codecs.open("List.txt", "r", 'utf-8')
ans = codecs.open("Answers.txt", 'r', 'utf-8')
class Question:
    """A single multiple-choice question with four answer options.

    ``n`` is the question number, ``text`` the question body, and ``a``-``d``
    the option texts.  ``corr`` starts as the literal 'а' and is later
    overwritten by findCorr() with the 1-based index of the correct option.
    """
    def __init__(self, text, n, a, b, c, d):
        for field, value in zip(("text", "n", "a", "b", "c", "d"),
                                (text, n, a, b, c, d)):
            setattr(self, field, value)
        self.corr = 'а'
def printQuestion(obj):
    """Dump a Question to stdout: number, text, the four options, then the answer."""
    for field in (obj.n, obj.text, obj.a, obj.b, obj.c, obj.d):
        print(field)
    print('Correct: ' + str(obj.corr))
def clearSpaces(line):
    """Strip the leading run of non-alphabetic characters (spaces, digits, punctuation).

    Returns the suffix of *line* starting at its first letter, or '' when the
    line contains no letters at all.
    """
    start = 0
    while start < len(line) and not line[start].isalpha():
        start += 1
    return line[start:]
def findNum(line):
    """Parse the leading question number: everything before the first '.'."""
    return int(line[:line.find('.')])
def findText(line):
    """Return the text after the first '.', with the leading non-letter run
    and the final character (presumably the trailing newline) removed."""
    tail = clearSpaces(line[line.find('.') + 1:])
    return tail[0: len(tail) - 1]
def findCorr(line, que):
    """Apply one answer-key line to the question list *que*.

    The line is whitespace-separated (number, letter) pairs; each pair sets
    que[number-1].corr to the 1-based index of the Cyrillic option letter.
    Letters outside а-г are ignored, as in the original table of ifs.
    """
    tokens = line.split()
    letter_to_index = {'а': 1, 'б': 2, 'в': 3, 'г': 4}
    for pair in range(len(tokens) // 2):
        num = int(tokens[pair * 2])
        letter = tokens[pair * 2 + 1]
        if letter in letter_to_index:
            que[num - 1].corr = letter_to_index[letter]
question_list = []
i = 0  # NOTE(review): `i` is never used in this script
#Get questions
# Parse the question bank: a numbered line carries the question text; option
# lines start with а/б/в/г.  A Question is appended only when a 'г' line is
# seen, reusing the most recent num/text/a/b/c.
# NOTE(review): if the file does not start with a numbered line followed by
# all four options, this raises NameError (stale/unset num, text, a, b, c);
# an empty line would also raise IndexError on line[0].  `list` is the file
# handle opened above, shadowing the builtin.
for line in list:
    if line[0].isdigit():
        num = findNum(line)
        text = findText(line)
    if line[0] == 'а':
        a = findText(line)
    if line[0] == 'б':
        b = findText(line)
    if line[0] == 'в':
        c = findText(line)
    if line[0] == 'г':
        d = findText(line)
        question_list.append(Question(text, num, a, b, c, d))
#Get answers
# First line of the answer file is a header and is skipped.
ans.readline()
for line in ans:
    findCorr(line, question_list)
| [
"gsm10forhl2@yandex.ru"
] | gsm10forhl2@yandex.ru |
3fe822c29efca364f4d1bbcba30900abc107fc7c | 778c813b2a0aaa038a6ebe79783d1330de759515 | /src/net/CNNnet.py | ef4e06cbb4b738d28626049666fc5fdedafabb23 | [] | no_license | GRSEB9S/3D_object_detection | 3bb7f8e261d3c8600c6625087849dd71e22094ff | 87f593eee8ae8a62ce6ea83cc35bb22126777699 | refs/heads/master | 2021-06-21T15:39:30.413468 | 2017-08-13T22:02:28 | 2017-08-14T02:50:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,046 | py | import tensorflow as tf
# Per-channel pixel means subtracted from input images -- presumably the
# standard VGG/ImageNet B, G, R means; TODO confirm channel order matches
# the input pipeline.
VGG_MEAN = [103.939, 116.779, 123.68]
# VGG16-style feature extractor over RGB images plus an RPN-like head
# (objectness scores and box deltas per anchor).
# NOTE(review): this class does not run as written -- code left byte-identical,
# defects flagged inline:
#   1. __init__ references an undefined name `imgs` (NameError).
#   2. inference() is missing `self` yet uses self.imgs.
#   3. `tf.conv2d` should be `tf.nn.conv2d` in the TF1 API.
#   4. `stddev=1e-1` is passed to tf.Variable (TypeError); it belongs to
#      tf.truncated_normal.
#   5. The conv3_1 block assigns its result to `conv2_1`, so `conv3_1` is
#      undefined when conv3_2 reads it.
#   6. The conv5_3 block convolves the stale `conv` (color_space output) and
#      bias-adds `conv5_2` instead of its own conv result.
#   7. The return uses `pool5` and `probs`, which are never defined
#      (the pooling result is `pool_5`; no `probs` is computed), and the
#      score/delta kernels reference an undefined `bases`.
class rgb_net:
    def __init__(self, var_file=None):
        if (var_file is None):
            # save_path
            pass
        else:
            # self.dict = np.load(var_file).item()
            pass
        self.imgs = imgs  # NOTE(review): `imgs` is undefined here (defect 1)
    def inference():  # NOTE(review): missing `self` parameter (defect 2)
        # zero mean input
        mean = tf.constant(VGG_MEAN, dtype=tf.float32, shape=[1,1,1,3], name='img_mean')
        imgs = self.imgs - mean
        # color transform
        with tf.name_scope('color_space') as scope:
            # NOTE(review): every kernel/conv line below hits defects 3 and 4.
            kernel = tf.Variable(tf.truncated_normal([1,1,1,3]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[3],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,3,64]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv1_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv1_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,64,64]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv1_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv1_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_1') as scope:
            pool_1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool1')
        with tf.name_scope('conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,64,128]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(pool_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv2_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,128,128]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv2_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_2') as scope:
            pool_2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool2')
        with tf.name_scope('conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,128,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(pool_2, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)  # NOTE(review): should be conv3_1 (defect 5)
        with tf.name_scope('conv3_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv3_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv3_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv3_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv3_2, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv3_3 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_3') as scope:
            pool_3 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool3')
        with tf.name_scope('conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(pool_3, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv4_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv4_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv4_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv4_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv4_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv4_2, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv4_3 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_4') as scope:
            pool_4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool4')
        with tf.name_scope('conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(pool_4, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv5_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(conv5_1, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv5_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            # NOTE(review): convolves the stale `conv` and bias-adds conv5_2 (defect 6)
            conv = tf.conv2d(conv, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv5_2, biases)
            conv5_3 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_5') as scope:
            pool_5 = tf.nn.max_pool(conv5_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool5')
        # return pool5
        with tf.name_scope('top') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512, 512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(pool_5, kernel, [1,1,1,1], padding='SAME')
            # NOTE(review): `bases` (anchors per position?) is never defined (defect 7)
            score_kernel = tf.Variable(tf.truncated_normal([1,1,512, 2*bases]), dtype=tf.float32, stddev=1e-1, name='score_weights')
            scores = tf.conv2d(conv, score_kernel, [1,1,1,1], padding='SAME')
            scores = tf.nn.softmax(tf.reshape(scores, [-1,2]), name='prob')
            deltas_kernel = tf.Variable(tf.truncated_normal([1,1,512, 4*bases]), dtype=tf.float32, stddev=1e-1, name='deltas_weights')
            deltas = tf.conv2d(conv, deltas_kernel, [1,1,1,1], padding='SAME')
        return pool5, scores, probs, deltas  # NOTE(review): pool5/probs undefined (defect 7)
# Intended VGG16-style feature extractor (front view?).
# NOTE(review): this class does not run as written -- code left byte-identical,
# defects flagged inline:
#   1. Same API bugs as rgb_net: undefined `imgs` in __init__, inference()
#      missing `self`, `tf.conv2d` vs `tf.nn.conv2d`, `stddev` passed to
#      tf.Variable instead of tf.truncated_normal.
#   2. Every conv layer convolves `imgs` instead of the previous layer's
#      output, so the network is not actually chained.
#   3. Several results are assigned to the wrong names (conv3_1/conv4_* go to
#      conv2_*; the 'pool_3' scope assigns pool_4), leaving later references
#      (conv4_3 in pool_4) undefined.
#   4. pool_5 pools conv2_2, and the function returns the undefined `pool5`.
class front_net():
    def __init__(self, var_file=None):
        if (var_file is None):
            # save_path
            pass
        else:
            # self.dict = np.load(var_file).item()
            pass
        self.imgs = imgs  # NOTE(review): `imgs` is undefined here (defect 1)
    def inference():  # NOTE(review): missing `self` parameter (defect 1)
        # zero mean input
        mean = tf.constant(VGG_MEAN, dtype=tf.float32, shape=[1,1,1,3], name='img_mean')
        imgs = self.imgs - mean
        # color transform
        with tf.name_scope('color_space') as scope:
            kernel = tf.Variable(tf.truncated_normal([1,1,1,3]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[3],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv = tf.nn.relu(out, name=scope)
        # NOTE(review): from here on every block convolves `imgs` (defect 2).
        with tf.name_scope('conv1_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,3,64]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv1_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv1_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,64,64]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[64],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv1_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_1') as scope:
            pool_1 = tf.nn.max_pool(conv1_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool1')
        with tf.name_scope('conv2_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,64,128]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv2_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,128,128]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[128],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_2') as scope:
            pool_2 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool2')
        with tf.name_scope('conv3_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,128,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)  # NOTE(review): should be conv3_1 (defect 3)
        with tf.name_scope('conv3_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv3_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv3_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,256]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv3_3 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_3') as scope:
            pool_4 = tf.nn.max_pool(conv3_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool3')  # NOTE(review): assigns pool_4 in the pool_3 scope (defect 3)
        with tf.name_scope('conv4_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,256,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)  # NOTE(review): should be conv4_1 (defect 3)
        with tf.name_scope('conv4_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_2 = tf.nn.relu(out, name=scope)  # NOTE(review): should be conv4_2 (defect 3)
        with tf.name_scope('conv4_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv2_1 = tf.nn.relu(out, name=scope)  # NOTE(review): should be conv4_3; pool_4 below reads conv4_3 (defect 3)
        with tf.name_scope('pool_4') as scope:
            pool_4 = tf.nn.max_pool(conv4_3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool4')
        with tf.name_scope('conv5_1') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv5_1 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_2') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv5_2 = tf.nn.relu(out, name=scope)
        with tf.name_scope('conv5_3') as scope:
            kernel = tf.Variable(tf.truncated_normal([3,3,512,512]), dtype=tf.float32, stddev=1e-1, name='weights')
            conv = tf.conv2d(imgs, kernel, [1,1,1,1], padding='SAME')
            biases = tf.Variable(tf.constant(0.0,shape=[512],dtype=tf.float32), trainable=True,name='biases')
            out = tf.nn.bias_add(conv, biases)
            conv5_3 = tf.nn.relu(out, name=scope)
        with tf.name_scope('pool_5') as scope:
            pool_5 = tf.nn.max_pool(conv2_2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
                             padding='SAME', name='pool5')  # NOTE(review): pools conv2_2, not conv5_3 (defect 4)
        return pool5  # NOTE(review): `pool5` is undefined; the result is `pool_5` (defect 4)
| [
"wangxiaonku@gmail.com"
] | wangxiaonku@gmail.com |
9b88132ba556d288001a46be01dd66a4b99288dc | 2eed0a4829bcc38573f0615a1966e544b8dddaaa | /tutorial/spiders/mydomain.py | 5ad52e1a7b16de443f457a6d13ff73a7760e8646 | [] | no_license | allanwong/SpiderHouse | e01bb7bc200d0cdcbe22cb29aa9f209aca4dad50 | 44680c27bed06a93a906858789961f93b43e4502 | refs/heads/master | 2020-12-13T19:51:55.136159 | 2018-12-27T01:05:05 | 2018-12-27T01:05:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | # -*- coding: utf-8 -*-
# scrapy genspider mydomain mydomain.com
import scrapy
from scrapy_splash import SplashRequest
import requests
from scrapy.selector import Selector
class MydomainSpider(scrapy.Spider):
    """Spider that renders a Toutiao article-list API URL through the
    Splash JavaScript-rendering service and prints the rendered body.

    Requires a running Splash instance and the scrapy-splash middleware.
    """
    name = 'mydomain'
    # NOTE(review): a single URL string rather than the conventional list;
    # start_requests() below uses it directly, so scrapy's default list
    # handling is bypassed — confirm this is intentional.
    start_urls = 'https://www.toutiao.com/c/user/article/?page_type=1&user_id=3672368498&max_behot_time=0&count=20&as=A1B54B913E11F59&cp=5B1E812F55E91E1&_signature=t6g.3xAY7MQdh39fciiJU7eoP8'
    # Lua script run by Splash's 'execute' endpoint: load the URL, wait
    # 0.5s for rendering, then return the HTML plus a screenshot and HAR log.
    script = '''
    function main(splash, args)
        assert(splash:go(args.url))
        assert(splash:wait(0.5))
        return {
            html = splash:html(),
            png = splash:png(),
            har = splash:har(),
        }
    end
    '''
    def start_requests(self):
        """Kick off the crawl with a single Splash 'execute' request
        (images disabled to speed up rendering)."""
        yield SplashRequest(self.start_urls, self.parse, args={'lua_source': self.script, 'wait': 0.5, 'images': 0}, endpoint='execute')
    def parse(self, response):
        """Dump the Splash-rendered response body to stdout."""
        # Fix: the original used Python-2 `print response.text`, which is a
        # SyntaxError under Python 3; the parenthesized call works on both.
        print(response.text)
| [
"wang_bing_qi@163.com"
] | wang_bing_qi@163.com |
8cb58e8278aba4f4fb156bd5f9d734aa6e83197f | c56f00808d95f26ada452aca2beeb114d23e3056 | /01/batch_solver.py | dc21b61b63388545093dc09e5837ce0b28f6fdb1 | [] | no_license | Heramb001/cs580-Artificial-Intelligence | 25fe6544e20af89901cfb6b66f087c833810efc2 | 79a94ac39fc62e4fe335d84c7a9f18e609845b66 | refs/heads/master | 2021-07-17T15:29:56.521192 | 2020-09-07T19:50:19 | 2020-09-07T19:50:19 | 209,076,407 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,483 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 21:03:51 2019
@author: HERAMB
24 puzzle problem
Initial State
--------------------------
| 9 | 24 | 3 | 5 | 17 |
--------------------------
| 6 | | 13 | 19 | 10 |
--------------------------
| 11 | 21 | 12 | 1 | 20 |
--------------------------
| 16 | 4 | 14 | 12 | 15 |
--------------------------
| 8 | 18 | 23 | 2 | 7 |
--------------------------
---- 0 is a blank tile which can be moved
Goal State
--------------------------
| 1 | 2 | 3 | 4 | 5 |
--------------------------
| 6 | 7 | 8 | 9 | 10 |
--------------------------
| 11 | 12 | 13 | 14 | 15 |
--------------------------
| 16 | 17 | 18 | 19 | 20 |
--------------------------
| 21 | 22 | 23 | 24 | 0 |
--------------------------
"""
#import libraries
from math import sqrt
from collections import deque
from state import State
from heapq import heappush, heappop, heapify
import time
#--- Goal configuration: tiles 1..24 in row-major order with the blank (0)
#--- in the last cell; every search stops when a node matches this list.
goalState = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,0]
goalNode = State #--- placeholder (the State *class* itself); rebound to the solved State node when a search succeeds
initialState = [] #--- starting board; filled by get() from one line of input.txt
puzzleLen = 0 #--- total cell count including the blank (e.g. 25 for the 24-puzzle); set by get()
puzzleSide = 0 #--- board side length, sqrt(puzzleLen) (e.g. 5); set by get()
nodesExpanded = 0 #--- running count of nodes expanded by expand(); reported by output()
maxDepthReached = 0 #--- deepest search depth observed so far
maxFringeSize = 0 #--- largest frontier (queue/stack/heap) size observed; reported by output()
moves = [] #--- move letters ('U'/'D'/'L'/'R') from start to goal; filled by backtrack()
"""
Function - bfs() (Breadth First Search)
- works as a Breadth First Search algorithm.
"""
def bfs(startState, heuristicFunc=None):
    """Breadth-first search from *startState* to the module goalState.

    startState    -- flat list board representation
    heuristicFunc -- ignored; accepted (default None) so main() can invoke
                     every algorithm with the same two-argument call
                     (the original one-parameter signature made that call
                     raise TypeError for bfs)
    Returns (True, remaining queue) when the goal is found, else (False, None).
    Side effects: sets goalNode, updates maxFringeSize and maxDepthReached.
    """
    global goalNode, maxFringeSize, maxDepthReached
    visited, queue = set(), deque([State(startState, None, None, 0, 0, 0)])
    while queue:                        #--- Execute until the frontier is empty
        node = queue.popleft()          #--- FIFO pop -> level-order expansion
        visited.add(node.map)
        #--- Goal test on pop
        if node.state == goalState:
            goalNode = node
            return True, queue
        #--- Not the goal: expand and enqueue unseen children
        for child in expand(node):
            if child.map not in visited:
                queue.append(child)
                visited.add(child.map)  #--- mark on enqueue to avoid duplicates
                if child.depth > maxDepthReached:
                    maxDepthReached += 1
                if len(queue) > maxFringeSize:
                    maxFringeSize = len(queue)
    #--- Frontier exhausted without reaching the goal
    return False, None
"""
Function - dfs() (Breadth First Search)
- works as a Depth First Search algorithm.
"""
def dfs(startState, heuristicFunc=None):
    """Depth-first search from *startState* to the module goalState.

    startState    -- flat list board representation
    heuristicFunc -- ignored; accepted (default None) so main() can invoke
                     every algorithm with the same two-argument call
                     (the original one-parameter signature made that call
                     raise TypeError for dfs)
    Returns (True, remaining stack) when the goal is found, else (False, None).
    Side effects: sets goalNode, updates maxFringeSize and maxDepthReached.
    """
    global goalNode, maxFringeSize, maxDepthReached
    visited, stack = set(), list([State(startState, None, None, 0, 0, 0)])
    while stack:                        #--- Execute until the frontier is empty
        node = stack.pop()              #--- LIFO pop -> depth-first expansion
        visited.add(node.map)
        if node.state == goalState:
            goalNode = node
            return True, stack
        #--- Reversed so the first-generated move ('D') is popped first,
        #--- matching the original traversal order.
        for neighbor in reversed(expand(node)):
            if neighbor.map not in visited:
                stack.append(neighbor)
                visited.add(neighbor.map)   #--- mark on push to avoid duplicates
                if neighbor.depth > maxDepthReached:
                    maxDepthReached += 1
                if len(stack) > maxFringeSize:
                    maxFringeSize = len(stack)
    #--- Frontier exhausted without reaching the goal
    return False, None
"""
Function - greedy() (Greedy Search)
- works as a Greedy Search with using a heuristic function.
"""
def greedy(startState, heuristicFunc):
    """Greedy best-first search: frontier ordered purely by heuristic value.

    startState    -- flat list board representation
    heuristicFunc -- callable(state) -> numeric estimate to the goal
    Returns (True, remaining heap) when the goal is found, else (False, None).
    Side effects: sets goalNode, updates maxFringeSize and maxDepthReached.
    """
    global goalNode, maxFringeSize, maxDepthReached
    visited, pQueue = set(), list()
    #--- Monotonic push counter used as the heap tie-breaker.  The original
    #--- entries were (key, move, State): on equal keys heapq fell back to
    #--- comparing the move field (child str vs the root's int 0) and then
    #--- unorderable State objects, raising TypeError.  A unique counter
    #--- keeps ties FIFO and the comparison on ints only.
    pushCount = 0
    key = heuristicFunc(startState)
    root = State(startState, None, None, 0, 0, key)
    heappush(pQueue, (key, pushCount, root))
    while pQueue:
        node = heappop(pQueue)[2]       #--- entry is (key, counter, State)
        visited.add(node.map)
        if node.state == goalState:     #--- goal test on pop
            goalNode = node
            return True, pQueue
        for neighbor in expand(node):
            neighbor.key = heuristicFunc(neighbor.state)
            if neighbor.map not in visited:
                pushCount += 1
                heappush(pQueue, (neighbor.key, pushCount, neighbor))
                visited.add(neighbor.map)
                if neighbor.depth > maxDepthReached:
                    maxDepthReached += 1
                if len(pQueue) > maxFringeSize:
                    maxFringeSize = len(pQueue)
    #--- Frontier exhausted without reaching the goal
    return False, None
"""
Function - ast() (A star Search)
- works as a A star Search algorithm.
"""
def ast(startState, heuristicFunc):
    """A* search: frontier ordered by f(n) = path cost + heuristic estimate.

    (NOTE: the function name shadows the stdlib `ast` module; kept because
    function_map and the CLI selection depend on it.)

    startState    -- flat list board representation
    heuristicFunc -- callable(state) -> numeric estimate to the goal
    Returns (True, remaining heap) when the goal is found, else (False, None).
    Side effects: sets goalNode, updates maxFringeSize and maxDepthReached.
    """
    global goalNode, maxFringeSize, maxDepthReached
    visited, pQueue = set(), list()
    #--- Monotonic push counter used as the heap tie-breaker.  The original
    #--- entries were (key, move, State): on equal keys heapq fell back to
    #--- comparing the move field (child str vs the root's int 0) and then
    #--- unorderable State objects, raising TypeError.  A unique counter
    #--- keeps ties FIFO and the comparison on ints only.
    pushCount = 0
    key = heuristicFunc(startState)     #--- root cost is 0, so f = h
    root = State(startState, None, None, 0, 0, key)
    heappush(pQueue, (key, pushCount, root))
    while pQueue:
        node = heappop(pQueue)[2]       #--- entry is (key, counter, State)
        visited.add(node.map)
        if node.state == goalState:     #--- goal test on pop
            goalNode = node
            return True, pQueue
        for neighbor in expand(node):
            #--- f(n) = g(n) + h(n)
            neighbor.key = neighbor.cost + heuristicFunc(neighbor.state)
            if neighbor.map not in visited:
                pushCount += 1
                heappush(pQueue, (neighbor.key, pushCount, neighbor))
                visited.add(neighbor.map)
                if neighbor.depth > maxDepthReached:
                    maxDepthReached += 1
                if len(pQueue) > maxFringeSize:
                    maxFringeSize = len(pQueue)
    #--- Frontier exhausted without reaching the goal
    return False, None
"""
Function - h1() (Heuristic Function 1 : number of misplaced tiles)
- works as a A star Search algorithm.
"""
def h1(state):
    """Heuristic 1: number of tiles not on their goal cell.

    Compares the position of every value 0..puzzleLen-1 in *state* against
    goalState (so the blank tile is counted too, as in the original).
    """
    return sum(1 for tile in range(puzzleLen)
               if state.index(tile) != goalState.index(tile))
"""
Function - h2() (Heuristic Function 2 : sum of the distances of every tile to its goal position.)
- works as a A star Search algorithm.
"""
def h2(state):
    """Heuristic 2: total Manhattan distance of tiles 1..puzzleLen-1
    from their goal cells (the blank tile 0 is excluded)."""
    total = 0
    for tile in range(1, puzzleLen):
        pos, goal = state.index(tile), goalState.index(tile)
        # Column distance + row distance on a puzzleSide-wide grid.
        total += abs(pos % puzzleSide - goal % puzzleSide)
        total += abs(pos // puzzleSide - goal // puzzleSide)
    return total
"""
Function - expand()
- expands the node and creates valid child nodes
- returns all valid child nodes of the current node
"""
def expand(node):
    """Generate the valid successor States of *node*.

    Children are created in the fixed order D, L, R, U; validMove() returns
    None for an illegal slide, and those children are filtered out by the
    truthiness of their .state.  Increments the global nodesExpanded counter.
    """
    global nodesExpanded
    nodesExpanded += 1
    candidates = [
        State(validMove(node.state, direction), node, direction,
              node.depth + 1, node.cost + 1, 0)
        for direction in ('D', 'L', 'R', 'U')
    ]
    # Keep only children whose move was actually legal.
    return [child for child in candidates if child.state]
"""
Function validMove()
- validates the next move as valid or invalid
- returns valid state if move is valid otherwise returns None
"""
def validMove(state, position):
    """Return the board produced by sliding the blank one step.

    state    -- flat list board (0 marks the blank)
    position -- 'U', 'D', 'L' or 'R'
    Returns the new board as a fresh list, or None when the move would
    push the blank off the grid (or the direction letter is unknown).
    """
    board = state[:]
    blank = board.index(0)              #--- locate the blank tile once
    if position == 'U':
        # Cannot move up out of the top row.
        if blank in range(0, puzzleSide):
            return None
        target = blank - puzzleSide
    elif position == 'D':
        # Cannot move down out of the bottom row.
        if blank in range(puzzleLen - puzzleSide, puzzleLen):
            return None
        target = blank + puzzleSide
    elif position == 'L':
        # Cannot move left out of the leftmost column.
        if blank in range(0, puzzleLen, puzzleSide):
            return None
        target = blank - 1
    elif position == 'R':
        # Cannot move right out of the rightmost column.
        if blank in range(puzzleSide - 1, puzzleLen, puzzleSide):
            return None
        target = blank + 1
    else:
        return None  # unknown direction (original fell through to implicit None)
    # Swap the blank with the neighbouring tile.
    board[blank], board[target] = board[target], board[blank]
    return board
"""
Function - get(dataList)
- Reads input from user and updates puzzle configuration
"""
def get(dataList):
    """Parse one comma-separated puzzle line into the module-level state.

    Appends the parsed tile values to the shared initialState list and
    derives puzzleLen (cell count) and puzzleSide (sqrt of the count)
    from its resulting length.
    """
    global puzzleLen, puzzleSide
    initialState.extend(int(token) for token in dataList.split(','))
    puzzleLen = len(initialState)           #--- e.g. 25 for the 24-puzzle
    puzzleSide = int(sqrt(puzzleLen))       #--- e.g. 5 for the 24-puzzle
"""
Function - backtrack()
"""
def backtrack():
    """Reconstruct the move sequence from goalNode back to initialState.

    Walks parent links goal -> start, then prepends the reversed trail onto
    the shared module-level `moves` list (mutated in place, as the original
    did with repeated insert(0, ...)) and returns it.
    """
    node = goalNode
    trail = []
    # Collect moves in goal-to-start order.
    while node.state != initialState:
        trail.append(node.move)
        node = node.parent
    trail.reverse()
    moves[:0] = trail  # prepend in place so the shared global list is updated
    return moves
"""
Function - output(fringe, time)
- creates an output file with all the required elements
"""
def output(fringe, time, testNum=0):
    """Write the search statistics for one puzzle to a result file.

    fringe  -- the remaining frontier returned by the search; truthy means
               the goal was reached, falsy means the puzzle was unsolvable
    time    -- elapsed wall-clock seconds (note: shadows the imported
               `time` module inside this function, which is unused here)
    testNum -- index used in the solved-case filename; defaults to 0 so the
               unsolvable branch can be reached when the caller omits it
               (the original required it and main()'s unsolvable call
               `output(fringe, stop-start)` raised TypeError).
    Side effects: on success rebinds the global `moves` via backtrack().
    """
    if fringe:
        global moves
        moves = backtrack()  #--- recover the move sequence from goalNode
        # NOTE(review): filename says "greedy" regardless of the algorithm
        # actually run — confirm whether that is intended.
        # Context managers guarantee the handles are closed (the original
        # relied on an explicit close that a write error would skip).
        with open('testcase_greedy_' + str(testNum) + '.txt', 'w') as file:
            file.write("\npath_to_goal: " + str(moves))
            file.write("\ncost_of_path: " + str(len(moves)))
            file.write("\nnodes_expanded: " + str(nodesExpanded))
            file.write("\nfringe_size: " + str(len(fringe)))
            file.write("\nmax_fringe_size: " + str(maxFringeSize))
            file.write("\nsearch_depth: " + str(goalNode.depth))
            file.write("\nmax_search_depth: " + str(maxDepthReached))
            file.write("\nrunning_time: " + format(time, '.8f'))
    else:
        with open('testcase_unsolvable.txt', 'w') as file:
            file.write("<-- # UNSOLVABLE # -->")
            file.write("\nnodes_expanded: " + str(nodesExpanded))
            file.write("\nmax_fringe_size: " + str(maxFringeSize))
            file.write("\nmax_search_depth: " + str(maxDepthReached))
            file.write("\nrunning_time: " + format(time, '.8f'))
"""
Function - main()
- Executed everytime the python file starts.
"""
def main():
    """Entry point: prompt for a search algorithm (plus a heuristic for the
    informed searches), then solve every puzzle listed one per line in
    input.txt, writing one result file per solved configuration."""
    algorithm = input('--> Please select the algorithm \
    \n1. bfs : Breadth First Search \
    \n2. dfs : Depth First Search \
    \n3. ast : A Star Search\
    \n4. greedy : Greedy Search\
    \n enter the selection : ')
    # Only the informed searches need a heuristic; the rest get None.
    if algorithm in ('greedy', 'ast'):
        heuristic = input('-- Please select the Heuristic Function\
        \n h1 : number of misplaced tiles\
        \n h2 : sum of the distances of every tile to its goal position.\
        \n-- Enter your choice : ')
        heuristicFunc = heuristic_map[heuristic]
    else:
        heuristicFunc = None
    # Context manager closes the input file (the original left it open).
    with open('input.txt', 'r') as f:
        inputs = f.readlines()
    counter = 1
    print(inputs)
    for data in inputs:
        print('--> solving for : ', data)
        get(data)
        function = function_map[algorithm]
        start = time.time()
        # bfs/dfs are defined with a single positional parameter, so only
        # pass the heuristic to greedy/ast (the original passed it
        # unconditionally, raising TypeError for bfs/dfs).
        if heuristicFunc is None:
            search, fringe = function(initialState)
        else:
            search, fringe = function(initialState, heuristicFunc)
        stop = time.time()
        if search:
            output(fringe, stop - start, counter)
            counter += 1
        else:
            print('Validated all the possible states but goal state not found')
            print('<-- # UNSOLVABLE # -->')
            # The original omitted the third argument here and crashed with
            # a TypeError before the unsolvable report could be written.
            output(fringe, stop - start, counter)
        # Reset per-puzzle state before the next configuration.
        initialState.clear()
        moves.clear()
        # NOTE(review): nodesExpanded / maxFringeSize / maxDepthReached are
        # never reset, so later result files report cumulative figures --
        # confirm whether that is intended.
#--- Dispatch table: CLI algorithm name -> search function (used by main()).
function_map = {
    'bfs': bfs,
    'dfs': dfs,
    'greedy' : greedy,
    'ast' : ast
}
#--- Dispatch table: CLI heuristic name -> heuristic function for greedy/ast.
heuristic_map = {
    'h1' : h1,
    'h2' : h2
}
#--- Script entry point.
if __name__ == '__main__':
    main()
"hpendyal@odu.edu"
] | hpendyal@odu.edu |
fc3147d7f04a3b22e8777bcb8808c34133ca5112 | 6d59b155bcf3a61ff677d8ca103215af20ba40cd | /web_testing/04/04_homework.py | a8a818fe5468d6ba4cc5e124e5398a019db564de | [] | no_license | lilgaage/lilgaage_scripts | 1c82ba869e637a5971215b2eb084f3b2e9a4b954 | 9108f821c634ffae05423083b9330281f3ae5a57 | refs/heads/main | 2023-08-20T13:01:48.670270 | 2021-10-21T09:16:15 | 2021-10-21T09:16:15 | 419,627,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,596 | py | from selenium.webdriver.common.action_chains import ActionChains
from selenium import webdriver
from time import sleep
driver = webdriver.Chrome()
driver.maximize_window()
driver.implicitly_wait(10)
driver.get("http://www.baidu.com")
# 3.实现需求:在百度首页点击更多中的音乐,在百度音乐界面搜索《追梦赤子心》
action = ActionChains(driver)
current_handle = driver.current_window_handle
print("百度首页的窗口句柄是:", current_handle, type(current_handle))
more = driver.find_element_by_link_text("更多")
action.move_to_element(more).perform()
driver.find_element_by_link_text("音乐").click()
handles = driver.window_handles
print("所有的窗口句柄是:", handles, type(handles))
driver.switch_to.window(handles[1])
driver.find_element_by_css_selector('[placeholder="请输入歌名、歌词、歌手或专辑"]').send_keys("追梦赤子心")
driver.find_element_by_class_name('el-input__icon').click()
# # 4.实现需求:在百度首页点击【更多】中的【知道】,然后再知道页面搜索“南京”
# action = ActionChains(driver)
# current_handle = driver.current_window_handle
# more = driver.find_element_by_link_text("更多")
# action.move_to_element(more).perform()
# driver.find_element_by_link_text("知道").click()
# handles = driver.window_handles
# driver.switch_to.window(handles[1])
# driver.find_element_by_id('kw').send_keys('南京')
# driver.find_element_by_id('search-btn').click()
# # 5.实现需求:百度搜索广州,点击打开广州_百度百科,然后滚动条向下移动1000像素
# current_handle = driver.current_window_handle
# driver.find_element_by_id('kw').send_keys("广州")
# driver.find_element_by_id('su').click()
# driver.find_element_by_partial_link_text('百度百科').click()
# handles = driver.window_handles
# driver.switch_to.window(handles[1])
# js = "window.scrollTo(0,1000)"
# driver.execute_script(js)
# # 6.实现需求:百度搜索中公教育,点击打开中公官网,然后鼠标悬停在IT就业
# current_handle = driver.current_window_handle
# driver.find_element_by_id('kw').send_keys("中公教育")
# driver.find_element_by_id('su').click()
# driver.find_element_by_link_text('公务员考试网-2022国考公务员报名/时间/职位-培训-中公教育').click()
# handles = driver.window_handles
# driver.switch_to.window(handles[1])
# action = ActionChains(driver)
# more = driver.find_element_by_link_text("IT 就 业")
# action.move_to_element(more).perform()
sleep(3)
driver.quit()
| [
"noreply@github.com"
] | lilgaage.noreply@github.com |
57b3a80f42f4f66adc39fcd3b6844e63b361d6be | 3b1ac4b685bc4a5e905e62472329ffaca78e2315 | /Main Python scripts/Other Implementations trials/HackerLeagueDownload.py | effc9f6658b2368069c77f92d70fa85e25e2079b | [] | no_license | swapniltamse/HackerL | a0634b4c905a683326d220147b006b6fd01f0e0f | 5f5a8839ceb6371eae72337cd6fe7a4b876e8e7a | refs/heads/master | 2021-01-01T17:37:03.049262 | 2014-10-15T00:57:40 | 2014-10-15T00:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,208 | py | __author__ = 'stamse'
from urllib import request
hlData1Url = "data:text/csv;charset=utf-8,id%2Cname%2Cslug%2Cdescription%2Cexternal_url%2Cstate%2Cstart_time%2Cend_time%2Clogo%2Cstudents_only%2Ctotal_hackers%2Ctotal_hacks%2Clocation%2Fcity%2Clocation%2Fstate%2Clocation%2Fcountry%2Curl%0D%0A53cfaab8bd0623c6d4000008%2CContent%20Hack%20Day%2Ccontent-hack-day%2C%22Content%20Hack%20Day%20intends%20to%20explore%2C%20break%2C%20twist%2C%20or%20reboot%20the%20definition%20of%20content%20through%2032%20hours%20of%20coding%2C%20remixing%2C%20and%20information%20deployment.%20Put%20content%20in%20unthinkable%20mediums%2C%20fill%20mediums%20with%20unseen%20content.%0D%0A%0D%0AThe%20aim%20is%20that%20developers%2C%20designers%2C%20artists%20and%20other%20media-savvy%20people%20prototype%2C%20glitch%20and%20publish%20different%20kinds%20of%20content%20across%20whatever%20platform%20they%20can%20think%20of.%22%2C%2Cpending%2C2014-11-15T08%3A00%3A00Z%2C2014-11-16T16%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F53cfaab8bd0623c6d4000008%2Foriginal.png%3F1406118580%2Cfalse%2C7%2C0%2CBerlin%2CBerlin%2CGermany%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fcontent-hack-day%0D%0A54346c2a55014b3d83000009%2CMedia%20Hack%20Day%20-%20Video%2Cmedia-hack-day-video%2C%22**The%20Challenge**%0D%0AThe%20world%20of%20publishing%20is%20changing.%20Rapid%20advances%20in%20technology%20and%20a%20spirit%20of%20innovation%20have%20transformed%20almost%20all%20aspects%20of%20newspapers%20and%20news%20media.%20And%20online%20is%20not%20just%20the%20translation%20of%20print.%20Therefore%20we%20want%20you%20to%20explore%20the%20possibilities%20of%20online%20video%20for%20publishing.%0D%0A%0D%0A**The%20Mission**%0D%0AJoin%20our%20Media%20Hack%20Day%20and%20find%20new%20ways%20to%20reinvent%20and%20reengineer%20video%20content%20for%20the%20digital%20age.%20The%20hackathon%20will%20bring%20together%20creative%20programmers%2C%20designers%2C%20tech-savvy%20journalists%20and%20product%20visionaries.%20Join%20
us%20if%20you%20are%20an%20individual%20developer%2C%20designer%20or%20established%20hackathon%20team.%0D%0A%0D%0A**The%20Reward**%0D%0AThe%20Mediahackday%20takes%20place%20as%20a%20warm%20up%20for%20VDZ%20Tech%20Summit%202014.%20Prizes%2C%20an%20international%20jury%20and%20the%20praise%20from%20the%20startup%20community%20and%20media%20owners%20are%20waiting.%20Already%20got%20an%20inspiring%20idea%3F%20Post%20it%20at%20this%20site%20so%20others%20can%20see%20and%20join%20your%20team.%0D%0A%0D%0A**Participation**%0D%0AGet%20your%20free%20Ticket%20for%20Participation%20SOON.%0D%0AMore%20information%20on%20our%20%5BWebsite%5D(http%3A%2F%2Fmediahackday.com%2F).%22%2Chttp%3A%2F%2Fmediahackday.com%2F%2Cpending%2C2014-11-15T08%3A00%3A00Z%2C2014-11-16T16%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F54346c2a55014b3d83000009%2Foriginal.png%3F1412721704%2Cfalse%2C3%2C0%2CBerlin%2C%2CGermany%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fmedia-hack-day-video%0D%0A543cb6f60c9d7d8365000019%2CBitHack%3A%20Bitcoin%20Student%20Hackathon%2Cbithack-bitcoin-student-hackathon%2C%EF%BF%BC%EF%BF%BC%EF%BF%BCNYU's%20first%20cryptocurrency-themed%20hackathon%20will%20bring%20together%20students%20from%20universities%20along%20the%20East%20Coast%20(and%20beyond!)%20to%20realize%20ideas%20and%20innovations%20around%20Bitcoin%20and%20the%20future%20of%20finance.%20Participants%20will%20hear%20from%20bitcoin%20experts%20and%20present%20in%20front%20of%20an%20esteemed%20panel%20of%20judges%20after%20hacking%20in%20the%20university's%20brand%20new%20Mark%20and%20Debra%20Leslie%20Entrepreneur's%20Lab.%20%2Chttp%3A%2F%2Fwww.nyuentrepreneur.com%2F%2Cpending%2C2014-11-14T18%3A30%3A00Z%2C2014-11-15T23%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F543cb6f60c9d7d8365000019%2Foriginal.png%3F1413265196%2Ctrue%2C0%2C0%2CNew%20York%2CNY%2CUS%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fbithack-bitcoin-st
udent-hackathon%0D%0A5436a275c11fba6200000004%2CIndoor%20Location%20Hackathon%2Cindoor-location-hackathon%2C%22The%20rise%20of%20indoor%20location%2C%20a%20hackathon%20within%20the%20Where%20Camp%20conference%20in%20Berlin!%0D%0A%0D%0A_interactive%20tour%20guides.%20real-time%20heatmaps.%20indoor%20navigation%20for%20visually%20impaired.%20location-aware%20home%20and%20IoT%20solutions.%20proximity%20beacon%20interactions.%20real-time%20location%20sharing.%20indoor%20gamification.%20contextual%20user%20experiences._%0D%0A%0D%0Ajust%20to%20name%20a%20few%20of%20the%20ideas%20that%20come%20to%20mind%20when%20talking%20about%20indoor%20localization.%20namely%20this%20topic%20is%20becoming%20very%20popular%20these%20days%20but%20at%20the%20same%20time%20just%20a%20few%20of%20its%20applications%20are%20being%20discussed%2C%20not%20to%20mention%20developed%20and%20deployed.%20%0D%0A%0D%0Awe%20want%20to%20dedicate%20one%20day%20to%20spontaneous%20indoor%20and%20iBeacon%20hacks.%20just%20to%20open%20up%20our%20imagination%20and%20think%20of%20the%20immense%20opportunities%20indoor%20location%20and%20iBeacons%20could%20offer%20to%20our%20society.%20how%20we%20could%20connect%20physical%20and%20digital%20worlds%20in%20a%20playful%2C%20useful%20and%20innovative%20way%20to%20benefit%20consumers%2C%20venue%20owners%2C%20event%20organizers%2C%20researchers%2C%20kids%2C%20parents%2C%20old%20people%2C...%20etc.%20etc.%20%0D%0A%0D%0AThe%20main%20idea%20is%20having%20a%20fun%20day%20kaching%20indoor%20location.%20To%20bring%20the%20original%20hackathon%20spirit%20back%2C%20there%20won%E2%80%99t%20be%20cash%20prizes%2C%20but%20we%20promise%20iBeacon%20sets%20and%20something%20more.%20%0D%0A%0D%0ABreakfast%2C%20lunch%20and%20dinner%20will%20be%20provided%20as%20well%20as%20lots%20of%20caffeine.%20And%20of%20course%20a%20few%20rewarding%20pints%20of%20beer%20at%20the%20end%20of%20the%20day!%0D%0A%0D%0A%0D%0A**Agenda**%0D%0A%0D%0A_Friday%2C%2014th%20November%202014_%0D%0A%0D%0A17.30%20-%2
0Hackathon%20Warm-up%20%0D%0A%0D%0A_Saturday%2C%2015th%20November%202014_%0D%0A%0D%0A9.30%20-%20Hackathon%20doors%20open%0D%0A9.30%20-%2010.00%20-%20Breakfast%20%26%20Brainstorming%20%0D%0A10.00%20-%20Intro%20indoo.rs%20SDK%20and%20iBeacons%0D%0A10.30%20-%20Idea%20pitching%20and%20forming%20groups%20%0D%0A11.30%20-%20%20Hacking%20%0D%0A13.00%20-%20Lunch%20(Pizza)%0D%0A18.00%20-%20Pitches%0D%0A19.00%20-%20Awards%20%26%20beer%0D%0A%0D%0A**Resources%20for%20hackers%3A**%0D%0A%5Bmy.indoo.rs%5D(http%3A%2F%2Fmy.indoo.rs%2F)%0D%0A%5Bbeaconinside.com%2Fgetstarted%5D(http%3A%2F%2Fbeaconinside.com%2Fgetstarted)%0D%0A%5Bdeveloper.apple.com%2Fibeacon%5D(https%3A%2F%2Fdeveloper.apple.com%2Fibeacon%2F)%0D%0A%0D%0A**Equipment%3A**%0D%0APlease%20bring%20your%20laptop.%20We%20will%20provide%20you%20with%20iBeacons%20and%20SDKs.%0D%0A%0D%0A_Sponsored%20by%20indoo.rs%20and%20Beaconinside_%0D%0A%22%2C%2Cpending%2C2014-11-14T16%3A30%3A00Z%2C2014-11-15T19%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F5436a275c11fba6200000004%2Foriginal.jpg%3F1412866675%2Cfalse%2C5%2C0%2CBerlin%2CBerlin%2CGermany%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Findoor-location-hackathon%0D%0A543564317f04ca7d11000022%2CAppHack%203%2Capphack-3%2C%22The%20Highcountry's%20largest%20hackathon%20is%20back!%20More%20prizes%2C%20more%20developers%2C%20more%20free%20food!%22%2Chttp%3A%2F%2Fcs.appstate.edu%2Fapphack%2F%2Cpending%2C2014-11-15T00%3A00%3A00Z%2C2014-11-15T12%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F543564317f04ca7d11000022%2Foriginal.png%3F1412785196%2Cfalse%2C0%2C0%2CBoone%2CNorth%20Carolina%2CUnited%20States%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fapphack-3%0D%0A540fd2190b963dc81200004b%2CAT%26T%20CNY%20Civic%20App%20Challenge%2Cat-and-t-cny-civic-app-challenge%2C%22The%20AT%26T%20Central%20New%20York%20Civic%20App%20Challenge%20is%20a%20two%20month%20virtual%20hackathon%20for%20CNY's%20most%
20talented%20computer%20scientists%2C%20software%20developers%2C%20designers%20and%20hardware%20engineers%20to%20build%20products%20that%20have%20the%20potential%20to%20benefit%20the%20local%20community.%20AT%26T%20and%20its%20partners%20aim%20to%20unite%20developers%20in%20the%20greater%20CNY%20region%20to%20develop%20mobile%20apps%20serving%20community%20needs%20while%20demonstrating%20how%20mobile%20technologies%20can%20lead%20to%20the%20next%20generation%20of%20tech%20jobs%20and%20investment.%0D%0A%0D%0AThe%20virtual%20app%20challenge%20will%20kick-off%20at%20the%20Tech%20Garden%20in%20Downtown%20Syracuse%20September%2011th%20at%2011AM%20and%20conclude%20November%2011th%20at%2011%3A59PM.%20Submissions%20will%20be%20accepted%20virtually%20at%3A%20http%3A%2F%2Fattcny.hackupstate.com%2F%0D%0A%0D%0AWinners%20will%20be%20announced%20and%20apps%20will%20be%20demoed%20November%2019th%20during%20Global%20Entrepreneur%20Week.%0D%0A%0D%0A%23%23Prizes%0D%0A%0D%0AThe%20AT%26T%20CNY%20Civic%20App%20Challenge%20has%20two%20submission%20categories.%20Each%20category%20has%20a%20%247%2C500%20grand%20prize%20and%20a%20%241%2C500%20second%20place%20prize.%0D%0A%0D%0ADevelopers%20already%20working%20on%20their%20community%20service-themed%20app%20prior%20to%20the%20launch%20of%20the%20App%20Challenge%20must%20submit%20to%20the%20%E2%80%9CBeta%E2%80%9D%20category.%20Apps%20started%20from%20scratch%20specifically%20for%20this%20event%20submit%20to%20the%20%E2%80%9CAlpha%E2%80%9D%20category.%20When%20submitting%20your%20app%2C%20please%20state%20which%20track%20it%20belongs%20to.%20If%20unlisted%2C%20it%E2%80%99ll%20be%20entered%20in%20the%20beta%20track%20against%20apps%20that%20have%20been%20worked%20on%20for%20longer.%0D%0A%0D%0A%23%23Judging%20Criteria%0D%0A%0D%0ASubmitted%20apps%20will%20be%20judged%20on%20potential%20impact%20on%20the%20Greater%20Central%20New%20York%20community%2C%20execution%20and%20creativity%20or%20novelty.%20%20The%20judging%20panel%20will%20include%20loca
l%20tech%20experts%2C%20community%20leaders%20and%20elected%20officials.%0D%0A%0D%0APlease%20be%20aware%20that%20one%20member%20of%20each%20team%20must%20be%20a%20resident%20or%20attend%20an%20institute%20of%20higher%20learning%20within%20one%20of%20the%2013%20counties%20of%20Central%20New%20York%20--%20Seneca%2C%20Cayuga%2C%20Cortland%2C%20Tompkins%2C%20Onondaga%2C%20Herkimer%2C%20Oneida%2C%20Madison%2C%20Lewis%2C%20Jefferson%2C%20St.%20Lawrence%2C%20Oswego%2C%20and%20Broome%20County.%0D%0A%0D%0A%23%23Judges%0D%0A%0D%0A*%20Hon.%20David%20J.%20Valesky%2C%20New%20York%20State%20Senator%20(53rd%20District)%0D%0A*%20Hon.%20Samuel%20D.%20Roberts%2C%20New%20York%20State%20Assemblyman%0D%0A*%20Marissa%20Shorenstein%2C%20AT%26T%20New%20York%20President%0D%0A*%20Deborah%20F.%20Stanley%2C%20President%2C%20SUNY%20Oswego%0D%0A*%20Sean%20Branagan%2C%20Director%2C%20Newhouse%20School%20Center%20for%20Digital%20Media%20Entrepreneurship%2C%20Syracuse%20University%20%2F%20Partner%2C%20Echelon%20Capital%20%2F%20Partner%2C%20C3%20Strategic%20%2F%20Founder%20%26%20President%2C%20Communigration%0D%0A*%20Seth%20Mulligan%2C%20Vice%20President%20for%20Innovation%20Services%2C%20CenterState%20Corporation%20for%20Economic%20Opportunity%20(CEO)%0D%0A*%20Steve%20Maier%2C%20Senior%20Technical%20Evangelist%2C%20Microsoft%0D%0A*%20Stacey%20Keefe%2C%20Executive%20Director%2C%20RvD%20IDEA%20at%20Syracuse%20University%0D%0A*%20Andrew%20Nicklin%2C%20Director%20of%20Open%20NY%0D%0A*%20Kate%20Brodock%2C%20President%2C%20Girls%20in%20Tech%0D%0A%0D%0A%23%23How%20to%20Submit%0D%0A%0D%0A*%20It's%20encouraged%20that%20native%20Android%2C%20Windows%2C%20or%20iOS%20apps%20finished%20early%20are%20publish%20to%20Google%20Play%20or%20the%20App%20Store%20in%20order%20to%20streamline%20the%20judging%20process.%20Please%20provide%20the%20download%20link%20before%20the%20submission%20deadline.%0D%0A%0D%0A*%20For%20native%20apps%20not%20in%20one%20of%20the%20two%20primary%20app%20stores%2C%20follow%20the%20steps%20
below%3A%0D%0A%0D%0A%20%20*%20For%20apps%20built%20for%20iOS%2C%20please%20upload%20the%20beta%20to%20TestFlight%20and%20share%20it%20with%20Hack%20Upstate.%20To%20do%20so%2C%20log%20in%20to%20TestFlight%E2%80%99s%20web%20client%2C%20go%20to%20the%20dashboard%2C%20click%20%E2%80%9CInvite%20People%2C%E2%80%9D%20and%20invite%20doug%40hackupstate.com.%0D%0A%0D%0A%20%20*%20For%20Android%20or%20Windows%20apps%2C%20submissions%20will%20be%20accepted%20through%20Hockey.%20Once%20the%20app%20is%20successfully%20uploaded%2C%20please%20share%20it%20with%20doug%40hackupstate.com.%0D%0A%0D%0A*%20Mobile-optimized%20web%20applications%20and%20SMS%2FMMS%20apps%20are%20welcome%20as%20well.%20Please%20provide%20us%20with%20the%20link%20to%20where%20these%20products%2Fservices%20can%20be%20accessed.%0D%0A%0D%0A*%20After%20uploading%20and%20sharing%20your%20app%2C%20submit%20your%20team's%20information%20through%20Hacker%20League's%20%22%22Projects%22%22%20section.%20Be%20sure%20to%20include%20the%20name%2C%20phone%20number%2C%20email%20address%2C%20and%20Hacker%20League%20profile%20for%20each%20team%20member.%20This%20ensures%20all%20member%20are%20awarded%20the%20prize%20money%20if%20your%20team%20wins.%20It%20also%20keeps%20each%20member%20up%20to%20date%20on%20announcements%2C%20event%20updates%2C%20and%20special%20offers.%0D%0AIn%20the%20information%20section%2C%20be%20sure%20to%20list%20whether%20you%E2%80%99re%20submission%20is%20for%20the%20alpha%20or%20beta%20track.%20If%20unlisted%2C%20you%E2%80%99ll%20be%20entered%20in%20the%20Beta%20track%20with%20apps%20that%20have%20been%20in%20development%20for%20several%20months.%0D%0A%0D%0A*%20Each%20team%20must%20also%20provide%20a%20short%20video%20demoing%20how%20your%20app%20is%20intended%20to%20work%2C%20what%20problem%20it%20sets%20out%20to%20solve%2C%20and%20any%20struggles%20you%20encountered%20while%20working%20on%20it.%20These%20videos%20help%20explain%20your%20app%20to%20the%20judges%3B%20video%20quality%20will%20not%20effe
ct%20judging%20scores.%20The%20video%20can%20be%20as%20simple%20as%20screenshots%20and%20a%20voiceover%2C%20or%20it%20can%20be%20a%20full%20videocast.%20Videos%20can%20be%20uploaded%20to%20any%20video%20hosting%20site%20--%20YouTube%2C%20Vimeo%2C%20Ziggeo%2C%20etc.%0D%0A%0D%0A*%20For%20walkthroughs%20on%20how%20to%20upload%20apps%20to%20TestFlight%20and%20Hockey%2C%20follow%20the%20links%20below%3A%0D%0A%20*%20TestFlight%20--%20http%3A%2F%2Fhelp.testflightapp.com%2Fcustomer%2Fportal%2Farticles%2F829857-how-do-i-prepare-a-build-for-distribution-through-testflight-%0D%0A%0D%0A%20*%20Hockey%20--%20http%3A%2F%2Fsupport.hockeyapp.net%2Fkb%2Fabout-general-faq%2Fhow-to-create-a-new-app%0D%0A%0D%0A%23%23Partners%0D%0A%0D%0A*%20AT%26T%0D%0A*%20Syracuse%20University%0D%0A*%20SUNY%20Oswego%0D%0A*%20RVD%20IDEA%0D%0A*%20CenterState%20CEO%0D%0A*%20Syracuse%20Tech%20Garden%0D%0A*%20Hack%20Upstate%0D%0A*%20Girls%20in%20Tech%0D%0A%0D%0A%23%23Contacts%0D%0A%0D%0ABen%20Roberts%20--%20ben.roberts%40att.com%0D%0ATom%20Charles%20--%20tom%40hackupstate.com%0D%0ABilly%20Ceskavich%20--%20billy%40hackupstate.com%0D%0A%0D%0AAll%20questions%2C%20comments%2C%20and%20suggestions%20are%20welcome%20and%20encouraged!%22%2Chttp%3A%2F%2Fattcny.hackupstate.com%2F%2Chacking%2C2014-09-11T15%3A00%3A00Z%2C2014-11-12T04%3A59%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F540fd2190b963dc81200004b%2Foriginal.png%3F1410322966%2Cfalse%2C28%2C1%2CCentral%20New%20York%2CNY%2CUnited%20States%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fat-and-t-cny-civic-app-challenge%0D%0A540d3c92032d9a5968000055%2CAT%26T%20WNY%20Civic%20App%20Challenge%2Cat-and-t-wny-civic-app-challenge%2C%22The%20AT%26T%20Western%20New%20York%20Civic%20App%20Challenge%20is%20a%20two%20month%20virtual%20hackathon%20for%20WNY's%20most%20talented%20computer%20scientists%2C%20software%20developers%2C%20designers%20and%20hardware%20engineers%20to%20build%20products%20that%20have%20the%20potential%20to%2
0benefit%20the%20local%20community.%20AT%26T%20and%20its%20partners%20aim%20to%20unite%20developers%20in%20the%20greater%20WNY%20region%20to%20develop%20mobile%20apps%20serving%20community%20needs%20while%20demonstrating%20how%20mobile%20technologies%20can%20lead%20to%20the%20next%20generation%20of%20tech%20jobs%20and%20investment.%0D%0A%0D%0AThe%20virtual%20app%20challenge%20will%20kick-off%20at%20d!g%20Buffalo%20on%20September%2010th%20at%2011AM%20and%20conclude%20November%2010th%20at%2011%3A59PM.%20Submissions%20will%20be%20accepted%20virtually%20at%3A%20http%3A%2F%2Fattwny.hackupstate.com%2F%0D%0A%0D%0AWinners%20will%20be%20announced%20and%20apps%20will%20be%20demoed%20November%2020th%20during%20Global%20Entrepreneur%20Week.%0D%0A%0D%0A%23%23Grand%20Prize%3A%20%2410%2C000%0D%0ASecond%20Prize%3A%20%245%2C000%0D%0AThird%20Prize%3A%20%243%2C000%0D%0A%0D%0A%23%23Judging%20Criteria%0D%0A%0D%0ASubmitted%20apps%20will%20be%20judged%20on%20potential%20impact%20on%20the%20Greater%20Western%20New%20York%20community%2C%20execution%20and%20creativity%20or%20novelty.%20The%20judging%20panel%20includes%20local%20tech%20experts%2C%20community%20leaders%20and%20elected%20officials.%0D%0A%0D%0APlease%20be%20aware%20that%20one%20member%20of%20each%20team%20must%20be%20a%20resident%20or%20attend%20an%20institute%20of%20higher%20learning%20within%20one%20of%20the%20eight%20counties%20of%20Western%20New%20York%20--%20Cattaraugus%2C%20Chautauqua%2C%20Erie%2C%20Wyoming%2C%20Genesee%2C%20Orleans%2C%20Niagara%20and%20Allegany.%0D%0A%0D%0A%23%23Judges%0D%0A%0D%0A*%20Hon.%20Mark%20Grisanti%2C%20New%20York%20State%20Senator%20(60th%20District)%0D%0A*%20Hon.%20Sean%20Ryan%2C%20New%20York%20State%20Assemblyman%0D%0A*%20Marissa%20Shorenstein%2C%20AT%26T%20New%20York%20President%0D%0A*%20Michael%20Weiner%2C%20President%20%26%20CEO%2C%20United%20Way%20of%20Buffalo%20and%20Erie%20County%0D%0A*%20Marnie%20LaVigne%2C%20PhD%2C%20President%20%26%20CEO%2C%20Launch%20NY%2C%20Inc.%0D%0A*%20Martin%20K.%
20Casstevens%2C%20Business%20Formation%20and%20Commercialization%20Manager%2C%20University%20at%20Buffalo%E2%80%99s%20Office%20of%20Science%2C%20Technology%20Transfer%20and%20Economic%20Outreach%0D%0A*%20Navpreet%20Jatana%2C%20Board%20Member%2C%20InfoTech%20Niagara%20%2F%20Enterprise%20Information%20Security%20Manager%2C%20Health%20Now%0D%0A*%20Dr.%20Reneta%20Barneva%2C%20Chair%20of%20the%20Computer%20and%20Information%20Science%20Department%2C%20State%20University%20of%20New%20York%20at%20Fredonia%0D%0A*%20Dan%20Magnuszewski%2C%20Managing%20Director%2C%20Z80%20Labs%20Technology%20Incubator%0D%0A*%20Patrick%20Whalen%2C%20COO%2C%20Buffalo%20Niagara%20Medical%20Campus%2C%20Inc.%20(BNMC)%0D%0A*%20Andrew%20Nicklin%2C%20Director%20of%20Open%20NY%0D%0A*%20Joel%20Colombo%2C%20President%2C%20360%20PSG%0D%0A%0D%0A%23%23How%20To%20Submit%0D%0A%0D%0A*%20It's%20encouraged%20that%20native%20Android%20or%20iOS%20apps%20finished%20early%20are%20publish%20to%20Google%20Play%20or%20the%20App%20Store%20in%20order%20to%20streamline%20the%20judging%20process.%20Please%20provide%20the%20download%20link%20before%20the%20submission%20deadline.%0D%0A%0D%0A*%20For%20native%20apps%20not%20in%20one%20of%20the%20two%20primary%20app%20stores%2C%20follow%20the%20steps%20below%3A%0D%0A%0D%0A%20%20*%20For%20apps%20built%20for%20iOS%2C%20please%20upload%20the%20beta%20to%20TestFlight%20and%20share%20it%20with%20Hack%20Upstate.%20To%20do%20so%2C%20log%20in%20to%20TestFlight%E2%80%99s%20web%20client%2C%20go%20to%20the%20dashboard%2C%20click%20%E2%80%9CInvite%20People%2C%E2%80%9D%20and%20invite%20doug%40hackupstate.com.%0D%0A%0D%0A%20%20*%20For%20Android%20apps%2C%20submissions%20will%20be%20accepted%20through%20Hockey.%20Once%20the%20app%20is%20successfully%20uploaded%2C%20please%20share%20it%20with%20doug%40hackupstate.com.%0D%0A%0D%0A%0D%0A*%20Mobile-optimized%20web%20applications%20and%20SMS%2FMMS%20apps%20are%20welcome%20as%20well.%20Please%20provide%20us%20with%20the%20link%20to%20where%20these%
20products%2Fservices%20can%20be%20accessed.%0D%0A%0D%0A*%20After%20uploading%20and%20sharing%20your%20app%2C%20submit%20your%20team's%20information%20through%20Hacker%20League's%20%22%22Projects%22%22%20section.%20Be%20sure%20to%20include%20the%20name%2C%20phone%20number%2C%20and%20email%20address%20for%20each%20team%20member%20--%20this%20is%20MANDATORY%20for%20us%20to%20contact%20the%20winning%20team%20and%20distribute%20the%20prize%20money.%20It%20also%20keeps%20each%20member%20up%20to%20date%20on%20announcements%2C%20event%20updates%2C%20and%20special%20offers.%0D%0A%0D%0A*%20Each%20team%20must%20also%20provide%20a%20short%20video%20demoing%20how%20your%20app%20is%20intended%20to%20work%2C%20what%20problem%20it%20sets%20out%20to%20solve%2C%20and%20any%20struggles%20you%20encountered%20while%20working%20on%20it.%20These%20videos%20help%20explain%20your%20app%20to%20the%20judges%3B%20video%20quality%20will%20not%20effect%20judging%20scores.%20The%20video%20can%20be%20as%20simple%20as%20screenshots%20and%20a%20voiceover%2C%20or%20it%20can%20be%20a%20full%20videocast.%20Videos%20can%20be%20uploaded%20to%20any%20video%20hosting%20site%20--%20YouTube%2C%20Vimeo%2C%20Ziggeo%2C%20etc.%0D%0A%0D%0A*%20For%20walkthroughs%20on%20how%20to%20upload%20apps%20to%20TestFlight%20and%20Hockey%2C%20follow%20the%20links%20below%3A%0D%0A%20*%20TestFlight%20--%20http%3A%2F%2Fhelp.testflightapp.com%2Fcustomer%2Fportal%2Farticles%2F829857-how-do-i-prepare-a-build-for-distribution-through-testflight-%0D%0A%0D%0A%20*%20Hockey%20--%20http%3A%2F%2Fsupport.hockeyapp.net%2Fkb%2Fabout-general-faq%2Fhow-to-create-a-new-app%0D%0A%0D%0A%23%23Partners%3A%0D%0A%0D%0A*%20AT%26T%0D%0A*%20Z80%20Labs%0D%0A*%20University%20at%20Buffalo%20Center%20for%20Entrepreneurial%20Leadership%0D%0A*%20UB%20Center%20of%20Excellence%20in%20Bioinformatics%20and%20Life%20Sciences%0D%0A*%20UB%20Office%20of%20Science%2C%20Technology%2C%20Transfer%2C%20and%20Economic%20Outreach%0D%0A*%20United%20Way%20of%20Buffalo%20%26%
20Erie%20County%0D%0A*%20State%20University%20of%20New%20York%20at%20Fredonia%0D%0A*%20InfoTech%20Niagara%0D%0A*%20LaunchNY%0D%0A*%20d!g%20Buffalo%0D%0A*%20Hack%20Upstate%0D%0A%0D%0A%23%23Contacts%3A%0D%0A%0D%0ABen%20Roberts%20--%20ben.roberts%40att.com%0D%0ATom%20Charles%20--%20tom%40hackupstate.com%0D%0ABilly%20Ceskavich%20--%20billy%40hackupstate.com%0D%0A%0D%0AAll%20questions%2C%20comments%2C%20and%20suggestions%20are%20welcome%20and%20encouraged!%22%2Chttp%3A%2F%2Fattwny.hackupstate.com%2F%2Chacking%2C2014-09-10T15%3A00%3A00Z%2C2014-11-11T04%3A59%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F540d3c92032d9a5968000055%2Foriginal.png%3F1410153614%2Cfalse%2C74%2C8%2CWestern%20New%20York%2CNY%2CUnited%20States%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fat-and-t-wny-civic-app-challenge%0D%0A53fe678e33831ee564000025%2CHackNJIT%202014%2Chacknjit-2014%2C%22HackNJIT%20is%20a%2024-hour%20hackathon%20at%20NJIT%20in%20Newark%2C%20NJ.%20Find%20out%20more%20information%20at%20%5Bhacknjit.org%5D(http%3A%2F%2Fhacknjit.org).%22%2Chttp%3A%2F%2Fhacknjit.org%2F%2Cpending%2C2014-11-09T04%3A19%3A00Z%2C2014-11-10T04%3A19%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F53fe678e33831ee564000025%2Foriginal.png%3F1409181578%2Ctrue%2C0%2C0%2CNewark%2CNJ%2CUnited%20States%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fhacknjit-2014%0D%0A5422e680567605bcb800001c%2CHackNashville%206%2Chacknashville-6%2C%22%5B!%5BHackNashville%20%5D(https%3A%2F%2Ffbcdn-sphotos-g-a.akamaihd.net%2Fhphotos-ak-xaf1%2Ft31.0-8%2F473197_459244034153243_1808135651_o.jpg)%5D(http%3A%2F%2Fhacknashville.com)%0D%0A%0D%0A%23%23%23%20_Come%20get%20your%20hack%20on._%0D%0A%0D%0AHackNashville%20is%20the%20premier%20hackathon%20in%20the%20southeast.%20Come%20join%20us%20at%20our%20sixth%20event%2C%20November%207-9%20starting%20at%206pm%20at%20429%20Event%20Space%5B429%20Event%20Space%5D(https%3A%2F%2Fwww.google.com%2Fmaps%2Fplace%2F429%
2BChestnut%2BSt%2C%2BNashville%2C%2BTN%2B37203%2F%4036.1434588%2C-86.7688812%2C15z%2Fdata%3D!4m2!3m1!1s0x8864666e10d5b64d%3A0x3aaae3951dc05899).%0D%0A%0D%0AAs%20always%2C%20attendance%20is%20free%2C%20and%20we'll%20be%20giving%20away%20great%20swag%20and%20providing%20meals%20and%20beverages.%20%0D%0A%0D%0A---------------------------------------------------------%0D%0A%0D%0A%23%23%23%20HackNashville%20in%20the%20press%0D%0A%0D%0ANot%20sure%20what%20API%20to%20use.%20Check%20out%20the%20API%20Explore%20to%20browse%20and%20explore%20what%20each%20API%20does%2C%20without%20writing%20a%20single%20line%20of%20code!%0D%0A%0D%0A%5BNPR%5D%0D%0A(http%3A%2F%2Fnashvillepublicradio.org%2Fblog%2F2014%2F05%2F05%2F48-hours-straight-computer-programming-hack-nashville-makes-happen%2F)%0D%0A%5BThe%20Tennessean%5D(http%3A%2F%2Fwww.tennessean.com%2Fstory%2Fmoney%2F2014%2F05%2F01%2Fhack-nashville-pushes-limits%2F8588215%2F)%0D%0A%5BSouthernAlpha%5D%0D%0A(http%3A%2F%2Fsouthernalpha.com%2F3-nashville-developers-love-nashville-hacknashville%2F)%0D%0A%5BNashville%20Business%20Journal%5D%0D%0A(http%3A%2F%2Fwww.bizjournals.com%2Fnashville%2Fnews%2F2013%2F01%2F30%2Fhack-nashville-event-leads-to.html)%0D%0A%5BReddit%5D%0D%0A(http%3A%2F%2Fwww.reddit.com%2Fr%2Fnashville%2Fcomments%2F24rx72%2Fhack_nashville_was_48_hours_of_programming_kegs%2F)%0D%0A%5BMoontoast%20Blog%5D%0D%0A(http%3A%2F%2Fblog.moontoast.com%2Fblog%2Fhack-nashville-5-new-level-awesome%2F)%0D%0A%5BSouthernAlpha%5D%0D%0A(http%3A%2F%2Fsouthernalpha.com%2F19-products-people-know-hacknashville%2F)%0D%0A%0D%0A---------------------------------------------------------%0D%0A%0D%0A%23%23%23%20Agenda%0D%0A%0D%0A%23%23%23%23%20Friday%20MEET%20N'%20GREET%2011%2F7%0D%0A6pm%20%E2%80%93%207pm%20%3A%20Meet%20%26%20Greet%20(food%20provided)%0D%0A7pm%20%E2%80%93%2010pm%3A%20Share%20your%20idea%20and%20form%20teams%0D%0A*Staying%20overnight%20is%20allowed%20but%20not%20required%0D%0ASaturday%0D%0A%0D%0A%23%23%23%23%20Saturday%20GET%20IT%20DONE!%2011
%2F8%0D%0A8am%20%E2%80%93%2010pm%20%3A%20Project%20Teams%20working%20feverishly%0D%0AFood%20%26%20Beverages%20provided%0D%0A*Staying%20overnight%20is%20allowed%20but%20not%20required%0D%0A%0D%0A%23%23%23%23%20Sunday%20PRESENTATIONS%2011%2F9%0D%0A8am%20%E2%80%93%205pm%20%3A%20Final%20Work%20on%20Team%20Projects%0D%0A5pm%20%E2%80%93%208pm%3A%20Expo-style%20project%20demos.%20%0D%0A*ALL%20are%20welcome%20at%20the%20demos%0D%0A%0D%0A---------------------------------------------------------%0D%0A%0D%0A%23%23%23%20Ahem.%20Prizes!%0D%0A%0D%0AWe'll%20be%20giving%20away%20tons%20of%20swag%20and%20prizes%2C%20and%20as%20always%2C%20the%20coveted%20Hacker's%20choice%20award%20goes%20to%20the%20team%20with%20the%20most%20highly%20rated%20project%3A%0D%0A%0D%0A%5B!%5BHackNashville%20%5D(https%3A%2F%2Fpbs.twimg.com%2Fmedia%2FArIgf2HCQAAuzGj.jpg%3Alarge)%0D%0A%0D%0A---------------------------------------------------------%22%2Chttp%3A%2F%2Fhacknashville.com%2Cpending%2C2014-11-07T23%3A00%3A00Z%2C2014-11-10T01%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F5422e680567605bcb800001c%2Foriginal.png%3F1411573374%2Cfalse%2C2%2C0%2CNashville%2CTN%2CUnited%20States%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fhacknashville-6%0D%0A540560e441cc6dc9d7000002%2CTeen%20Hackathon%2Cteen-hackathon%2CAll%20details%20for%20the%20hackathon%20can%20be%20found%20here%3A%20http%3A%2F%2Fteenhackathon.com%2Chttp%3A%2F%2Fteenhackathon.com%2Cpending%2C2014-11-08T18%3A00%3A00Z%2C2014-11-10T00%3A00%3A00Z%2Chttps%3A%2F%2Fs3.amazonaws.com%2Fhackerleague-production%2Forganizations%2Flogos%2F540560e441cc6dc9d7000002%2Foriginal.png%3F1409733529%2Cfalse%2C2%2C0%2CCupertino%2CCA%2CUSA%2Chttp%3A%2F%2Fhackerleague.org%2Fhackathons%2Fteen-hackathon%0D%0A"
print (hlData1Url)
def downloadHlData(csv_url):
response = request.urlopen(csv_url)
csv = response.read()
csv_string = str(csv)
lines = csv_string.split("\\n")
destination_url = "downloadFile1.csv"
fileOpen = open(destination_url, "w")
for line in lines:
fileOpen.write(line + "\n")
fileOpen.close()
downloadHlData(hlData1Url)
| [
"swapnil.tamse@gmail.com"
] | swapnil.tamse@gmail.com |
413bde56ec836dc7356ea0348825b4d1d122f971 | d5a462ae5359c7ceb70713c3184e105e8efb6b26 | /10 poer sugero safe driver prediction/rgf-target-encoding-0-282-on-lb.py | c6194a75d9d3270170656bd0146164f78df3ee2f | [
"MIT"
] | permissive | MlvPrasadOfficial/KaggleNoteboooks_of_Projects | b88bcfaa27d693bd5698179138b59817dedca762 | 379e062cf58d83ff57a456552bb956df68381fdd | refs/heads/master | 2020-07-28T13:27:02.373642 | 2019-11-22T10:20:32 | 2019-11-22T10:20:32 | 209,424,303 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,930 | py | import numpy as np
import pandas as pd
from rgf.sklearn import RGFClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from numba import jit
import time
import gc
import subprocess
import glob
# Compute gini
# from CPMP's kernel https://www.kaggle.com/cpmpml/extremely-fast-gini-computation
@jit
def eval_gini(y_true, y_prob):
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
ntrue = 0
gini = 0
delta = 0
n = len(y_true)
for i in range(n-1, -1, -1):
y_i = y_true[i]
ntrue += y_i
gini += y_i * delta
delta += 1 - y_i
gini = 1 - 2 * gini / (ntrue * (n - ntrue))
return gini
# Funcitons from olivier's kernel
# https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = -eval_gini(labels, preds)
return [('gini', gini_score)]
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def target_encode(trn_series=None, # Revised to encode validation series
val_series=None,
tst_series=None,
target=None,
min_samples_leaf=1,
smoothing=1,
noise_level=0):
"""
Smoothing is computed like in the following paper by Daniele Micci-Barreca
https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
trn_series : training categorical feature as a pd.Series
tst_series : test categorical feature as a pd.Series
target : target data as a pd.Series
min_samples_leaf (int) : minimum samples to take category average into account
smoothing (int) : smoothing effect to balance categorical average vs prior
"""
assert len(trn_series) == len(target)
assert trn_series.name == tst_series.name
temp = pd.concat([trn_series, target], axis=1)
# Compute target mean
averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"])
# Compute smoothing
smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
# Apply average function to all target data
prior = target.mean()
# The bigger the count the less full_avg is taken into account
averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing
averages.drop(["mean", "count"], axis=1, inplace=True)
# Apply averages to trn and tst series
ft_trn_series = pd.merge(
trn_series.to_frame(trn_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=trn_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_trn_series.index = trn_series.index
ft_val_series = pd.merge(
val_series.to_frame(val_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=val_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_val_series.index = val_series.index
ft_tst_series = pd.merge(
tst_series.to_frame(tst_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=tst_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_tst_series.index = tst_series.index
return add_noise(ft_trn_series, noise_level), add_noise(ft_val_series, noise_level), add_noise(ft_tst_series, noise_level)
# Read data
train_df = pd.read_csv('../input/train.csv', na_values="-1") # .iloc[0:200,:]
test_df = pd.read_csv('../input/test.csv', na_values="-1")
# from olivier
train_features = [
"ps_car_13", # : 1571.65 / shadow 609.23
"ps_reg_03", # : 1408.42 / shadow 511.15
"ps_ind_05_cat", # : 1387.87 / shadow 84.72
"ps_ind_03", # : 1219.47 / shadow 230.55
"ps_ind_15", # : 922.18 / shadow 242.00
"ps_reg_02", # : 920.65 / shadow 267.50
"ps_car_14", # : 798.48 / shadow 549.58
"ps_car_12", # : 731.93 / shadow 293.62
"ps_car_01_cat", # : 698.07 / shadow 178.72
"ps_car_07_cat", # : 694.53 / shadow 36.35
"ps_ind_17_bin", # : 620.77 / shadow 23.15
"ps_car_03_cat", # : 611.73 / shadow 50.67
"ps_reg_01", # : 598.60 / shadow 178.57
"ps_car_15", # : 593.35 / shadow 226.43
"ps_ind_01", # : 547.32 / shadow 154.58
"ps_ind_16_bin", # : 475.37 / shadow 34.17
"ps_ind_07_bin", # : 435.28 / shadow 28.92
"ps_car_06_cat", # : 398.02 / shadow 212.43
"ps_car_04_cat", # : 376.87 / shadow 76.98
"ps_ind_06_bin", # : 370.97 / shadow 36.13
"ps_car_09_cat", # : 214.12 / shadow 81.38
"ps_car_02_cat", # : 203.03 / shadow 26.67
"ps_ind_02_cat", # : 189.47 / shadow 65.68
"ps_car_11", # : 173.28 / shadow 76.45
"ps_car_05_cat", # : 172.75 / shadow 62.92
"ps_calc_09", # : 169.13 / shadow 129.72
"ps_calc_05", # : 148.83 / shadow 120.68
"ps_ind_08_bin", # : 140.73 / shadow 27.63
"ps_car_08_cat", # : 120.87 / shadow 28.82
"ps_ind_09_bin", # : 113.92 / shadow 27.05
"ps_ind_04_cat", # : 107.27 / shadow 37.43
"ps_ind_18_bin", # : 77.42 / shadow 25.97
"ps_ind_12_bin", # : 39.67 / shadow 15.52
"ps_ind_14", # : 37.37 / shadow 16.65
]
# add combinations
combs = [
('ps_reg_01', 'ps_car_02_cat'),
('ps_reg_01', 'ps_car_04_cat'),
]
# Process data
id_test = test_df['id'].values
id_train = train_df['id'].values
y = train_df['target']
start = time.time()
for n_c, (f1, f2) in enumerate(combs):
name1 = f1 + "_plus_" + f2
print('current feature %60s %4d in %5.1f'
% (name1, n_c + 1, (time.time() - start) / 60), end='')
print('\r' * 75, end='')
train_df[name1] = train_df[f1].apply(lambda x: str(x)) + "_" + train_df[f2].apply(lambda x: str(x))
test_df[name1] = test_df[f1].apply(lambda x: str(x)) + "_" + test_df[f2].apply(lambda x: str(x))
# Label Encode
lbl = LabelEncoder()
lbl.fit(list(train_df[name1].values) + list(test_df[name1].values))
train_df[name1] = lbl.transform(list(train_df[name1].values))
test_df[name1] = lbl.transform(list(test_df[name1].values))
train_features.append(name1)
X = train_df[train_features]
test_df = test_df[train_features]
f_cats = [f for f in X.columns if "_cat" in f]
y_valid_pred = 0*y
y_test_pred = 0
# Set up folds
K = 5
kf = KFold(n_splits = K, random_state = 1, shuffle = True)
np.random.seed(0)
# Run CV
def run_rgf():
model = RGFClassifier(
max_leaf=1000,
algorithm="RGF",
loss="Log",
l2=0.01,
sl2=0.01,
normalize=False,
min_samples_leaf=10,
n_iter=None,
opt_interval=100,
learning_rate=.5,
calc_prob="sigmoid",
n_jobs=-1,
memory_policy="generous",
verbose=0
)
fit_model = model.fit( X_train, y_train )
pred = fit_model.predict_proba(X_valid)[:,1]
pred_test = fit_model.predict_proba(X_test)[:,1]
try:
subprocess.call('rm -rf /tmp/rgf/*', shell=True)
print("Clean up is successfull")
print(glob.glob("/tmp/rgf/*"))
except Exception as e:
print(str(e))
return pred, pred_test
for i, (train_index, test_index) in enumerate(kf.split(train_df)):
# Create data for this fold
y_train, y_valid = y.iloc[train_index].copy(), y.iloc[test_index]
X_train, X_valid = X.iloc[train_index,:].copy(), X.iloc[test_index,:].copy()
X_test = test_df.copy()
print( "\nFold ", i)
# Enocode data
for f in f_cats:
X_train[f + "_avg"], X_valid[f + "_avg"], X_test[f + "_avg"] = target_encode(
trn_series=X_train[f],
val_series=X_valid[f],
tst_series=X_test[f],
target=y_train,
min_samples_leaf=200,
smoothing=10,
noise_level=0
)
# Run model for this fold
X_train = X_train.fillna(X_train.mean())
X_valid = X_valid.fillna(X_valid.mean())
X_test = X_test.fillna(X_test.mean())
# Generate validation predictions for this fold
pred, pred_test = run_rgf()
print( " Gini = ", eval_gini(y_valid, pred) )
y_valid_pred.iloc[test_index] = pred
# Accumulate test set predictions
y_test_pred += pred_test
del X_test, X_train, X_valid, y_train
gc.collect()
gc.collect()
gc.collect()
y_test_pred /= K # Average test set predictions
print( "\nGini for full training set:" )
eval_gini(y, y_valid_pred)
# Save validation predictions for stacking/ensembling
val = pd.DataFrame()
val['id'] = id_train
val['target'] = y_valid_pred.values
val.to_csv('rgf_valid.csv', float_format='%.6f', index=False)
# Create submission file
sub = pd.DataFrame()
sub['id'] = id_test
sub['target'] = y_test_pred
sub.to_csv('rgf_submit.csv', float_format='%.6f', index=False)
| [
"noreply@github.com"
] | MlvPrasadOfficial.noreply@github.com |
c9816561b3e191bbcd544b2288a6e29705b965fe | 60dd6073a3284e24092620e430fd05be3157f48e | /tiago_public_ws/build/pal_gripper/pal_gripper/catkin_generated/pkg.develspace.context.pc.py | ffa8184246ae074a71bbd494df35fa3b06cbfed1 | [] | no_license | SakshayMahna/Programming-Robots-with-ROS | e94d4ec5973f76d49c81406f0de43795bb673c1e | 203d97463d07722fbe73bdc007d930b2ae3905f1 | refs/heads/master | 2020-07-11T07:28:00.547774 | 2019-10-19T08:05:26 | 2019-10-19T08:05:26 | 204,474,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "pal_gripper"
PROJECT_SPACE_DIR = "/media/root/BuntuDrive/Programming-Robots-with-ROS/tiago_public_ws/devel"
PROJECT_VERSION = "1.0.2"
| [
"sakshum19@gmail.com"
] | sakshum19@gmail.com |
d0595e90dfda563698df1d304f0075999bdc4226 | e6762d218e7faed894e400390618201dbaaab1a1 | /OpenCargo/views.py | 71d9f0c080e5434ef384cfcb4220b916219fcc50 | [] | no_license | brunofnz/OpenCargo | 9f4ef888bce1cb57cc3f09ae07428cdd627b1998 | 2aa9131225fc74f4bc080e03e6c4d99baf40e49c | refs/heads/master | 2022-12-25T08:28:52.110514 | 2020-10-01T11:09:37 | 2020-10-01T11:09:37 | 295,845,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py |
from django.shortcuts import render
def landingPage(request):
return render(request, "index.html")
def preguntas(request):
return render(request, "preguntas.html")
| [
"brunofernandez@protonmail.com"
] | brunofernandez@protonmail.com |
41d507a8e4bd8b6a986b405390399eed12d87983 | 7af55f356c8440b0df7679d2dca54179af183662 | /hola.py | 4c56199cee0c8821d6eae700fda5f5a05f4f3318 | [] | no_license | Hug0Albert0/Flask-Basic-Application | 19b5fcd81d3d093e030a929381982c4d0a25a6c4 | f989d41395bb3a34859a09b9e237084d32fb5476 | refs/heads/master | 2021-05-17T19:15:31.346333 | 2020-03-29T02:19:55 | 2020-03-29T02:19:55 | 250,933,826 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,387 | py | from flask import Flask, request, make_response, redirect, abort, render_template, url_for, session, flash
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from datetime import datetime
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
app = Flask(__name__)
bootstrap = Bootstrap(app)
moment = Moment(app)
app.config["SECRET_KEY"] = "13301053"
class FormularioNombreIndex(FlaskForm):
nombre = StringField("¿Cuál es tu nombre?", validators=[DataRequired()])
registrar = SubmitField("Registrar")
@app.route("/", methods = ["GET", "POST"])
@app.route("/index/", methods = ["GET", "POST"])
def index():
formulario_nombre = FormularioNombreIndex()
if formulario_nombre.validate_on_submit():
viejo_nombre = session.get("nombre_usuario")
if viejo_nombre is not None and viejo_nombre != formulario_nombre.nombre.data:
flash("Has cambiado tu nombre")
session["nombre_usuario"] = formulario_nombre.nombre.data
return redirect(url_for("index"))
return render_template(
"index.html",
formulario_nombre = formulario_nombre,
nombre_usuario = session.get("nombre_usuario"),
ahora = datetime.utcnow()
)
@app.route("/mascota/")
@app.route("/mascota/<nombre_animal>")
def user(nombre_animal = None):
return render_template(
"mascota.html",
nombre_animal = nombre_animal
)
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
#@app.route("/agent/")
#def agent():
# print(request.headers)
# user_agent = request.headers.get("User-Agent")
# """
# request.headers => Host,User-Agent,Accept,Accept-Language,Accept-Encoding,
# Connection,Upgrade-Insecure-Requests
# """
# return "<p>Tu navegador es {user_agent}</p>".format(user_agent = user_agent)
##Error 404
#@app.route("/error/")
#def error():
# return "<h1>No soy mesero, soy el taquero...y abro a las 9</h1>", 400
#@app.route("/response/")
#def response():
# response = make_response("<h1>Este documento tiene una cookie!</h1>")
# response.set_cookie("answer", "42")
# return response
#@app.route('/user/<id>')
#def get_user(id):
#user = load_user(id)
#if not user:
#abort(404)
#return '<h1>Hello, {}</h1>'.format(user.name)
#@app.route('/')
#def index():
#return redirect('http://www.example.com') | [
"hugorivera.cool.player@gmail.com"
] | hugorivera.cool.player@gmail.com |
06442dca7af233b67ca6349ed47b2083415f747f | ecfcdb0d95b2e89f09e62a094b5cd6fda20c529e | /ourdb/botutils.py | c4c647504bfab2fcf2097670d511be9163f37b5c | [
"MIT"
] | permissive | SnowyCoder/telegram-ourdb | ad9deee57379a2adcfef5053f820b401d3a8f731 | b7d2f0dd426b238b1c305e9abfd222fe597031e9 | refs/heads/master | 2020-03-22T08:05:03.709603 | 2018-08-23T13:03:39 | 2018-08-23T13:03:39 | 139,743,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,116 | py | from functools import wraps
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
def restrict_to(users, on_unauthorized_access):
def restricted(func):
@wraps(func)
def wrapped(bot, update, *args, **kwargs):
user_id = update.effective_user.id
if user_id not in users:
on_unauthorized_access(bot, update)
return
return func(bot, update, *args, **kwargs)
return wrapped
return restricted
def strip_command(cmd):
"""Returns the command given stripping first part name and removing any first spaces
'/echo test' -> 'test'
"""
if cmd[0] != '/':
return cmd.lstrip(' ')
first_space = cmd.find(' ')
if first_space == -1:
return ''
return cmd[first_space:].lstrip(' ')
def lookahead(iterable):
"""Pass through all values from the given iterable, augmented by the
information if there are more values to come after the current one
(True), or if it is the last value (False).
"""
# Get an iterator and pull the first value.
it = iter(iterable)
last = next(it)
# Run the iterator to exhaustion (starting from the second value).
for val in it:
# Report the *previous* value (more to come).
yield last, True
last = val
# Report the last value.
yield last, False
def edit_or_send(bot, update, text, reply_markup=None):
if update.callback_query and update.callback_query.message.text:
update.callback_query.message.edit_text(text, reply_markup=reply_markup)
else:
bot.send_message(chat_id=update.effective_chat.id, text=text, reply_markup=reply_markup)
VALID_DEEPLINK_CHARS = str.maketrans('', '', 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-')
def is_valid_deeplink(s):
return not str(s).translate(VALID_DEEPLINK_CHARS)
| [
"snowycoder@gmail.com"
] | snowycoder@gmail.com |
ae98da46328e826e1d2741094e574db10858ed90 | 8bd1933442252a38e489b72df87ff4eaaab4802e | /data/python/north.py | ffaf906ddc5831bf319fc42cbea62ca8c3c43e51 | [] | no_license | easycastle/rainbow-meal | bf7b617e9a7549316d163e53b2b4696ff30f8929 | 8f27123ad40f40b41f55346fa4f6b6bdb6643420 | refs/heads/main | 2023-03-22T19:26:04.154903 | 2021-01-11T01:08:42 | 2021-01-11T01:08:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | import pandas as pd
df = pd.read_csv("data_3.csv", header = None, names = ['메뉴', '가격', '가게', '링크', '장소'], encoding = 'utf-8')
for x in range(len(df)):
if (type(df['가게'][x]) == str):
df.loc[x, '가게'] = "<a href = " + df['링크'][x] + ">" + df['가게'][x] + "</a>"
df_filtered = df[df['장소'] == '북문']
df_filtered = df_filtered.drop('링크', axis=1)
df_filtered.to_html("north.html", escape = False) | [
"tula3and@gmail.com"
] | tula3and@gmail.com |
f5b56f29f4f4617601ccfc4e4e6c1557209d395a | 32cd1bfe728b4efa84b16da0b63df4d095df5569 | /Arrays/FirstDuplicate.py | 0bfee925231d534fff518a92236d7b152223b489 | [] | no_license | denvinnpaolo/AlgoExpert-Road-to-Cerification-Questions | 21e58c080906d7ab740c339b18fbf83ce020b664 | 120d04000b2ab6cc664c323ef3572dc3a7ea87b4 | refs/heads/main | 2023-03-07T06:11:45.285155 | 2021-02-18T05:07:00 | 2021-02-18T05:07:00 | 327,754,785 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 818 | py | # First Duplicate Value
# Difficulty: Medium
# Instruction:
# Given an array of integers between 1 and n , inclusive, where n is the length of the array, write a function
# that returns the first integer that appears more than once (when the array is
# read from left to right).
# In other words, out of all the integers that might occur more than once in the
# input array, your function should return the one whose first duplicate value
# has the minimum index.
# If no integer appears more than once, your function should return -1
# Note that you're allowed to mutate the input array.
def firstDuplicateValue(array):
# Write your code here.
d = {}
for i in range (len(array)):
if array[i] not in d:
d[array[i]] = array[i]
elif array[i] in d:
return array[i]
return -1 | [
"denvinnpaolo@gmail.com"
] | denvinnpaolo@gmail.com |
f903c442aee0263c16da660e86b0ec16555e3da6 | d45de88d276bfa76ad0345b718c50f5d3c0f3d7f | /days_until_event.py | c3d8fafc5001f53f473ee5c63372eb3e0ab29e38 | [] | no_license | donniewherman/Pythonista_scene | 3c6d5ffa07f4c63fe06ee75d54937c8ea98387e8 | 11e43bf94c70c10fe74f931a7ab43df9ccf4e3d1 | refs/heads/master | 2021-01-17T06:00:12.676067 | 2015-08-07T21:40:41 | 2015-08-07T21:40:41 | 42,383,096 | 1 | 0 | null | 2015-09-13T04:02:48 | 2015-09-13T04:02:47 | null | UTF-8 | Python | false | false | 1,217 | py | # See: https://omz-forums.appspot.com/pythonista/post/6142748495183872
import console, datetime, scene
fmt = '{} is {} days away.'
class days_until_event(scene.Scene):
def __init__(self, event_name, event_date):
self.event_name = event_name
self.event_date = event_date
scene.run(self)
def setup(self):
self.center = self.bounds.center()
self.font_size = 64 if self.size.w > 700 else 32
def draw(self):
scene.background(0, 0, 0)
msg = fmt.format(self.event_name, (self.event_date - datetime.date.today()).days)
scene.text(msg, 'Futura', self.font_size, *self.center)
prompt = '''Please enter the event name.
i.e. First day of school'''
event_name = console.input_alert('Event', prompt, '', 'Enter').strip() or 'My event'
prompt = '''Please enter the date you would like to countdown to.
i.e. 2009 (year),6 (month),29 (day)'''
event_date = console.input_alert('Date', prompt, '', 'Enter')
try:
year, month, day = [int(s.strip()) for s in event_date.split(',')]
event_date = datetime.date(year, month, day)
except ValueError:
exit('Incorrect date format (must be "year, month, day")')
days_until_event(event_name, event_date)
| [
"cclauss@bluewin.ch"
] | cclauss@bluewin.ch |
a6c70a7e6b270d06977f70f509ce24378ca308aa | bf542e318773faaaa48931f08f64264748514024 | /utils/sub8_ros_tools/sub8_misc_tools/__init__.py | d501d40f359d107e294b89b9839daf1c0dab63bb | [
"MIT"
] | permissive | guojiyao/Sub8 | 3b30f517e65cf127fd1e6ee797e4318c8c4be227 | 6de4bcf20eb2863ec0d06234539ffb19892009f7 | refs/heads/master | 2020-05-21T10:12:18.471001 | 2016-03-29T00:45:35 | 2016-03-29T00:45:35 | 54,933,672 | 0 | 0 | null | 2016-03-29T00:37:37 | 2016-03-29T00:37:36 | null | UTF-8 | Python | false | false | 69 | py | from download import download_and_unzip
from download import download | [
"jpanikulam@ufl.edu"
] | jpanikulam@ufl.edu |
8f362b8ed7c76e2766013bb4e6803278ae161094 | 981d425745639e5338de6847184fac2ab0175ce8 | /src/test.py | 087de4f1eb7b797d4bdeeecfa6a5b65c9a23e61e | [] | no_license | exploring-realities/Mobi | 17f06dd0fcdda30eab9519992d29d2530f4bc307 | f6f0e5d779424979d32e8175066bebe83399f289 | refs/heads/master | 2021-07-06T05:28:15.451058 | 2017-10-01T22:53:00 | 2017-10-01T22:53:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | #!/usr/bin/python
import datetime
import crawler
now = datetime.datetime.now()
on = str(now.day) + "." + str(now.month) + "." + str(now.year)
at = str(now.hour) + ":" + str(now.minute)
response_json = crawler.request_station_info("Hallerstrasse", at, on)
print response_json | [
"vinh-ngu@hotmail.com"
] | vinh-ngu@hotmail.com |
28ff6b2296afb7d1d4496fc672755f2ebafa1a28 | 4ab036510c20feffbd66b812483c77dd0fde3f10 | /scene/Receiver.py | 20e967a5512dbaf7317a232f765bc33d00bdcbb4 | [
"MIT"
] | permissive | FJFranklin/wifi-py-rpi-car-controller | abf6c4bd8c6bef953a9cad435d54be1b65771a5e | 0667f9d4e7a6dab135d5b5762d319fafb7af6874 | refs/heads/master | 2022-12-07T09:24:46.995917 | 2022-11-15T09:16:20 | 2022-11-15T09:16:20 | 82,803,955 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | import numpy as np
from View import View
class Receiver(object):
    """A receiver probe placed in `space`: a small cube whose faces each
    seed a View, which is then traced against the scene's polygons."""

    def __init__(self, space, origin, cube_dimension, material):
        # Keep our own copy of the origin so later mutation of the caller's
        # array cannot move the receiver.
        self._origin = np.copy(origin)
        self._space = space
        self._views = []
        self._material = material
        # Build the receiver cube; each returned face polygon seeds a view.
        polygons = space.cube(self._origin, cube_dimension, material, True)
        for p in polygons:
            self._views.append(View(self._origin, p))

    def search(self, show_projections=False):
        """Trace all pending views for up to five reflection/refraction
        generations, collecting views that land on a source material.

        NOTE(review): `sources` is only counted and printed, never
        returned -- this copy of the file may be truncated.
        """
        sources = []
        for it in range(0,5):
            # Resolve every pending view against the scene's polygons.
            resolved = []
            while len(self._views) > 0:
                v = self._views.pop(0)
                resolved += v.search(self._space.polygons)
            # Classify each resolved view by the material it hit.
            while len(resolved) > 0:
                v = resolved.pop(0)
                material = v.region.target.material
                if material.is_source():
                    if show_projections:
                        v.show_history(self._space)
                    sources.append(v.copy())
                if material.is_refractive():
                    # A refractive hit splits into a through-view and a
                    # refracted view, both traced in the next generation.
                    tv, rv = v.refract_view()
                    #self._space.cube(rv.region.origin, 0.1, self._material, True)
                    self._views.append(tv) # through-view
                    self._views.append(rv) # refracted view
                elif material.is_reflective():
                    self._views.append(v.reflect_view())
        print("Sources total: " + str(len(sources)))
| [
"fjf@alinameridon.com"
] | fjf@alinameridon.com |
d432478397bf133a423bca8172f73dfbdf6dd036 | a34e3d435f48ef87477d3ae13ca8a43015e5052c | /pyopengl2.py | 788e2feeea3f13c7cc5bba01fafc836249f2b5da | [] | no_license | haehn/sandbox | 636069372fc7bb7fd72b5fde302f42b815e8e9b0 | e49a0a30a1811adb73577ff697d81db16ca82808 | refs/heads/master | 2021-01-22T03:39:03.415863 | 2015-02-11T23:16:22 | 2015-02-11T23:16:22 | 26,128,048 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,880 | py | import sys
from OpenGL.GLUT import *
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GL import shaders
from OpenGL.arrays import vbo
# from vrml.arrays import *
from numpy import concatenate, identity, transpose, multiply
import numpy
from datetime import datetime
class Sample15:
    """Rotating color cube rendered into an offscreen framebuffer
    (renderbuffer color + depth attachments), then blitted into the
    window-system framebuffer each frame.

    NOTE(review): `display` uses a Python 2 print statement while the rest
    of the code is 2/3-compatible -- this file targets Python 2.
    """
    def __init__(self):
        self.current_time = None   # timestamp of the previous frame (None until first display)
        self.current_angle = 0.0   # accumulated rotation angle fed to the vertex shader
        # Vertex shader: rotates each vertex around a fixed axis by the
        # uniform rotationAngle, then applies modelMatrix.
        vertex_shader = shaders.compileShader("""
        attribute vec4 vPosition;
        attribute vec4 vColor;
        uniform mat4 modelMatrix;
        uniform float rotationAngle;
        varying vec4 varyingColor;
        // function from http://www.neilmendoza.com/glsl-rotation-about-an-arbitrary-axis/
        mat4 rotationMatrix(vec3 axis, float angle) {
            axis = normalize(axis);
            float s = sin(angle);
            float c = cos(angle);
            float oc = 1.0 - c;
            return mat4(oc * axis.x * axis.x + c, oc * axis.x * axis.y - axis.z * s, oc * axis.z * axis.x + axis.y * s, 0.0,
            oc * axis.x * axis.y + axis.z * s, oc * axis.y * axis.y + c, oc * axis.y * axis.z - axis.x * s, 0.0,
            oc * axis.z * axis.x - axis.y * s, oc * axis.y * axis.z + axis.x * s, oc * axis.z * axis.z + c, 0.0,
            0.0, 0.0, 0.0, 1.0);
        }
        void main() {
            mat4 rotation = rotationMatrix(vec3(0.1, 0.2, 0.3), rotationAngle);
            gl_Position = modelMatrix * rotation * vPosition;
            varyingColor = vColor;
        }""", GL_VERTEX_SHADER)
        # Fragment shader: pass-through of the interpolated vertex color.
        fragment_shader = shaders.compileShader("""
        varying vec4 varyingColor;
        void main() {
            gl_FragColor = varyingColor;
        }""", GL_FRAGMENT_SHADER)
        self.shader = shaders.compileProgram(vertex_shader, fragment_shader)
        shaders.glUseProgram(self.shader)
        # Look up attribute/uniform locations once and cache them.
        self.position_location = glGetAttribLocation(
            self.shader, 'vPosition'
        )
        self.color_location = glGetAttribLocation(
            self.shader, 'vColor'
        )
        self.model_matrix_location = glGetUniformLocation(
            self.shader, 'modelMatrix'
        )
        self.rotation_angle_location = glGetUniformLocation(
            self.shader, 'rotationAngle'
        )
        # Eight cube corners as homogeneous (x, y, z, w) coordinates.
        vertex_positions = numpy.array([
            -1.0, -1.0, -1.0, 1.0,
            -1.0, -1.0, 1.0, 1.0,
            -1.0, 1.0, -1.0, 1.0,
            -1.0, 1.0, 1.0, 1.0,
            1.0, -1.0, -1.0, 1.0,
            1.0, -1.0, 1.0, 1.0,
            1.0, 1.0, -1.0, 1.0,
            1.0, 1.0, 1.0, 1.0
        ], dtype=numpy.float32)
        # One RGBA color per corner, packed after the positions in one VBO.
        vertex_colors = numpy.array([
            1.0, 1.0, 1.0, 1.0,
            1.0, 1.0, 0.0, 1.0,
            1.0, 0.0, 1.0, 1.0,
            1.0, 0.0, 0.0, 1.0,
            0.0, 1.0, 1.0, 1.0,
            0.0, 1.0, 0.0, 1.0,
            0.0, 0.0, 1.0, 1.0,
            0.5, 0.5, 0.5, 1.0
        ], dtype=numpy.float32)
        self.vertex_buffer_object = vbo.VBO(concatenate((vertex_positions, vertex_colors)))
        self.vertex_buffer_object.bind()
        # Two triangle strips separated by the primitive-restart index
        # 0xFFFF draw the whole cube.
        self.vertex_indices = vbo.VBO(
            numpy.array([
                0, 1, 2, 3, 6, 7, 4, 5,
                0xFFFF,
                2, 6, 0, 4, 1, 5, 3, 7
            ], dtype=numpy.uint32),
            target=GL_ELEMENT_ARRAY_BUFFER)
        self.vertex_indices.bind()
        glEnable(GL_PRIMITIVE_RESTART)
        glPrimitiveRestartIndex(0xFFFF)
        glVertexAttribPointer(
            self.position_location,
            4, GL_FLOAT, False, 0, self.vertex_buffer_object
        )
        glEnableVertexAttribArray(self.position_location)
        # Colors live in the same VBO, offset past the positions.
        glVertexAttribPointer(
            self.color_location,
            4, GL_FLOAT, False, 0, self.vertex_buffer_object + vertex_positions.nbytes
        )
        glEnableVertexAttribArray(self.color_location)
        glEnable(GL_DEPTH_TEST)
        glEnable(GL_BLEND)
        glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glBlendEquation(GL_FUNC_SUBTRACT)
        # Offscreen render target: 256x256 color + 24-bit depth renderbuffers
        # attached to a private framebuffer object.
        render_buffer_color = glGenRenderbuffers(1)
        render_buffer_depth = glGenRenderbuffers(1)
        glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_color)
        glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, 256, 256)
        glBindRenderbuffer(GL_RENDERBUFFER, render_buffer_depth)
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 256, 256)
        self.framebuffer = glGenFramebuffers(1)
        glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.framebuffer)
        glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                                  GL_RENDERBUFFER, render_buffer_color)
        glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
                                  GL_RENDERBUFFER, render_buffer_depth)
    def display(self):
        """Per-frame callback: advance the rotation by the elapsed time,
        render into the offscreen framebuffer, then blit to the window."""
        if self.current_time is None:
            self.current_time = datetime.now()
        self.delta_time = datetime.now() - self.current_time
        self.current_time = datetime.now()
        # NOTE(review): only .microseconds (not total elapsed time) feeds the
        # angle, so frames longer than one second under-rotate.
        self.current_angle += 0.000002 * self.delta_time.microseconds
        print self.current_angle
        try:
            # Prepare to render into the renderbuffer
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self.framebuffer)
            glViewport(0, 0, 125, 125)
            # Render into renderbuffer
            glClearColor (1.0, 0.0, 0.0, 1.0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            translation_matrix = identity(4, 'f') # really it scale matrix there
            translation_matrix[-1][-1] = 2
            glUniformMatrix4fv(self.model_matrix_location, 1 , GL_TRUE, translation_matrix.tolist())
            glUniform1f(self.rotation_angle_location, self.current_angle)
            glDrawElements(GL_TRIANGLE_STRIP, 17, GL_UNSIGNED_INT, self.vertex_indices)
            # Set up to read from the renderbuffer and draw to window-system framebuffer
            glBindFramebuffer(GL_READ_FRAMEBUFFER, self.framebuffer)
            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0)
            glViewport(0, 0, 250, 250)
            glClearColor(0.0, 0.0, 1.0, 1.0)
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
            # Do the copy
            glBlitFramebuffer(0, 0, 125, 125, 0, 0, 125, 125,
                              GL_COLOR_BUFFER_BIT, GL_NEAREST)
            glutSwapBuffers()
        finally:
            glFlush()
            glutPostRedisplay()
if __name__ == '__main__':
    # Standard GLUT bootstrap: single-buffered RGB window with depth and
    # stencil buffers, Sample15 drives both display and idle callbacks,
    # then control is handed to the (blocking) GLUT main loop.
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH | GLUT_STENCIL)
    glutInitWindowSize(250, 250)
    glutInitWindowPosition(100, 100)
    glutCreateWindow("sample 15")
    sample = Sample15()
    glutDisplayFunc(sample.display)
    glutIdleFunc(sample.display)
    glutMainLoop()
"haehn@seas.harvard.edu"
] | haehn@seas.harvard.edu |
5724deb6328a7ed2eff59a247c27787e01e42523 | f6e3712729f542776d5b7fbf0e69eebba2002ddc | /xbx-coin-2020/program/2_牛刀小试/3_构建自己的数字货币数据库/1_神奇的网址-什么是API.py | dbf40638b7f3edb5b8444edfbe9aabbe0aa3d5a2 | [] | no_license | chouchouyu/xbx | a89cd3e852646dde1eabcf0965abd3d743b7619e | 576f8dd97ca917f1efd8f781571d36dc68e020cf | refs/heads/main | 2023-06-11T01:07:16.153340 | 2021-06-27T14:37:01 | 2021-06-27T14:37:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,284 | py | """
《邢不行-2020新版|Python数字货币量化投资课程》
无需编程基础,助教答疑服务,专属策略网站,一旦加入,永续更新。
课程详细介绍:https://quantclass.cn/crypto/class
邢不行微信: xbx9025
本程序作者: 邢不行
# 课程内容
通过案例介绍什么是API
"""
# =====神奇的网址-API示例
# okex:https://www.okex.com/api/spot/v3/instruments/BTC-USDT/ticker
# 返回结果示例:{"best_ask":"7871.9","best_bid":"7871.8","instrument_id":"BTC-USDT","product_id":"BTC-USDT","last":"7872.7","last_qty":"0.00294821","ask":"7871.9","best_ask_size":"2.47032541","bid":"7871.8","best_bid_size":"0.3586146","open_24h":"8090.4","high_24h":"8090.4","low_24h":"7637.4","base_volume_24h":"71999.67613995","timestamp":"2020-03-10T03:27:31.069Z","quote_volume_24h":"564510343.1"}
# 火币:https://api.huobi.pro/market/detail/merged?symbol=btcusdt
# 返回结果示例:{"status":"ok","ch":"market.btcusdt.detail.merged","ts":1583810974164,"tick":{"amount":71311.94804854663,"open":8082.13,"close":7890.19,"high":8082.14,"id":210146022322,"count":561789,"low":7638.0,"version":210146022322,"ask":[7890.98,0.58188],"vol":5.587285592033827E8,"bid":[7888.83,0.061314]}}
# 币安:https://api.binance.com/api/v3/ticker/24hr?symbol=BTCUSDT
# 返回结果示例:
# 修改免翻墙域名
# 将网址中的okex.com改为okex.me:https://www.okex.me/api/spot/v3/instruments/BTC-USDT/ticker
# 该网址之后可能会失效
# 这个就是API,官方提供给我们用来从交易所获取数据的网址
# =====修改参数
# 将okex网址中的btc改为ltc:https://www.okex.com/api/spot/v3/instruments/LTC-USDT/ticker
# 可以在神奇的网址中修改相关参数,来达到获取不同数据的目的
# =====哪里找到神奇的网址
# 交易所官网,有详细的文档。
# 交易所能提供的数据,全在官网上,可以仔细阅读,数据是量化之源。
# =====每家交易所不一样
# 每家请求的内容不一样:网址、币种的格式
# 返回的结果不一样:格式
# 能提供的数据种类不一样
# 限制也不一样
# 稳定性也不同:决定了一家交易所的技术实力
# =====其他
# 不同人使用的接口不一样:内部、大户、散户
| [
"cshan.kong@gmail.com"
] | cshan.kong@gmail.com |
cf08bf0bb104fef5a745b400db13f1ea38cb274f | d1f39d9d16f7a4cb080530bd6425b8bc4063dee2 | /realtors/admin.py | 8df4dd731bebe351cf7a3dcc350a2afc0b879dfe | [] | no_license | kdogan11/rf | 7a721c0e91f5dfb773a0716e2958b10a3d9175c6 | 1984523845f23ba73c0436ef00ba40d5b1711403 | refs/heads/master | 2020-04-10T10:42:34.497136 | 2018-12-08T20:07:35 | 2018-12-08T20:07:35 | 160,973,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.contrib import admin
from .models import Realtor
class RealtorAdmin(admin.ModelAdmin):
    # Django admin configuration for Realtor records.
    list_display = ('id', 'name', 'hire_date')  # columns in the changelist
    list_display_links = ('id', 'name')         # columns linking to the edit page
    search_fields = ('name',)                   # enables the admin search box
    list_per_page = 20                          # pagination size

admin.site.register(Realtor, RealtorAdmin)
| [
"kdogan11@gmail.com"
] | kdogan11@gmail.com |
36f0c4bcf26e7b2e5d4803072ba1ccf8417af449 | ae9176c79058219ec5d531f7d38fd268b6dcf928 | /python/dgl/distributed/rpc_server.py | f5868027d3a72819a1bb794d59935abe9e161af2 | [
"Apache-2.0"
] | permissive | ryusidjin/dgl | 7fbb4203ed5a0a15d429033e417e18c63306eee6 | 0a4e8b32b6b31f2b833476fe5ab129075ee632e2 | refs/heads/master | 2022-11-02T04:48:58.320027 | 2020-06-18T11:43:42 | 2020-06-18T11:43:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,226 | py | """Functions used by server."""
import time
from . import rpc
from .constants import MAX_QUEUE_SIZE
def start_server(server_id, ip_config, num_clients, server_state,
                 max_queue_size=MAX_QUEUE_SIZE, net_type='socket'):
    """Start DGL server, which will be shared with all the rpc services.

    This is a blocking function -- it returns only when the server shutdown.

    Parameters
    ----------
    server_id : int
        Current server ID (starts from 0).
    ip_config : str
        Path of IP configuration file.
    num_clients : int
        Total number of clients that will be connected to the server.
        Note that, we do not support dynamic connection for now. It means
        that once all the clients connect to server, no new client can be
        added to the cluster.
    server_state : ServerState object
        Stores the main data used by the server.
    max_queue_size : int
        Maximal size (bytes) of server queue buffer (~20 GB on default).
        Note that the 20 GB is just an upper-bound because DGL uses zero-copy and
        it will not allocate 20GB memory at once.
    net_type : str
        Networking type. Current options are: 'socket'.
    """
    assert server_id >= 0, 'server_id (%d) cannot be a negative number.' % server_id
    # Fix: the next two messages referenced undefined names (num_client and
    # queue_size), which would raise NameError instead of AssertionError.
    assert num_clients >= 0, 'num_clients (%d) cannot be a negative number.' % num_clients
    assert max_queue_size > 0, 'max_queue_size (%d) cannot be a negative number.' % max_queue_size
    # Fix: ('socket') is just the string 'socket', so the original check was a
    # substring test (e.g. 'sock' would pass); a one-element tuple is intended.
    assert net_type in ('socket',), 'net_type (%s) can only be \'socket\'' % net_type
    # Register for handling the Ctrl+C event.
    rpc.register_ctrl_c()
    # Register some basic services.
    rpc.register_service(rpc.CLIENT_REGISTER,
                         rpc.ClientRegisterRequest,
                         rpc.ClientRegisterResponse)
    rpc.register_service(rpc.SHUT_DOWN_SERVER,
                         rpc.ShutDownRequest,
                         None)
    rpc.set_rank(server_id)
    server_namebook = rpc.read_ip_config(ip_config)
    machine_id = server_namebook[server_id][0]
    rpc.set_machine_id(machine_id)
    ip_addr = server_namebook[server_id][1]
    port = server_namebook[server_id][2]
    rpc.create_sender(max_queue_size, net_type)
    rpc.create_receiver(max_queue_size, net_type)
    # Wait for all the senders to connect to the server.  Once every client
    # is connected, the server will not accept new sender connections.
    print("Wait connections ...")
    rpc.receiver_wait(ip_addr, port, num_clients)
    print("%d clients connected!" % num_clients)
    # Receive every client's IP and assign IDs; sorting the addresses makes
    # the ID assignment deterministic across servers.
    addr_list = []
    client_namebook = {}
    for _ in range(num_clients):
        req, _ = rpc.recv_request()
        addr_list.append(req.ip_addr)
    addr_list.sort()
    for client_id, addr in enumerate(addr_list):
        client_namebook[client_id] = addr
    for client_id, addr in client_namebook.items():
        client_ip, client_port = addr.split(':')
        rpc.add_receiver_addr(client_ip, client_port, client_id)
    time.sleep(3) # wait client's socket ready. 3 sec is enough.
    rpc.sender_connect()
    if rpc.get_rank() == 0: # server_0 sends all the assigned IDs
        for client_id, _ in client_namebook.items():
            register_res = rpc.ClientRegisterResponse(client_id)
            rpc.send_response(client_id, register_res)
    # Main service loop: receive a request, process it against server_state,
    # and send back whatever response(s) it produced.
    while True:
        try:
            req, client_id = rpc.recv_request()
            res = req.process_request(server_state)
            if res is not None:
                if isinstance(res, list):
                    # A list means multiple targeted responses.
                    for response in res:
                        target_id, res_data = response
                        rpc.send_response(target_id, res_data)
                elif isinstance(res, str) and res == 'exit':
                    break # break the loop and exit server
                else:
                    rpc.send_response(client_id, res)
        except KeyboardInterrupt:
            print("Exit kvserver!")
            rpc.finalize_sender()
            rpc.finalize_receiver()
        except:
            print("Error on kvserver!")
            rpc.finalize_sender()
            rpc.finalize_receiver()
            raise
raise
| [
"noreply@github.com"
] | ryusidjin.noreply@github.com |
48c85b392ffbdf8089044c9b25e3ff46bdcbf0b8 | 5425b9dbb15da20faaca1cfd98cebef8a5423216 | /src/object_list/scripts/Objektlist_Visualization.py | 1355abccfd25eea82346f0bfccfc3f5b4ef37e73 | [] | no_license | RedgeCastelino/Master_thesis_shared | ce30be3906f6968859c93e508cbe4ace56de0237 | de2f4b229f3df4f219a08f3d4d7e8d3d40750c55 | refs/heads/main | 2023-03-12T12:32:36.555096 | 2021-03-01T14:34:57 | 2021-03-01T14:34:57 | 343,441,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,501 | py | #!/usr/bin/env python
import roslib; roslib.load_manifest('visualization_marker_tutorials')
import rospy
from std_msgs.msg import String
from object_list.msg import ObjectsList
from object_list.msg import ObjectList
from osi3_bridge.msg import TrafficUpdateMovingObject
from visualization_msgs.msg import Marker
from visualization_msgs.msg import MarkerArray
import rospy
import math
import tf
OFFSET_CAR_X = -2.3 # distance to front
# Ego-vehicle position cache: written by callback_egovehicle, read by the
# marker builders below.
car_ego_x = 0
car_ego_y = 0
data_alt = 0
topic = 'visualization_marker_array'
# Publisher for the RViz marker array, the ROS node, and the TF broadcaster.
publisher = rospy.Publisher(topic, MarkerArray,queue_size=10)
rospy.init_node('Objekt_Visualization')
br = tf.TransformBroadcaster()
#define each color to the specific class, input value ist the name(string) from the classifciation
def evaluateColor(Class):
    """Look up the marker style for a classification name.

    Returns ``[r, g, b, marker_type_id]`` (the fourth element is used as
    the RViz marker type by evaluateObject), or ``None`` for an unknown
    classification name.
    """
    if Class == "car":
        return [1, 0, 0, 1]
    if Class == "truck":
        return [0, 1, 0, 1]
    if Class == "motorcycle":
        return [0, 0, 1, 1]
    if Class == "bicycle":
        return [1, 1, 0, 1]
    if Class == "pedestrian":
        return [1, 0, 1, 3]
    if Class == "stacionary":
        return [0, 1, 1, 3]
    if Class == "other":
        return [1, 1, 1, 2]
    return None
def evaluateClassification(objectClass):
    """Return the name of the class attribute with the highest probability.

    Scans all public, non-callable attributes of the classification
    message (the per-class probability fields) and returns the attribute
    name with the largest value; returns "" if every value is <= 0 or no
    such attribute exists.  Ties keep the first name in dir() order.
    """
    best_name = ""
    best_prob = 0
    for attr in dir(objectClass):
        # Skip private/dunder names and methods -- only probability
        # fields remain.
        if attr.startswith('_'):
            continue
        value = getattr(objectClass, attr)
        if callable(value):
            continue
        if value > best_prob:
            best_prob = value
            best_name = attr
    return best_name
def evaluateObject(objectData):
    # Build the colored shape marker for one tracked object; color and
    # marker type come from the most probable classification.
    marker = Marker()
    r, g, b, typ = evaluateColor(evaluateClassification(objectData.classification))
    marker.header.frame_id = "/chassis"
    marker.type = typ
    marker.action = marker.ADD
    # Footprint follows the object's reported dimensions; height is fixed.
    marker.scale.x = objectData.dimension.length
    marker.scale.y = objectData.dimension.width
    marker.scale.z = 2.0
    marker.color.a = 1.0
    marker.color.r = r
    marker.color.g = g
    marker.color.b = b
    marker.pose.orientation.w = 1.0
    # Object position is relative to the ego vehicle; y is mirrored --
    # presumably a coordinate-convention conversion (TODO confirm against
    # the sensor frame definition).
    marker.pose.position.x = car_ego_x + objectData.geometric.x
    marker.pose.position.y = car_ego_y + objectData.geometric.y * (-1)
    marker.pose.position.z = 1.0
    # Short lifetime so stale markers disappear when updates stop.
    marker.lifetime = rospy.Duration(0.1)
    return marker
def evaluateObjectID(objectData):
    # Build a view-facing text marker showing the object's numeric id,
    # floating above the object's shape marker.
    marker = Marker()
    marker.header.frame_id = "/chassis"
    marker.type = marker.TEXT_VIEW_FACING
    marker.action = marker.ADD
    marker.scale.x = 2
    marker.scale.y = 2
    marker.scale.z = 1
    marker.color.a = 1.0
    marker.color.r = 0.3
    marker.color.g = 0.4
    marker.color.b = 1.0
    marker.pose.orientation.w = 1.0
    # Same placement rule as evaluateObject (mirrored y -- TODO confirm),
    # but one unit above the 2.0-high object shape.
    marker.pose.position.x = car_ego_x + objectData.geometric.x
    marker.pose.position.y = car_ego_y + objectData.geometric.y * (-1)
    marker.pose.position.z = 2.0 + 1
    marker.lifetime = rospy.Duration(0.1)
    marker.text = "ID:" + str(objectData.obj_id)
    return marker
def callback_simulation(data):
    """Build an RViz MarkerArray (one shape plus one ID label per tracked
    object) from an incoming ObjectsList message and publish it.

    Marker ids: even ids are the object shapes, odd ids the text labels,
    so the two markers of object i never collide.
    """
    # Fix: removed the dead `global car_ego_x` / `global car_ego_y`
    # declarations -- this callback never assigns either name.
    markerArray = MarkerArray()
    for i in range(len(data.obj_list)):
        markerObj = evaluateObject(data.obj_list[i])
        markerObj.id = i * 2
        markerID = evaluateObjectID(data.obj_list[i])
        markerID.id = i * 2 + 1
        markerArray.markers.append(markerObj)
        markerArray.markers.append(markerID)
    rospy.loginfo(markerArray)
    publisher.publish(markerArray)
def callback_egovehicle(data):
    # Cache the ego vehicle's position for the marker builders and publish
    # the base_link -> chassis transform carrying the full ego pose.
    global car_ego_x
    global car_ego_y
    car_ego_x = data.object.position.x
    car_ego_y = data.object.position.y
    br.sendTransform((car_ego_x,car_ego_y,0),tf.transformations.quaternion_from_euler(data.object.orientation.roll,data.object.orientation.pitch,data.object.orientation.yaw),rospy.Time.now(),"chassis","base_link")
def listener():
    """Subscribe to the sensor object list and the ego-vehicle pose, then
    block in rospy.spin() until the node is shut down."""
    # In ROS, nodes are uniquely named. If two nodes with the same
    # name are launched, the previous one is kicked off. The
    # anonymous=True flag means that rospy will choose a unique
    # name for our 'listener' node so that multiple listeners can
    # run simultaneously.
    #rospy.Subscriber("chatter", String, callback)
    rospy.Subscriber('/sensor0/', ObjectsList, callback_simulation)
    rospy.Subscriber('/ego_data', TrafficUpdateMovingObject, callback_egovehicle)
    # spin() simply keeps python from exiting until this node is stopped
    rospy.spin()

if __name__ == '__main__':
    listener()
| [
"maikol.drechsler@carissma.eu"
] | maikol.drechsler@carissma.eu |
64e273894e3f06a00b2626f4f97c07d635c6e674 | 5be7ec24889fd14b2f1d32b127c44bdcfe379bda | /Python/quick_sort.py | b93df5393c8c119593473ba8ad31145bec675221 | [] | no_license | utpalendubarman/JWoC_Algorithms | 54dd055caa433ba099a66afe1f4268850be8fefa | 5162fb57dd7232d5de5c7039213a9835a136856b | refs/heads/master | 2020-12-29T18:21:02.521195 | 2020-02-06T13:40:49 | 2020-02-06T13:40:49 | 238,697,510 | 1 | 0 | null | 2020-02-06T13:39:55 | 2020-02-06T13:39:54 | null | UTF-8 | Python | false | false | 689 | py | # quick sort in python by ankit raj
def part(array, low, high):
    """Lomuto partition: move array[high] (the pivot) into its sorted
    position within array[low:high+1] and return that position."""
    i = low - 1
    pivot = array[high]
    for j in range(low, high):
        if array[j] <= pivot:
            i += 1
            array[i], array[j] = array[j], array[i]
    # Fix: 'arrray' was a typo that raised NameError; place the pivot.
    array[i + 1], array[high] = array[high], array[i + 1]
    return (i + 1)


def quick_sort(array, low, high):
    """Sort array[low:high+1] in place using quicksort."""
    if low < high:
        # Fix: the partition helper is named `part`, not `partition`.
        mid = part(array, low, high)
        quick_sort(array, low, mid - 1)
        # Fix: recurse on mid + 1; recursing on mid never terminates
        # because the pivot at `mid` is already in its final position.
        quick_sort(array, mid + 1, high)
# Driver: read the array size and its elements, sort, and print the result.
n = int(input('enter size of an array'))
array = input('Enter the elements of array ').split()
array = [int(x) for x in array]
# Fix: the original called the undefined insertion_sort(); sort with the
# quick_sort defined above instead.
quick_sort(array, 0, len(array) - 1)
print('Sorted list: ', end = '')
print(array)
| [
"noreply@github.com"
] | utpalendubarman.noreply@github.com |
70b8ff468022456ea5bf19db8b9f77c1107261e9 | 747b66827973517a05d9d060d4b3ce041c9b2ecb | /livre.py | f2b48c79e1a6b37d1005493d687eda7b1372fc27 | [] | no_license | Maxoubzh/Projet2 | 43ff9353726b1cbbe85defe039376982491a1713 | 06808ac023b6a1fa8fb0dfae15260e26f0cbe4fe | refs/heads/master | 2023-05-01T05:19:57.140436 | 2021-05-23T15:19:33 | 2021-05-23T15:19:33 | 365,741,479 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,653 | py | import requests
from bs4 import BeautifulSoup as BS
import csv
import re
def livre(url, category, fichiercsv):
    """Scrape one book page from books.toscrape.com: download its cover
    image to the working directory and append the book's details as one
    row of the CSV file.

    url -- absolute URL of the book's detail page
    category -- category label written into the CSV row
    fichiercsv -- path of the CSV file to append to
    """
    # Fix: urllib.request.urlretrieve was used below without any urllib
    # import, which raised NameError at runtime.
    import urllib.request

    urlbase = 'http://books.toscrape.com'
    response = requests.get(url)
    if response.ok:
        soup = BS(response.content, features="html.parser")
        # The image src is relative ('../../media/...'); drop the leading
        # '../../' (6 chars) and re-anchor it on the site root.
        imageUrl = soup.find('div', id="product_gallery").find('img')['src']
        imageUrl = imageUrl[6:]
        imageUrl = '/'.join((urlbase, imageUrl))
        nomImage = soup.find('div', id="product_gallery").find('img')['alt']
        # Keep the local file name short, then add the .jpg extension.
        if len(nomImage) > 20:
            nomImage = nomImage[:19]
        nomImage = '.'.join((nomImage, 'jpg'))
        urllib.request.urlretrieve(imageUrl, nomImage)
        title = soup.find('title').text
        UPC = soup.find('th', string='UPC').next_sibling.text
        PriceWithTax = soup.find('th', string='Price (incl. tax)').next_sibling.text
        PriceExcludingTax = soup.find('th', string='Price (excl. tax)').next_sibling.text
        # Availability reads like "In stock (22 available)"; keep the digits.
        numberAvailable = soup.find('th', string='Availability').next_sibling.next_sibling.text
        numberAvailable = re.findall(r"\d+", numberAvailable)[0]
        productDescription = soup.find('div', id="product_description").next_sibling.next_sibling.text
        nbReview = soup.find('th', string='Number of reviews').next_sibling.next_sibling.text
        # Use a distinct name for the file handle instead of shadowing the
        # `fichiercsv` path parameter.
        with open(fichiercsv, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([url, UPC, title, PriceExcludingTax, PriceWithTax,
                             numberAvailable, nbReview, imageUrl,
                             productDescription, category])
| [
"donne.maxime@gmail.com"
] | donne.maxime@gmail.com |
267300a0c6be411af5da94c956325769ac8c743b | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_55/726.py | 2e1116bc8beaa4f056422822aff316eb7493302d | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,012 | py | #!/usr/bin/env python
import sys
def Take(k, gs, i):
    """Board one ride starting from group index ``i``.

    ``k`` is the coaster capacity and ``gs`` the circular queue of group
    sizes.  Groups board in order until the next one no longer fits (or
    capacity is exhausted).  Returns ``(seated, next_index)`` where
    ``next_index`` is NOT reduced modulo len(gs) -- the caller does that.
    Results are memoized per starting index by Euros.
    """
    rotated = gs[i:] + gs[:i]
    remaining = k
    seated = 0
    groups_boarded = 0
    for size in rotated:
        if not remaining or size > remaining:
            break
        remaining -= size
        seated += size
        groups_boarded += 1
    return seated, groups_boarded + i


def Euros(R, k, gs):
    """Total euros earned over at most ``R`` rides of a capacity-``k``
    coaster: each ride earns one euro per seated person.  Stops early as
    soon as a ride would seat nobody (no later ride could either)."""
    N = len(gs)
    per_start = {}  # start index -> (people seated, next start index)
    total = 0
    pos = 0
    rides_left = R
    while rides_left:
        step = per_start.get(pos)
        if step is None:
            step = per_start[pos] = Take(k, gs, pos)
        taken, pos = step
        if taken == 0:
            # Nobody could board from this position, so no future ride
            # earns anything either.
            return total
        total += taken
        pos = pos % N
        rides_left -= 1
    return total
def main():
    # Reads Code Jam ("Theme Park") input from stdin: the first line is the
    # number of test cases T; each case is a line "R k N" (rides, coaster
    # capacity, group count) followed by a line with the N group sizes.
    # NOTE: this file is Python 2 (print statement; map returns a list).
    it = iter(sys.stdin)
    T = int(next(it))
    for x in range(1, T+1):
        R, k, N = map(int, next(it).split())
        gs = map(int, next(it).split())
        # len(gs) works because Python 2 map returns a list.
        assert len(gs) == N
        y = Euros(R, k, gs)
        print "Case #%d: %d" %(x, y)

if __name__=='__main__':
    main()
| [
"miliar1732@gmail.com"
] | miliar1732@gmail.com |
4bfb6153a6a1331122310bcd35d0ddd45cc654dd | 86c85939a566e11c87ef0cd0668ba2dd29e83b7b | /tests/core/val-type/test_val_type.py | d518d13effb9aaecb0cd406d03956f131791d13a | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | ethereum/py-wasm | eca49823f5a683f125d89ed6a9c45e5f5eee7139 | 41a6d07a620dfc4f590463dd038dffe4efe0c8c6 | refs/heads/master | 2023-08-02T00:39:43.402121 | 2019-03-05T03:29:25 | 2019-03-05T03:29:25 | 161,232,280 | 94 | 20 | NCSA | 2023-02-17T18:50:24 | 2018-12-10T20:25:19 | Python | UTF-8 | Python | false | false | 1,965 | py | import itertools
import pytest
from wasm.datatypes import (
BitSize,
ValType,
)
# Invalid widths (including byte-sized BitSize members) must be rejected by
# both factory classmethods.
@pytest.mark.parametrize(
    'get_X_type,bit_size',
    itertools.product(
        [ValType.get_integer_type, ValType.get_float_type],
        (0, 31, 33, 63, 65, BitSize.b8, BitSize.b16),
    ),
)
def test_get_X_type_invalid_bit_size(get_X_type, bit_size):
    with pytest.raises(ValueError):
        get_X_type(bit_size)


# Valid widths map onto the canonical float singletons.
@pytest.mark.parametrize(
    'value,expected',
    (
        (BitSize.b32, ValType.f32),
        (BitSize.b64, ValType.f64),
    )
)
def test_get_float_type(value, expected):
    actual = ValType.get_float_type(value)
    # using `is` comparison here to ensure that we are using the same object,
    # not just an equal string.
    assert actual is expected


# Valid widths map onto the canonical integer singletons.
@pytest.mark.parametrize(
    'value,expected',
    (
        (BitSize.b32, ValType.i32),
        (BitSize.b64, ValType.i64),
    )
)
def test_get_integer_type(value, expected):
    actual = ValType.get_integer_type(value)
    # using `is` comparison here to ensure that we are using the same object,
    # not just an equal string.
    assert actual is expected


# is_float_type is True exactly for the two float members.
@pytest.mark.parametrize(
    'value,expected',
    (
        (ValType.f32, True),
        (ValType.f64, True),
        (ValType.i32, False),
        (ValType.i64, False),
    )
)
def test_is_float_type(value, expected):
    assert value.is_float_type is expected


# is_integer_type is the complement of is_float_type.
@pytest.mark.parametrize(
    'value,expected',
    (
        (ValType.f32, False),
        (ValType.f64, False),
        (ValType.i32, True),
        (ValType.i64, True),
    )
)
def test_is_integer_type(value, expected):
    assert value.is_integer_type is expected


# Every member reports its bit width via the bit_size property.
@pytest.mark.parametrize(
    'value,expected',
    (
        (ValType.f32, BitSize.b32),
        (ValType.f64, BitSize.b64),
        (ValType.i32, BitSize.b32),
        (ValType.i64, BitSize.b64),
    ),
)
def test_get_bit_size(value, expected):
    assert value.bit_size == expected
| [
"pipermerriam@gmail.com"
] | pipermerriam@gmail.com |
7690f52cb5115a777d61606b3ee0927204e0e972 | cab961b7418cedbc8a20630964577af25f236f95 | /com/liyang/qq/client/login_frame.py | 974855abf27771f774a607cf55db4770e1cbbfba | [] | no_license | ac97ac97/ImitationChat2006 | 89d2d2ee9e787114cfed5b40994b71d447ea1625 | 93b3e95b43f9da517e20a1988f6862805000edb3 | refs/heads/master | 2022-06-17T11:56:44.160874 | 2019-11-12T00:58:37 | 2019-11-12T00:58:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,555 | py | """用户登录窗口"""
import json
from com.liyang.qq.client.my_frame import *
from com.liyang.qq.client.friends_frame import FriendsFrame
class LoginFrame(MyFrame):
    """QQ login window: account/password form plus OK/cancel/register
    buttons; a successful login opens the friends list frame."""
    def __init__(self):
        super().__init__(title='QQ 登录', size=(340, 255))
        # Create the banner image at the top of the window
        topimage = wx.Bitmap('F:\\Python_project\\QQ2006\\resources\\images\\qq11.png', wx.BITMAP_TYPE_PNG)
        topimage_sb = wx.StaticBitmap(self.contentpanel, bitmap=topimage)
        # Create the form controls (account / password / "forgot password")
        middlepannel = wx.Panel(self.contentpanel, style=wx.BORDER_DOUBLE)
        accountid_st = wx.StaticText(middlepannel, label='QQ号码')
        password_st = wx.StaticText(middlepannel, label='QQ密码')
        self.accountid_txt = wx.TextCtrl(middlepannel)
        self.password_txt = wx.TextCtrl(middlepannel, style=wx.TE_PASSWORD)
        st = wx.StaticText(middlepannel, label='忘记密码')
        st.SetForegroundColour(wx.BLUE)
        # Create the 3x3 FlexGrid sizer (fgs) holding the form
        fgs = wx.FlexGridSizer(3, 3, 8, 15)
        fgs.AddMany([
            (accountid_st, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.FIXED_MINSIZE),
            (self.accountid_txt, 1, wx.CENTER | wx.EXPAND),
            wx.StaticText(middlepannel),
            (password_st, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.FIXED_MINSIZE),
            (self.password_txt, 1, wx.CENTER | wx.EXPAND),
            (st, 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT | wx.FIXED_MINSIZE),
            wx.StaticText(middlepannel),
            (wx.CheckBox(middlepannel, -1, '自动登录'), 1, wx.CENTER | wx.EXPAND),
            (wx.CheckBox(middlepannel, -1, '隐身登录'), 1, wx.CENTER | wx.EXPAND)
        ])
        # Configure which FlexGrid rows/columns may grow
        fgs.AddGrowableRow(0, 1)
        fgs.AddGrowableRow(1, 1)
        fgs.AddGrowableCol(0, 1)
        fgs.AddGrowableCol(1, 1)
        fgs.AddGrowableCol(2, 1)
        panelbox = wx.BoxSizer()
        panelbox.Add(fgs, 1, wx.CENTER | wx.ALL | wx.EXPAND, border=10)
        middlepannel.SetSizer(panelbox)
        # Create the OK and cancel buttons and bind their handlers
        okb_btn = wx.Button(parent=self.contentpanel, label='确定')
        self.Bind(wx.EVT_BUTTON, self.okb_btn_onclick, okb_btn)
        cancel_btn = wx.Button(parent=self.contentpanel, label='取消')
        self.Bind(wx.EVT_BUTTON, self.cancel_btn_onclick, cancel_btn)
        # Create the horizontal box with register/OK/cancel buttons
        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add(wx.Button(parent=self.contentpanel, label='申请号码'), 1, wx.CENTER | wx.ALL | wx.EXPAND, border=10)
        hbox.Add(okb_btn, 1, wx.CENTER | wx.ALL | wx.EXPAND, border=10)
        hbox.Add(cancel_btn, 1, wx.CENTER | wx.ALL | wx.EXPAND, border=10)
        # Create the vertical box and stack image, form and buttons on it
        vbox = wx.BoxSizer(wx.VERTICAL)
        vbox.Add(topimage_sb, -1, wx.CENTER | wx.EXPAND)
        vbox.Add(middlepannel, -1, wx.CENTER | wx.ALL | wx.EXPAND, border=5)
        vbox.Add(hbox, -1, wx.CENTER | wx.BOTTOM, border=1)
        self.contentpanel.SetSizer(vbox)
    def okb_btn_onclick(self, event):
        # OK-button handler: attempt the login with the entered credentials
        account = self.accountid_txt.GetValue()
        password = self.password_txt.GetValue()
        user = self.login(account, password)
        # NOTE: revise this if-statement once threading is added
        if user is not None:
            logger.info('登录成功')
            next_frame = FriendsFrame(user)
            next_frame.Show()
            self.Hide()
        else:
            logger.info('登录失败')
            dlg = wx.MessageDialog(self, '您的QQ号或密码不正确', '登录失败', wx.OK | wx.ICON_ERROR)
            dlg.ShowModal()
            dlg.Destroy()
    def cancel_btn_onclick(self, event):
        # Cancel-button handler: close the window and exit the process
        self.Destroy()
        sys.exit(0)
    def login(self, userid, password):
        """Send a login request from the client to the server and return
        the server's reply dict on success, else None (implicit)."""
        # Build the login request payload
        json_obj = {}
        json_obj['command'] = COMMAND_LOGIN
        json_obj['user_id'] = userid
        json_obj['user_pwd'] = password
        # JSON-encode the request
        json_str = json.dumps(json_obj)
        # Send the data to the server (UDP)
        client_socket.sendto(json_str.encode(), server_address)
        # Receive the server's reply
        json_data, _ = client_socket.recvfrom(1024)
        # JSON-decode the reply
        json_obj = json.loads(json_data.decode())
        logger.info('从服务端获接收数据:{0}'.format(json_obj))
        if json_obj['result'] == 0:
            # Login succeeded
            return json_obj
| [
"1727346812@qq.com"
] | 1727346812@qq.com |
dd0aef16bc455b60c3f86bf47e9344c5636d6733 | 20a2c317b1917b5929914438f539ef70f09865af | /manage.py | 90ace4c6c9a3794014b141a05a83980310c3ba3f | [] | no_license | AdrianRamirezSalazar/SistemaChat_Tickets | 4df74a5890ba11a8dd151b5f656621b0991ab22a | 719b04bbaecf5d4564ac029bf3dbcce53e5abd9f | refs/heads/master | 2023-01-13T10:14:18.327000 | 2020-11-12T23:14:08 | 2020-11-12T23:14:08 | 309,481,570 | 0 | 1 | null | 2020-11-12T23:14:09 | 2020-11-02T20:02:36 | JavaScript | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Plataforma_CT.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"aramirez@finkok.com.mx"
] | aramirez@finkok.com.mx |
b5c06d35f02a957b85d2d53b35b03a8d001f3549 | 6a6b924af29b1e7ca209b844965e165d1c254ff9 | /env/bin/wheel | 8f16476e09cc6f17581c524ab1492af21df37d3d | [] | no_license | kaisucode/django_youtube_downloader | 8b5afdce888d61213db2251b9697cbbd31dae063 | fd28577a22f15a7d00c34f620bf917be9c9d5920 | refs/heads/master | 2020-09-05T10:36:47.633712 | 2018-08-03T18:27:14 | 2018-08-03T18:27:14 | 220,075,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 241 | #!/home/kevin/projects/ytToMp3/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"kevin.hsukaihao@gmail.com"
] | kevin.hsukaihao@gmail.com | |
28add63f80f464506dd4b32adbc43f9b991dc2eb | 7aa0e66cf69423a72f8d7e9722b83219a41efc17 | /src/accuracy_metrics.py | 11ea6bf80136fc16faae3a497ac409a8b968ab6d | [] | no_license | yuyangyg/pspnet | a2611bc93e3418feccf0ef03de28370523ab2eab | 40f8ee443403bef3ff200419c3d4050992ed6cf8 | refs/heads/master | 2020-03-28T15:31:23.752697 | 2018-07-09T19:08:23 | 2018-07-09T19:08:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,771 | py | import numpy as np
from PIL import Image
def pixel_accuracy(image1, image2):
    """Fraction of pixels where the predicted label map equals the
    ground-truth label map.

    image1, image2 -- 2-D label maps of identical shape (anything
    np.asarray accepts).  Returns a float in [0, 1].
    """
    image1 = np.asarray(image1)
    image2 = np.asarray(image2)
    # Keep the original's 2-D contract explicit.
    [row, col] = image1.shape
    # Vectorized: the original compared pixel-by-pixel in a Python loop.
    return float(np.count_nonzero(image1 == image2)) / (row * col)
def mean_accuracy(image1, image2, num_classes):
    """Mean per-class pixel accuracy.

    For each class c, accuracy_c = (#pixels predicted c AND labelled c) /
    (#pixels labelled c); the mean is taken only over classes that occur
    in the ground truth (image2).  The 1e-8 term mirrors the original's
    divide-by-zero guard.

    image1 -- predicted label map, image2 -- ground-truth label map
    (non-negative integer labels); num_classes -- number of classes.
    """
    pred = np.asarray(image1).ravel()
    gt = np.asarray(image2).ravel()
    # Size the histograms so both arrays line up even if a label exceeds
    # num_classes (the original Python loop would have raised IndexError).
    n_bins = max(num_classes, int(pred.max()) + 1, int(gt.max()) + 1)
    totals = np.bincount(gt, minlength=n_bins).astype(np.float64)
    # A pixel is correct for its ground-truth class when pred == gt there.
    correct = np.bincount(gt[pred == gt], minlength=n_bins).astype(np.float64)
    per_class = correct / (totals + 1e-8)
    # Average only over classes present in the ground truth; absent classes
    # contribute 0 to the numerator sum, exactly like the original.
    return float(per_class.sum() / np.count_nonzero(totals > 0))
def mean_IU(image1,image2,num_classes):
image1=np.array(image1)
image2=np.array(image2)
[row,col]=image1.shape
correct_predictions=np.zeros((num_classes,1))
incorrect_predictions=np.zeros((num_classes,1))
correct_labels=np.zeros((num_classes,1))
incorrect_labels=np.zeros((num_classes,1))
image1=np.reshape(image1,(row*col,1))
image2=np.reshape(image2,(row*col,1))
for i in range(row*col):
if(image1[i]==image2[i]):
correct_predictions[image1[i]]+=1
correct_labels[image1[i]]+=1
else:
incorrect_predictions[image1[i]]+=1
incorrect_labels[image2[i]]+=1
return ((sum(correct_predictions/(correct_predictions+incorrect_predictions+incorrect_labels+1e-8)))[0]
/sum((correct_predictions+incorrect_predictions+incorrect_labels)>0)[0])
#im1=np.array([[0,0,0],[1,1,1]])
#im2=np.array([[1,0,1],[0,0,0]])
#print(mean_accuracy(im1,im2,3)) | [
"niharmehta79@gmail.com"
] | niharmehta79@gmail.com |
a32c696a24084aaada192cbdb75a74f72eb89d86 | 84f8e3b73306c7088e5cd504e1db13cebc0e023f | /main.py | 04531866d349729db6f84855394a49a5142e0619 | [] | no_license | gatekeepr/telequotes | d0a90d94dbe747cb10f34726442ac11a28bdb004 | b84a0b2f99c4844451021d5c5947897d3ecafd4f | refs/heads/master | 2020-07-28T10:42:14.908867 | 2019-11-06T12:51:16 | 2019-11-06T12:51:16 | 209,396,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,769 | py | from telegram.ext import Updater, CommandHandler
from random import randrange
from gtts import gTTS
import logging
import csv
import time
import os
# initialising
token = open("config.txt", "r").read().strip()
legalusers = list(map(int, open("legalusers.txt", "r").readlines()))
updater = Updater(token=token, use_context=True)
dispatcher = updater.dispatcher
# counts all lines in the quotefile
def countquotes(filename):
with open(filename) as f:
return sum(1 for line in f)
# creates an mp3 for a given string
def quoteToAudio(text):
ttsobj = gTTS(text=text, lang='de', slow=False)
ttsobj.save("Kek.mp3")
# checks if the querying user is allowed to use the bot
def checkValidUser(update, context):
if update.message.chat.id not in legalusers:
context.bot.send_message(
chat_id=update.message.chat_id, text="Sorry, this is a private Bot!"
)
return False
else:
return True
# creates a list with quotes containing a keyword
def createCandidate(**kwargs):
candidates = []
keyword = kwargs.get('keyword', None)
counter = countquotes("quotes.csv")
pseudorandom = randrange(counter - 1)
with open("quotes.csv", "r", encoding="utf-8") as csvfile:
fieldnames = ["id", "username", "date", "quote"]
reader = csv.DictReader(csvfile, delimiter=",")
if(keyword):
for row in reader:
quote = row["quote"]
if keyword in quote:
candidates.append(row)
counter = len(candidates)
if counter == 0:
context.bot.send_message(
chat_id=update.message.chat_id, text="Nothing found!"
)
return -1
else:
pseudorandom = randrange(counter)
for row in candidates:
if candidates.index(row) == pseudorandom:
quote = row["quote"]
else:
for row in reader:
if reader.line_num - 2 == pseudorandom:
quote = row["quote"]
return quote
# sends a random quote as a text message
def tts(update, context):
if checkValidUser(update, context):
keyword = False
if context.args:
if len(context.args[0]) > 1:
keyword = context.args[0]
text = createCandidate(keyword=keyword)
if(text is -1):
return
quoteToAudio(text)
context.bot.send_audio(
chat_id=update.message.chat_id, audio=open('Kek.mp3', 'rb'))
os.system("rm Kek.mp3")
# adds a quote to the database
def add(update, context):
if checkValidUser(update, context):
# collect information from message
text = update.message.reply_to_message.text
date = str(update.message.reply_to_message.date.now())
user = update.message.reply_to_message.from_user.first_name
# write data to csv
with open("quotes.csv", "a", encoding="utf-8") as csvfile:
fieldnames = ["id", "username", "date", "quote"]
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
counter = countquotes("quotes.csv")
writer.writerow(
{"id": counter, "username": user, "date": date, "quote": text}
)
# sends a fully random quote if no keyword is passed
def random(update, context):
if checkValidUser(update, context):
mode = 1
amount = 1
cycleAmount = 0
quotelist = []
keyword = False
if context.args:
# keyword specified in message, save it
if len(context.args[0]) > 1:
# if a number is also passed as second argument cycle multiple times
if len(context.args) > 1:
if len(context.args[1]) == 1:
amount = int(context.args[1])
keyword = context.args[0]
mode = 2
# if a number is passed send that many quotes
elif len(context.args[0]) == 1:
amount = int(context.args[0])
mode = 3
# repeat process if desired
while cycleAmount < amount:
quotelist.append(createCandidate(keyword=keyword))
if(quotelist[0] is -1):
return
context.bot.send_message(
chat_id=update.message.chat_id, text=quotelist[cycleAmount])
cycleAmount += 1
time.sleep(1)
# deplpy dispatcher and wait for messages
dispatcher.add_handler(CommandHandler("add", add))
dispatcher.add_handler(CommandHandler("random", random))
dispatcher.add_handler(CommandHandler("tts", tts))
updater.start_polling()
| [
"gate@black-mesa.xyz"
] | gate@black-mesa.xyz |
8a2cd49309ce0d18dfc703d2a07c68b878bc6c0a | 0d0ba103050607a7e4994ee5253140f3e9c13f6f | /2-2.py | 3acd2522e79c0baaae77ea8d029c274ec790ad12 | [] | no_license | yinyangguaiji/yygg-C1 | ac30d8ae5fd3a51985723acd527844860e658dd6 | 08ef81785a44715680b2f8543ac665424f8ce63b | refs/heads/master | 2023-03-12T09:25:22.747657 | 2021-02-19T07:24:33 | 2021-02-19T07:24:33 | 332,170,674 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | message="加油!fighing!"
print(message)
message="I can do it!"
print(message)
| [
"noreply@github.com"
] | yinyangguaiji.noreply@github.com |
59c40ad3fafc1e69fd4b6ce87d4e11042b64a5e7 | cab4a76fc9ab56ce17a4375e47647c73670c53f6 | /portfolio/portfolio/settings.py | 59af2de31af963231cb56b72b3c622301d158701 | [] | no_license | Deserve82/web-portfolio | 379def39908216aa1fa1e83bccb622e0011b9ace | fb70a0021011b80e1904d54600ff2785395d738c | refs/heads/master | 2023-01-11T14:19:07.053198 | 2020-11-09T09:04:46 | 2020-11-09T09:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,059 | py | import json
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = Path(__file__).resolve().parent.parent
with open("./secrets.json") as f:
secrets = json.loads(f.read())
def get_secret(setting, secret=secrets):
try:
return secret[setting]
except KeyError:
error_msg = "Set the {} environment variable".format(setting)
raise ImproperlyConfigured(error_msg)
SECRET_KEY = get_secret("SECRET_KEY")
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# local apps
'projects',
'users',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'portfolio.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'portfolio.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': get_secret("DB_NAME"),
'USER': get_secret("DB_USER"),
'PASSWORD': get_secret("DB_PASSWORD"),
'HOST': get_secret("HOST"),
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Seoul'
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
| [
"inth8204@gmail.com"
] | inth8204@gmail.com |
002e071b690a95244acf946849a9c4566b3413e0 | 1ff5a94401dbcb56760005824b428f0d5aae53be | /assgnment1.2.py | 47da13876b201859d60be7970966a30a900667ad | [] | no_license | rajya-lak/assgnment1.2.py | 1a8d89a4caf30213a97321cd5614baa6437956a5 | b57ccb417c941965f63c69c66e9669b3fd9ac701 | refs/heads/main | 2023-08-31T22:37:00.154383 | 2021-10-18T07:00:43 | 2021-10-18T07:00:43 | 418,381,626 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,752 | py | calculator using functions:
def multiplication(num1, num2):
return num1 * num2
def addition(num1, num2):
return num1 + num2
def subtraction(num1, num2):
return num1 - num2
def divide(num1, num2):
return num1 / num2
value1 = int(input("Enter 1st number: "))
value2 = int(input("Enter 2nd number: "))
print("Select operation 1-Division, 2-Multiplication, 3-Addition, 4-Subtraction")
operation = int(input("Choose operation 1/2/3/4: "))
if operation == 1:
print(value1, "/", value2, "=", divide(value1, value2))
elif operation == 2:
print(value1, "*", value2, "=", multiplication(value1, value2))
elif operation == 3:
print(value1, "+", value2, "=", addition(value1, value2))
elif operation == 4:
print(value1, "-", value2, "=", subtraction(value1, value2))
else:
print("enter correct operation");
Output:
Enter 1st Number: 2
Enter 2nd Number: 2
Select operation 1-Division, 2-Multiplication, 3-Addition, 4-Subtraction
Choose operation 1/2/3/4: 1
2 / 2 = 1.0
string method:
Method Description
capitalize() Converts the first character to upper case
casefold() Converts string into lower case
center() Returns a centered string
count() Returns the number of times a specified value occurs in a string
encode() Returns an encoded version of the string
endswith() Returns true if the string ends with the specified value
expandtabs() Sets the tab size of the string
find() Searches the string for a specified value and returns the position of where it was found
format() Formats specified values in a string
format_map() Formats specified values in a string
index() Searches the string for a specified value and returns the position of where it was found
isalnum() Returns True if all characters in the string are alphanumeric
isalpha() Returns True if all characters in the string are in the alphabet
isascii() Returns True if all characters in the string are ascii characters
isdecimal() Returns True if all characters in the string are decimals
isdigit() Returns True if all characters in the string are digits
isidentifier() Returns True if the string is an identifier
islower() Returns True if all characters in the string are lower case
isnumeric() Returns True if all characters in the string are numeric
isprintable() Returns True if all characters in the string are printable
isspace() Returns True if all characters in the string are whitespaces
istitle() Returns True if the string follows the rules of a title
isupper() Returns True if all characters in the string are upper case
join() Joins the elements of an iterable to the end of the string
ljust() Returns a left justified version of the string
lower() Converts a string into lower case
lstrip() Returns a left trim version of the string
maketrans() Returns a translation table to be used in translations
partition() Returns a tuple where the string is parted into three parts
replace() Returns a string where a specified value is replaced with a specified value
rfind() Searches the string for a specified value and returns the last position of where it was found
rindex() Searches the string for a specified value and returns the last position of where it was found
rjust() Returns a right justified version of the string
rpartition() Returns a tuple where the string is parted into three parts
rsplit() Splits the string at the specified separator, and returns a list
rstrip() Returns a right trim version of the string
split() Splits the string at the specified separator, and returns a list
splitlines() Splits the string at line breaks and returns a list
startswith() Returns true if the string starts with the specified value
strip() Returns a trimmed version of the string
swapcase() Swaps cases, lower case becomes upper case and vice versa
title() Converts the first character of each word to upper case
translate() Returns a translated string
upper() Converts a string into upper case
zfill() Fills the string with a specified number of 0 values at the beginning
list methods:
Method Description
append() Adds an element at the end of the list
clear() Removes all the elements from the list
copy() Returns a copy of the list
count() Returns the number of elements with the specified value
extend() Add the elements of a list (or any iterable), to the end of the current list
index() Returns the index of the first element with the specified value
insert() Adds an element at the specified position
pop() Removes the element at the specified position
remove() Removes the first item with the specified value
reverse() Reverses the order of the list
sort() Sorts the list
| [
"noreply@github.com"
] | rajya-lak.noreply@github.com |
8c8a74b218f56dc6033e84179868845505a83d41 | e2d450f1616f3408e3c933f293c3329ffb976607 | /manage.py | 98fccd57d1c0e56cfa0ccae4db54937eecc0c1fd | [] | no_license | marriema/website | 37416c3e6bc640fab89aac410edbfa217e3bb5e7 | c9a763f30773661aa76a32b0ba8380e160696efd | refs/heads/master | 2020-12-24T07:35:58.020260 | 2016-08-28T16:57:00 | 2016-08-28T16:57:00 | 56,889,910 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "illini.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"marriema@wirelessprvnat-172-17-67-79.near.illinois.edu"
] | marriema@wirelessprvnat-172-17-67-79.near.illinois.edu |
dfe4531f5701b5c35ed62682143758ff84c52e3a | 5dcbe8af11275d4b97d52701bab3340799a98a62 | /python/Kaltura/KalturaClient/Plugins/ScheduledTaskEventNotification.py | cb8aede854876ce0cfe04928836e74296d6396e3 | [] | no_license | pjnr1/KalturaExtender | 2d47162e4d7bfcf45cbdfa4cda9e371f33589021 | e84b41819fd37d3474a75e6f6a9cdd9491d8289b | refs/heads/master | 2021-10-16T04:17:31.838083 | 2019-02-07T16:36:56 | 2019-02-07T16:36:56 | 147,160,993 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,050 | py | # ===================================================================================================
# _ __ _ _
# | |/ /__ _| | |_ _ _ _ _ __ _
# | ' </ _` | | _| || | '_/ _` |
# |_|\_\__,_|_|\__|\_,_|_| \__,_|
#
# This file is part of the Kaltura Collaborative Media Suite which allows users
# to do with audio, video, and animation what Wiki platfroms allow them to do with
# text.
#
# Copyright (C) 2006-2018 Kaltura Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http:#www.gnu.org/licenses/>.
#
# @ignore
# ===================================================================================================
# @package Kaltura
# @subpackage Client
from __future__ import absolute_import
from .Core import *
from .ScheduledTask import *
from .EventNotification import *
from ..Base import (
getXmlNodeBool,
getXmlNodeFloat,
getXmlNodeInt,
getXmlNodeText,
KalturaClientPlugin,
KalturaEnumsFactory,
KalturaObjectBase,
KalturaObjectFactory,
KalturaParams,
KalturaServiceBase,
)
########## enums ##########
########## classes ##########
# @package Kaltura
# @subpackage Client
class KalturaDispatchEventNotificationObjectTask(KalturaObjectTask):
def __init__(self,
type=NotImplemented,
stopProcessingOnError=NotImplemented,
eventNotificationTemplateId=NotImplemented):
KalturaObjectTask.__init__(self,
type,
stopProcessingOnError)
# The event notification template id to dispatch
# @var int
self.eventNotificationTemplateId = eventNotificationTemplateId
PROPERTY_LOADERS = {
'eventNotificationTemplateId': getXmlNodeInt,
}
def fromXml(self, node):
KalturaObjectTask.fromXml(self, node)
self.fromXmlImpl(node, KalturaDispatchEventNotificationObjectTask.PROPERTY_LOADERS)
def toParams(self):
kparams = KalturaObjectTask.toParams(self)
kparams.put("objectType", "KalturaDispatchEventNotificationObjectTask")
kparams.addIntIfDefined("eventNotificationTemplateId", self.eventNotificationTemplateId)
return kparams
def getEventNotificationTemplateId(self):
return self.eventNotificationTemplateId
def setEventNotificationTemplateId(self, newEventNotificationTemplateId):
self.eventNotificationTemplateId = newEventNotificationTemplateId
########## services ##########
########## main ##########
class KalturaScheduledTaskEventNotificationClientPlugin(KalturaClientPlugin):
# KalturaScheduledTaskEventNotificationClientPlugin
instance = None
# @return KalturaScheduledTaskEventNotificationClientPlugin
@staticmethod
def get():
if KalturaScheduledTaskEventNotificationClientPlugin.instance == None:
KalturaScheduledTaskEventNotificationClientPlugin.instance = KalturaScheduledTaskEventNotificationClientPlugin()
return KalturaScheduledTaskEventNotificationClientPlugin.instance
# @return array<KalturaServiceBase>
def getServices(self):
return {
}
def getEnums(self):
return {
}
def getTypes(self):
return {
'KalturaDispatchEventNotificationObjectTask': KalturaDispatchEventNotificationObjectTask,
}
# @return string
def getName(self):
return 'scheduledTaskEventNotification'
| [
"jensctl@gmail.com"
] | jensctl@gmail.com |
15980a2b29639432947f497bdf42ab0fc9651da1 | cb6f176fe0acbe6e7150b1cc127a079dee72cdb8 | /src/winter_globalplanner/src/MoveBase/python_movebase/odom_square.py | db8a8f020a2b656d3c0b75c165b1727d69b2379d | [] | no_license | wwhisme/MOI_Robot_Winter | 8f21da4e236eeb463060e305f71f4f7cc1fba7fa | 9f281d3d097bf0178a4ac87f0a56d361a1745184 | refs/heads/master | 2020-06-05T20:58:42.170790 | 2018-01-29T20:45:03 | 2018-01-29T20:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,543 | py | #!/usr/bin/env python
#coding=utf-8
""" odom_out_and_back.py
使用/odom话题让机器人移动给定的距离或者旋转给定的角度
"""
import rospy
from geometry_msgs.msg import Twist, Point, Quaternion
import tf
from transform_utils import quat_to_angle, normalize_angle
from math import radians, copysign, sqrt, pow, pi
import PyKDL
import time
class OutAndBack():
def __init__(self):
# Give the node a name
rospy.init_node('out_and_back', anonymous=True)
# Set rospy to execute a shutdown function when exiting
rospy.on_shutdown(self.shutdown)
# queue_size一定要等于1,否则会导致,速度命令进入队列,导致车子无法停止
self.cmd_vel = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
# How fast will we update the robot's movement?
rate = 20
# Set the equivalent ROS rate variable
self.r = rospy.Rate(rate)
# Initialize the tf listener
self.tf_listener = tf.TransformListener()
# Give tf some time to fill its buffer
rospy.sleep(2)
# Set the odom frame
self.odom_frame = '/odom'
# Find out if the robot uses /base_link or /base_footprint
try:
self.tf_listener.waitForTransform(self.odom_frame, '/base_link', rospy.Time(), rospy.Duration(1.0))
self.base_frame = '/base_link'
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("Cannot find transform between /odom and /base_link or /base_footprint")
rospy.signal_shutdown("tf Exception")
def quat_to_angle(quat):
rot = PyKDL.Rotation.Quaternion(quat.x, quat.y, quat.z, quat.w)
return rot.GetRPY()[2]
def MoveX(self,goal_distance,goal_linear_speed):
# Initialize the position variable as a Point type
position = Point()
move_cmd = Twist()
# Get the starting position values
(position, rotation) = self.get_odom()
x_start = position.x
y_start = position.y
current_linear_speed = 0.2
if goal_linear_speed<0:
#当前的线性速度
current_linear_speed = -0.2
# Keep track of the distance traveled
distance = 0
# Enter the loop to move along a side
while distance < goal_distance and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
move_cmd.linear.x = current_linear_speed
move_cmd.linear.y = 0
self.cmd_vel.publish(move_cmd)
self.r.sleep()
(position, rotation) = self.get_odom()
# Compute the Euclidean distance from the start
distance = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))
#在开始走和快要到目的地的时候减小速度,设置加速和减速距离0.2m,0.2*5等于1
#减速
if (abs(goal_distance - distance) < 0.2):
current_linear_speed = abs(goal_distance - distance) / 0.2 * goal_linear_speed
if abs(current_linear_speed) < 0.02:
current_linear_speed = 0.02
if goal_linear_speed<0:
current_linear_speed = -0.02
if abs(current_linear_speed) > abs(goal_linear_speed):
current_linear_speed = goal_linear_speed
#加速
if (distance < 0.1):
current_linear_speed = distance / 0.1 * goal_linear_speed
if abs(current_linear_speed) < 0.02:
current_linear_speed = 0.02
if goal_linear_speed<0:
current_linear_speed = -0.02
if abs(current_linear_speed) > abs(goal_linear_speed):
current_linear_speed = goal_linear_speed
print "distance: %f, current_linear_x: %f"%(distance, move_cmd.linear.x)
# Stop the robot before next action
self.cmd_vel.publish(Twist())
rospy.sleep(0.5)
# Stop the robot for good
self.cmd_vel.publish(Twist())
def MoveY(self,goal_distance,goal_linear_speed):
# Initialize the position variable as a Point type
position = Point()
move_cmd = Twist()
# Get the starting position values
(position, rotation) = self.get_odom()
x_start = position.x
y_start = position.y
current_linear_speed = 0.2
if goal_linear_speed<0:
#当前的线性速度
current_linear_speed = -0.2
# Keep track of the distance traveled
distance = 0
# Enter the loop to move along a side
while distance < goal_distance and not rospy.is_shutdown():
# Publish the Twist message and sleep 1 cycle
move_cmd.linear.y = current_linear_speed
move_cmd.linear.x = 0
self.cmd_vel.publish(move_cmd)
self.r.sleep()
(position, rotation) = self.get_odom()
# Compute the Euclidean distance from the start
distance = sqrt(pow((position.x - x_start), 2) + pow((position.y - y_start), 2))
#在开始走和快要到目的地的时候减小速度,设置加速和减速距离0.2m,0.2*5等于1
#减速
if (abs(goal_distance - distance) < 0.2):
current_linear_speed = abs(goal_distance - distance) / 0.2 * goal_linear_speed
if abs(current_linear_speed) < 0.02:
current_linear_speed = 0.02
if goal_linear_speed<0:
current_linear_speed = -0.02
if abs(current_linear_speed) > abs(goal_linear_speed):
current_linear_speed = goal_linear_speed
#加速
if (distance < 0.1):
current_linear_speed = distance / 0.1 * goal_linear_speed
if abs(current_linear_speed) < 0.02:
current_linear_speed = 0.02
if goal_linear_speed<0:
current_linear_speed = -0.02
if abs(current_linear_speed) > abs(goal_linear_speed):
current_linear_speed = goal_linear_speed
print "distance: %f, current_linear_x: %f"%(distance, move_cmd.linear.x)
# Stop the robot before next action
self.cmd_vel.publish(Twist())
rospy.sleep(2)
# Stop the robot for good
self.cmd_vel.publish(Twist())
#传入目标位置 目标点方向四元素 将机器人转动到向该目标点运动的方向
def AngleY(self,angle):
if(angle<0.0):
angle=2*pi+angle
return angle
else:
return angle
def rotateToGoalDirection(self,turn_Angle):
#获取当前位置
turn_angle=turn_Angle
(position, rotation) = self.get_odom()
goalAngle=rotation+turn_angle
move_cmd=Twist()
turn_angle=self.AngleY(goalAngle)-self.AngleY(rotation)
lastTA=turn_angle
i=0
while abs(turn_angle)>0.02 and not rospy.is_shutdown():
(position, rotation) = self.get_odom()
turn_angle=self.AngleY(goalAngle)-self.AngleY(rotation)
print self.AngleY(goalAngle)
print self.AngleY(rotation)
errA=abs(turn_angle)-abs(lastTA)
print errA
if errA<0.262:
if turn_Angle>0:
move_cmd.angular.z+=-0.015
else:
move_cmd.angular.z+=0.015
elif abs(turn_angle)<0.262:
if turn_Angle>0:
move_cmd.angular.z+=-0.015
else:
move_cmd.angular.z+=0.015
else:
print 'c'
if turn_Angle>0:
move_cmd.angular.z=0-0.6
else:
move_cmd.angular.z=0.6
self.cmd_vel.publish(move_cmd)
self.r.sleep()
print "turnangle: %f cmdZ %f "%(turn_angle,move_cmd.angular.z)
move_cmd=Twist()
self.cmd_vel.publish(move_cmd)
time.sleep(0.5)
self.cmd_vel.publish(move_cmd)
def get_odom(self):
# Get the current transform between the odom and base frame
try:
(trans, rot) = self.tf_listener.lookupTransform(self.odom_frame, self.base_frame, rospy.Time(0))
except (tf.Exception, tf.ConnectivityException, tf.LookupException):
rospy.loginfo("TF Exception")
return
return (Point(*trans), quat_to_angle(Quaternion(*rot)))
def shutdown(self):
# Always stop the robot when shutting down the node.
rospy.loginfo("Stopping the robot...")
self.cmd_vel.publish(Twist())
rospy.sleep(1)
if __name__ == '__main__':
print "OutAndBack"
mout=OutAndBack()
mout.MoveX(1.0,0.3)
#mout.rotateToGoalDirection(1.57)
#mout.MoveX(2.0,-0.3)
#mout.MoveY(2.0,0.3)
#mout.MoveY(1.0,-0.3)
| [
"2385909406@qq.com"
] | 2385909406@qq.com |
8c123c4cb85f559ec663c855328aa670e4071d94 | 9f0587bcc4431e05216e829ce6484454a246270d | /1_Data_Structures_and_Algorithms/14_sorted_item_wihtout_native_comparsion_support.py | 830ea437fb313df7d926a8ec513b837ed27a09f1 | [] | no_license | Kaminagiyou123/python_cookbook | fa40776678b7bf20981e4cc58755304bf61c263a | 3aa3bce1724e13a7b32630512aef9f7d3e1f1c78 | refs/heads/master | 2023-06-03T03:34:31.593532 | 2021-07-01T15:58:09 | 2021-07-01T15:58:09 | 380,417,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | from operator import attrgetter
class User:
def __init__(self,user_id):
self.user_id=user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users=[User(23),User(3),User(99)]
print(sorted(users,key=lambda x: x.user_id))
print(sorted(users,key=attrgetter('user_id')))
| [
"ran.you8311@gmail.com"
] | ran.you8311@gmail.com |
c4231a9b9e0a030a4e7585bbe5e7b37ecb82b120 | 00c4c7b4d5be8a27790085a60cfc383b0bbe8221 | /_scripts/setup/git_helpers.py | 56aa2699f663df26e5e0ae1b98ea2bc1f1ed446b | [] | permissive | SU-ECE-18-7/hotspotter | 6b0b03fd17c77249bb6cb4cf64a68cbe239271f7 | 199b6a744ef66724cc39f39d7474afb1152f242e | refs/heads/master | 2021-08-22T04:09:32.344059 | 2018-10-01T23:32:52 | 2018-10-01T23:32:52 | 108,045,883 | 0 | 5 | Apache-2.0 | 2018-06-15T22:07:08 | 2017-10-23T22:10:11 | Python | UTF-8 | Python | false | false | 1,983 | py | from os.path import dirname, realpath, join, exists, normpath, isdir, isfile
import subprocess
import os
import sys
# josharian: I doubt that the minimal environment stuff here is
# still needed; it is inherited. This was originally
# an hg_version function borrowed from NumPy's setup.py.
# I'm leaving it in for now because I don't have enough other
# environments to test in to be confident that it is safe to remove.
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'PYTHONPATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
env=env
).communicate()[0]
return out
def git_fetch_url():
''' Return the git fetch url.'''
fetch_url = 'unknown'
try:
out = _minimal_ext_cmd(['git', 'remote', '-v']).strip().decode('ascii')
for item in out.split('\n'):
fetch_pos = item.find(' (fetch)')
origin_pos = item.find('origin\t')
if fetch_pos > -1 and origin_pos > -1:
fetch_url = item[origin_pos+7:fetch_pos]
except Exception:
fetch_url = 'unknown-exception'
return fetch_url
def git_branch():
''' Return the current git branch. '''
try:
out = _minimal_ext_cmd(['git', 'branch'])
_branch1 = out.strip().decode('ascii')+'\n'
_branch2 = _branch1[_branch1.find('*')+1:]
branch = _branch2[:_branch2.find('\n')].strip()
except OSError:
branch = 'release'
return branch
def git_version():
''' Return the sha1 of local git HEAD as a string. '''
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
git_revision = out.strip().decode('ascii')
except OSError:
git_revision = 'unknown-git'
return git_revision
| [
"jon.crall@BakerStreet"
] | jon.crall@BakerStreet |
aa5f4b33489c76f3247c8cbea3d4968230338d92 | b6a2666bb8b18038bad1da2176b0c66f501ac304 | /lesson_4.py | b2221771c02edb6f0208da5301bd0d1bb88a42d6 | [] | no_license | makacyge/Projekt_X | 840b6308b139208f5f78b5494855534575a5a8e3 | f1b8898e87cc6e4a169bb451cacb61c43196cd8a | refs/heads/main | 2023-05-06T22:47:25.892769 | 2021-05-29T09:26:28 | 2021-05-29T09:26:28 | 356,028,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # www='www.conguer_and_command.net'
# if ".com" in www:
# print("com in www")
# else:
# print("com not in www")
# v = 321
# # m_s = str(v) if v < 200 else str(v)[::-1]
# # print(m_s)
# c = 10
# e_f = True
# while e_f:
# c -= 1
# if c > 0:
# e_f = False
# print("Test")
# v = "123456789"
# m_s = v if len(v)< 5 else v[2:5:2]
# print(m_s)
#
# value = 120
# new_value = value / 2 if value < 100 else - value
# print(new_value) | [
"makacyge88@gmail.com"
] | makacyge88@gmail.com |
51b2e7b05b477e46ddadca7741d9d0d675386768 | acc1ebf92bd2c2380ad1fb3c201e892695993421 | /venv/Scripts/rst2html.py | 14fa46f300f14073733d059382cbef7fcccf919f | [] | no_license | patricknutt/CalculatorApp | 8811e55e442396a05d18a45bba7341a4cf49fbf7 | 408316842eb5cfeaae37e3be93a33288ccf88231 | refs/heads/master | 2021-05-19T10:06:49.763501 | 2020-03-31T16:00:33 | 2020-03-31T16:00:33 | 251,643,906 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | #!C:\Users\nuttp\PycharmProjects\CalculatorApp\venv\Scripts\python.exe
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
| [
"pryderinudd@yahoo.com"
] | pryderinudd@yahoo.com |
ecd0564bf9181737a93f17ed178e36c0d06083ab | 3ffb078c95f48268fac322764d20bb0f673396e2 | /day_to_diary/migrations/0005_auto_20200309_1212.py | eb5f1ab715d43c3645bd4cd4b1b0b92237d8cc7f | [] | no_license | Shivamtyagi505/Python-DashBoardPanel | 818a86e927912669271d93217a36309710a3b976 | 7952f00dc6a98169bc5c9c22c17c4207dda1829e | refs/heads/master | 2023-04-14T07:12:19.346545 | 2021-05-01T06:19:53 | 2021-05-01T06:19:53 | 363,338,483 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | # Generated by Django 2.2.5 on 2020-03-09 06:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('day_to_diary', '0004_auto_20200309_1019'),
]
operations = [
migrations.CreateModel(
name='current_position',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('location', models.TextField()),
('date_posted', models.DateTimeField(auto_now_add=True)),
],
),
migrations.DeleteModel(
name='position',
),
]
| [
"shivam.tyagi@gotomobility.in"
] | shivam.tyagi@gotomobility.in |
89c182daa5b7726cb8251be1c823b804cda7fcad | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_18429.py | 359b1b34ef43ef6954446d0b6d9a5e7a29cc9db7 | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | # ImportError: No module named PyQt5 - OSX Mavericks
export set PYTHONPATH=$PYTHONPATH:/System/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages
| [
"ubuntu@ip-172-31-7-228.us-west-2.compute.internal"
] | ubuntu@ip-172-31-7-228.us-west-2.compute.internal |
626a58c00436702d2161a13f170597fcf37aa5d7 | adecc7f08d1a16947f3a3d134822d8ff955cab04 | /test/functional/p2p_zpos_fakestake.py | 36e3fd19f5fa181566e4557c4ab7b832356284fc | [
"MIT"
] | permissive | NodeZeroCoin/nodezerocore | 75af6bb7a3bb94207deb18c6ca2d9d377ed406e2 | 852034254f8ebc5611f8ffe12104628b27207100 | refs/heads/master | 2021-02-11T11:17:44.408776 | 2020-05-12T22:51:21 | 2020-05-12T22:51:21 | 244,486,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,017 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The PIVX Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend
of an already spent coin.
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import NZR_FakeStakeTest
class zPoSFakeStake(NZR_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend of an already spent coin."
self.init_test()
DENOM_TO_USE = 5000 # zc denomination
INITAL_MINED_BLOCKS = 321 # First mined blocks (rewards collected to mint)
MORE_MINED_BLOCKS = 301 # More blocks mined before spending zerocoins
self.NUM_BLOCKS = 2 # Number of spammed blocks
# 1) Starting mining blocks
self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
sleep(2)
# 2) Collect the possible prevouts and mint zerocoins with those
self.log.info("Collecting all unspent coins which we generated from mining...")
balance = self.node.getbalance("*", 100)
self.log.info("Minting zerocoins...")
initial_mints = 0
while balance > DENOM_TO_USE:
try:
self.node.mintzerocoin(DENOM_TO_USE)
except JSONRPCException:
break
sleep(1)
initial_mints += 1
self.node.generate(1)
sleep(1)
if initial_mints % 5 == 0:
self.log.info("Minted %d coins" % initial_mints)
if initial_mints >= 70:
break
balance = self.node.getbalance("*", 100)
self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
sleep(2)
# 3) mine more blocks
self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
mints = self.node.listmintedzerocoins(True, True)
mints_hashes = [x["serial hash"] for x in mints]
# This mints are not ready spendable, only few of them.
self.log.info("Got %d confirmed mints" % len(mints_hashes))
# 4) spend mints
self.log.info("Spending mints in block %d..." % self.node.getblockcount())
spends = 0
spent_mints = []
for mint in mints_hashes:
# create a single element list to pass to RPC spendzerocoinmints
mint_arg = []
mint_arg.append(mint)
try:
self.node.spendzerocoinmints(mint_arg)
sleep(1)
spends += 1
spent_mints.append(mint)
except JSONRPCException as e:
self.log.warning(str(e))
continue
sleep(1)
self.log.info("Successfully spent %d mints" % spends)
# 5) Start mining again so that spends get confirmed in a block.
self.log.info("Mining 5 more blocks...")
self.node.generate(5)
sleep(2)
# 6) Collect some prevouts for random txes
self.log.info("Collecting inputs for txes...")
spending_utxo_list = self.node.listunspent()
sleep(1)
# 7) Create "Fake Stake" blocks and send them
self.log.info("Creating Fake stake zPoS blocks...")
err_msgs = self.test_spam("Main", mints, spending_utxo_list=spending_utxo_list, fZPoS=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zPoSFakeStake().main()
| [
"61714747+NodeZeroCoin@users.noreply.github.com"
] | 61714747+NodeZeroCoin@users.noreply.github.com |
5a74f109aa5a0deaf18ce42d756a9d9d686d2cb4 | 3ecf7a6df0ee1e50e4ca3c50ac500434ac9701f7 | /saploapi.py | 1376004f90d52e6bc7b25b9a47452f3743318bdb | [] | no_license | oskarols/saplo4python | 4700f9efa903fe5f54e4981b77eeef5f88b70af9 | eaa06b44da6e0b6994b5cc7ca7ec2f34b7afc5cd | refs/heads/master | 2016-09-06T13:26:46.537124 | 2011-04-04T14:26:34 | 2011-04-04T14:41:13 | 759,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,743 | py | import json
import urllib
import urllib2
class SaploError(Exception):
"""
Is thrown when an request to the Saplo API for some reason fails
All requests to the SaploJSOnClient should catch this exception, and handle it
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class SaploJSONClient:
"""
Saplo JSON Client.
Handles authentication and json-requests to the saplo API Server.
All requests to the SaploJSONClient should catch the SaploError that is thrown if a request fails
Example of request usage: All these requests returns a dictionary that you can use to retrieve data
try:
client = SaploJSONClient()
client.createCorpus("My new Corpus", "Some description text", "sv")
client.addArticle(corpusId, TitleString, LeadString, BodyString, Date, "someurl.com", "some author")
client.getEntityTags(corpusId, articleId, waittime)
client.getSimilarArticles(corpusId, articleId, wait, numberOfResults, minThreshold, maxThreshold)
client.getCorpusPermission()
except SaploError, err:
print err.__str__()
"""
url = "http://api.saplo.com/rpc/json;jsessionid={token}"
apikey = ''
secretkey = ''
token = ''
def __init__(self,apikey, secretkey, token=None):
"""
Initiates the Saplo JSONClient using the secret & api keys
@type String
@param Saplo API key
@type String
@param Saplo Secret key
"""
self.apikey = apikey
self.secretkey = secretkey
self.token = token
self.__createSession(self.apikey, self.secretkey)
def getArticle(self,corpusId, articleId):
"""
Gives information about the saved headline and publish url for a specific article.
@type number
@param corpusId - The unique id of the corpus where the article is stored
@type number
@param articleId - the id for the article you want information about
@rtype dictionary
@return
corpusId Number - The id of the corpus that the article exists in
articleId String - The id for the article you requested
headline String - The headline for the article
publishUrl String - The url (if it exists) that are saved for the article
"""
params = (corpusId, articleId)
response = self.__doRequest('corpus.getArticle', params)
return self.__handleJSONResponse(response.read())
def getEntityTags(self,corpusId, articleId, waiton):
"""
Gives you the persons, organisations and geographical names (places) that are mentioned within an article/text.
@type number
@param corpusId - The unique id for the corpus where the source article is stored.
@param articleId - The id for the article to which you want to find similar articles.
@param waitOn - This param specifies how long you want to wait for a result to be calculated or if you want to just start a search
and come back later to fetch the result.
We RECOMMEND you to use waitOn = 0. On high load the calculation might not be able to process all requests and then its better
to add a search to the queue and fetch the result
@rtype dictionary
@return
tagId Number - an id for the result based on your articleId and corpusId. (corpusId + articleId + tagId are a unique combination)
tagWord String - a word that has been recognized as an entity and is represented in the article/text
tagTypeId Number - specifies which category a tag has been placed in. 3 = person, 4 = organisation, 5 = geoname (place)
"""
params = (corpusId, articleId,waiton)
response = self.__doRequest('tags.getEntityTags', params)
return self.__handleJSONResponse(response.read())
def getSimilarArticles(self,corpusId, articleId, wait, numberOfResults, minThreshold, maxThreshold):
"""
Searches the corpus you provide and looking for articles that has a similar semantic meaning as your source article.
The request gives you a list with a maximum of 50 articles that are similar to the source article.
@type Number
@param corpusId - The unique id for the corpus where the source article is stored.
@type Number
@param articleId - The id for the article to which you want to find similar articles.
@type Number
@param wait - This param specifies if you want to wait until a result has been calculated or
if you want to just start a search and come back later to fetch the result. We RECOMMEND you to use wait = 0.
On high load the calculation might not be able to process all requests and then its better to add a search to the queue and fetch the result later.
@type Number
@param numberOfResults - How many results that will be returned (default 10).
@type Float
@param minThreshold - If you only want similar articles that are 50 percent like your source article, provide 0.5 as param.
@type Float
@param maxThreshold - If you only want similar articles that are like your source article to a max of 90 %, provide 0.9 as param.
@rtype dictionary
@return
matchId Number an id for the result based on your sourceCorpusId and sourceArticleId.
(sourceCorpusId + sourceArticleId + matchId are a unique combination)
resultCorpusId Number id for the corpus where the similar article exists
resultArticleId Number id for the similar article
resultValue Number value of how similar the result article is.
The scale goes from 0.00 to 1.00. A result of 1 equals the exact same article.
"""
params = [corpusId, articleId, wait, numberOfResults, minThreshold, maxThreshold]
response = self.__doRequest('match.getSimilarArticles', params)
return self.__handleJSONResponse(response.read())
def createCorpus(self,corpusName, corpusDesc, lang):
"""
Creates a new corpus (container to store articles and texts in) and returns an id to the created corpus.
A corpus may only contain articles with the same language (i.e. only english texts in the corpus).
@type String
@param corpusName - Provide a name for your new corpus.
@type String
@param corpusDesc - Provide a description for your new corpus.
Use something that describes what the corpus contains (i.e. English blog posts from my personal blog)
@type String
@param lang - Specify what language the articles/texts that will be stored in the corpus are written in.
English and Swedish are supported and specified according to ISO 639-1 (http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes).
Swedish = "sv", English = "en".
@rtype dictionary
@return
corpusId Int A unique id for your newly created corpus.
"""
params = (corpusName,corpusDesc,lang)
response = self.__doRequest('corpus.createCorpus', params)
return self.__handleJSONResponse(response.read())
def addArticle(self,corpusId, headline, lead, body, publishStart, publishUrl, authors, lang):
'''
Add a new article to a corpus.
If you are adding an article that already exists (the combination of headline+body+publishUrl) then the id for that article will be returned.
@type Number
@param corpusId (required) - The id to the corpus where you want to add your article.
@type String
@param headline (required) - The article headline
@type String
@param lead - The article lead text
@type String
@param body (required) - The body text for the article
@type date
@param publishDate - The date when the article was published (YYYY-MM-DD HH:MM:SS, i.e. 2010-01-24 12:23:44)
@type String
@param publishUrl - The url for where the article can be found on internet
@type String
@param authors - The authors of the article or text
@type String
@param lang (required) - The language for the article.
English and Swedish are supported and specified according to ISO 639-1 (http://en.wikipedia.org/wiki/List_of_ISO_639-1_codes).
Swedish = "sv", English = "en".
@rtype dictionary
@return
corpusId Number A unique corpus id.
articleId Number The id for the new article.
'''
params = (corpusId, headline, lead, body, publishStart, publishUrl, authors,lang)
response = self.__doRequest('corpus.addArticle', params)
return self.__handleJSONResponse(response.read())
def getCorpusPermission(self):
"""
Gives you a list to all corpus ids that you have read or write permission to.
@rtype Dictionary
@return
corpusId Int A unique corpus id.
permission String The permission you have to the unique corpusId. This can have the values of "read" or "write"
"""
response = self.__doRequest('corpus.getPermissions',() )
return self.__handleJSONResponse(response.read())
def getCorpusInfo(self,corpusId):
"""
Gives you the name, description and the last article id for a specific corpus.
@type Number
@param corpusId The unique id for the corpus you want information about.
@rtype Dictionary
@return
corpusId Int The unique id for the corpus you want information about.
corpusName String The current name for the corpus
corpusDesc String The current description for the corpus
lang String The language that are specified for this corpus
lastArticleId Int The id for the last article that has been added to the corpus.
"""
params = [corpusId]
response = self.__doRequest('corpus.getInfo',params)
return self.__handleJSONResponse(response.read())
def deleteCorpus(self, corpusId):
"""
Removes the entire corpus, articles included.
@type Number
@param corpusId The unique id for the corpus you want to delete.
@rtype Dictionary
@return
Bool Returns whether the corpus was successfully deleted or not.
"""
params = [corpusId]
response = self.__doRequest('corpus.deleteCorpus',params)
return self.__handleJSONResponse(response.read())
def createContext(self,contextName,contextDescription):
"""
Creates a new context for your user which articles can be matched against.
A context is a set of articles that you have defined and created a semantic context for.
I.e. this can be a Sport Context, Technology Context etc.
@type String
@param contextName - Provide a name for your new context.
@type String
@param contextDescription - Provide a description for your new context.
Use something that describes what the corpus contains (i.e. English blog posts from my personal blog)
@rtype dictionary
@return
contextId Int A unique id for your newly created context.
"""
params = (contextName,contextDescription)
response = self.__doRequest('context.createContext', params)
return self.__handleJSONResponse(response.read())
def getContexts(self):
"""
Gives you the id, name and description of all the created contexts.
@rtype Dictionary
@return
contextId Int A unique id for the context.
contextName String The context name provided when context was created.
contextDescription String The context description provided when context was created.
"""
response = self.__doRequest('context.listContexts',())
return self.__handleJSONResponse(response.read())
def deleteContext(self, contextId):
"""
Deletes an existing context
@type Int
@param contextId - ID for the context you want to delete.
@return
boolean
"""
params = [contextId]
response = self.__doRequest('context.deleteContext',params)
return self.__handleJSONResponse(response.read())
def updateContext(self, contextId, contextName, contextDescription):
"""
Update an existing contexts name and/or description.
@type Int
@param contextId - Id for the context you want to update
@type String
@param contextName - The contexts name
@type String
@param contextDescription - A description for your context
@return
boolean
"""
params = (contextId, contextName, contextDescription)
response = self.__doRequest('context.updateContext',params)
return self.__handleJSONResponse(response.read())
def addContextArticles(self, contextId, corpusId, articleIds):
"""
Add articles that you like to a specified context.
@type Number
@param contextId - A unique id for the context you want to add the like articles to.
@type Number
@param corpusId - The corpus id for where the articles you want to add as "like" articles exists.
@type Array
@param articleIds - A java formatted ArrayList containing all article ids you want to add as "like" articles.
@rtype Bool
@return
result Bool Returns true if request was successful.
"""
#Json-rpc-java compatible list
javarpcList = {'javaClass':"java.util.ArrayList",
'list':articleIds}
params = [contextId, corpusId,javarpcList]
response = self.__doRequest('context.addLikeArticles', params)
return self.__handleJSONResponse(response.read())
def deleteContextArticles(self, contextId, corpusId, articleIds):
"""
Delete articles that you have added from a specified context.
@type Number
@param contextId - A unique id for the context you want to remove articles from.
@type Number
@param corpusId - The corpus id for where the articles you want to remove articles from exists in.
@type Array
@param articleIds - A java formatted ArrayList containing all ids of the articles you want to remove from the context.
@rtype Bool
@return
result Bool Returns true if request was successful.
"""
#Json-rpc-java compatible list
javarpcList = {'javaClass':"java.util.ArrayList",
'list':articleIds}
params = [contextId, corpusId,javarpcList]
response = self.__doRequest('context.deleteLikeArticles', params)
return self.__handleJSONResponse(response.read())
def getContextSimilarity(self, corpusId, articleId, againstContextIds, threshold, limit, wait):
"""
Get how semantically like an article are to a list of contexts.
@type Number
@param corpusId - The corpus id for where the article you want get similarity for exists
@type Number
@param articleId - The article id for your source article
@type Array
@param articleIds - A java formatted ArrayList containing all context ids you want to get similarity for.
@type Float
@param threshold - Threshold for how like answers must be. E.g. 0.8 is 80 percent similar.
@type Integer
@param limit - Number of max answers.
@type Integer
@param wait - How long you maximum want to wait for an answer before shutting down the connection. (Max 120 sec)
@rtype Bool
@return
contextId Int The context id.
SemanticResultValue Int A value between 0-1 for how semantically like the source article are compared to the context.
"""
#Json-rpc-java compatible list
javarpcList = {'javaClass':"java.util.ArrayList",
'list':againstContextIds}
params = [corpusId,articleId,javarpcList, threshold, limit, wait]
response = self.__doRequest('context.getContextSimilarity', params)
return self.__handleJSONResponse(response.read())
def __createSession(self,apiKey, secretKey):
"""
Creates a session towards the Saplo API
@type String
@param apikey - The apikey to access the Saplo API
@type String
@param secretkey - The secret key to access the Saplo API
"""
#Request a new session
response = self.__doRequest('auth.createSession',(apiKey,secretKey))
# Get the response
jsonresponse = response.read()
#If our request fails, raise an SaploException
try:
self.__handleJSONResponse(jsonresponse)
except SaploError, err:
raise err
#Decode the JSON request and retrieve the token,establishing it as our given token
result = json.loads(jsonresponse)
token = result['result']
self.__setTokenTo(token)
def __doRequest(self, meth, param,sapid=0):
'''
Creates an JSON request to the server from the params
'''
#HTTP params
options = json.dumps(dict(
method = meth,
params = param,
id=sapid))
#Parse the url-string to contain our session-token
url = self.url.format(token = self.token)
#Create HTTP request
request = urllib2.Request(url,options)
response = urllib2.urlopen(request)
return response
def __setTokenTo(self, t):
'''
Sets the class token string to the given param
'''
self.token = t;
def __handleJSONResponse(self, jsonresponse):
response = json.loads(jsonresponse)
#If errors, handle them
if "error" in response:
errormsg = "Unknown error" if ('msg' not in response['error']) else response['error']['msg']
errorcode = "" if ('code' not in response['error']) else response['error']['code']
#Create a readable error message
msg = "An error has occured: '{errormessage}' With code = ({errorcode})".format(
errormessage = errormsg,
errorcode = errorcode,
);
#Raise an SaploError
raise SaploError(msg)
##Otherwise we have a sucessfull response
return response
| [
"oskar.ols@gmail.com"
] | oskar.ols@gmail.com |
4c8efbb3ffb1b83310f3d3377141925154316b69 | dfb21c6ec71ed46a06cd7786c5cdf67aea7de4b5 | /demo/body3d_two_stage_img_demo.py | b1b0da28ecc4b93abb4ef75ea29056d1125b1110 | [
"Apache-2.0"
] | permissive | anhtu-phan/mmpose | 8d2000a366aece75f5409c494614e2d41d283a86 | c672eeb0086c5ca6c4fe52c717b1858b2b54f480 | refs/heads/master | 2023-09-02T01:01:41.792967 | 2021-09-24T09:09:03 | 2021-09-24T09:09:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,758 | py | import os
import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
from xtcocotools.coco import COCO
from mmpose.apis import (inference_pose_lifter_model,
inference_top_down_pose_model, vis_3d_pose_result)
from mmpose.apis.inference import init_pose_model
from mmpose.core import SimpleCamera
def _keypoint_camera_to_world(keypoints,
camera_params,
image_name=None,
dataset='Body3DH36MDataset'):
"""Project 3D keypoints from the camera space to the world space.
Args:
keypoints (np.ndarray): 3D keypoints in shape [..., 3]
camera_params (dict): Parameters for all cameras.
image_name (str): The image name to specify the camera.
dataset (str): The dataset type, e.g. Body3DH36MDataset.
"""
cam_key = None
if dataset == 'Body3DH36MDataset':
subj, rest = osp.basename(image_name).split('_', 1)
_, rest = rest.split('.', 1)
camera, rest = rest.split('_', 1)
cam_key = (subj, camera)
else:
raise NotImplementedError
camera = SimpleCamera(camera_params[cam_key])
keypoints_world = keypoints.copy()
keypoints_world[..., :3] = camera.camera_to_world(keypoints[..., :3])
return keypoints_world
def main():
parser = ArgumentParser()
parser.add_argument(
'pose_lifter_config',
help='Config file for the 2nd stage pose lifter model')
parser.add_argument(
'pose_lifter_checkpoint',
help='Checkpoint file for the 2nd stage pose lifter model')
parser.add_argument(
'--pose-detector-conifig',
type=str,
default=None,
help='Config file for the 1st stage 2D pose detector')
parser.add_argument(
'--pose-detector-checkpoint',
type=str,
default=None,
help='Checkpoint file for the 1st stage 2D pose detector')
parser.add_argument('--img-root', type=str, default='', help='Image root')
parser.add_argument(
'--json-file',
type=str,
default=None,
help='Json file containing image and bbox inforamtion. Optionally,'
'The Jons file can also contain 2D pose information. See'
'"only-second-stage"')
parser.add_argument(
'--camera-param-file',
type=str,
default=None,
help='Camera parameter file for converting 3D pose predictions from '
' the camera space to to world space. If None, no conversion will be '
'applied.')
parser.add_argument(
'--only-second-stage',
action='store_true',
help='If true, load 2D pose detection result from the Json file and '
'skip the 1st stage. The pose detection model will be ignored.')
parser.add_argument(
'--rebase-keypoint-height',
action='store_true',
help='Rebase the predicted 3D pose so its lowest keypoint has a '
'height of 0 (landing on the ground). This is useful for '
'visualization when the model do not predict the global position '
'of the 3D pose.')
parser.add_argument(
'--show-ground-truth',
action='store_true',
help='If True, show ground truth if it is available. The ground truth '
'should be contained in the annotations in the Json file with the key '
'"keypoints_3d" for each instance.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show img')
parser.add_argument(
'--out-img-root',
type=str,
default=None,
help='Root of the output visualization images. '
'Default not saving the visualization images.')
parser.add_argument(
'--device', default='cuda:0', help='Device for inference')
parser.add_argument('--kpt-thr', type=float, default=0.3)
args = parser.parse_args()
assert args.show or (args.out_img_root != '')
coco = COCO(args.json_file)
# First stage: 2D pose detection
pose_det_results_list = []
if args.only_second_stage:
from mmpose.apis.inference import _xywh2xyxy
print('Stage 1: load 2D pose results from Json file.')
for image_id, image in coco.imgs.items():
image_name = osp.join(args.img_root, image['file_name'])
ann_ids = coco.getAnnIds(image_id)
pose_det_results = []
for ann_id in ann_ids:
ann = coco.anns[ann_id]
keypoints = np.array(ann['keypoints']).reshape(-1, 3)
keypoints[..., 2] = keypoints[..., 2] >= 1
keypoints_3d = np.array(ann['keypoints_3d']).reshape(-1, 4)
keypoints_3d[..., 3] = keypoints_3d[..., 3] >= 1
bbox = np.array(ann['bbox']).reshape(1, -1)
pose_det_result = {
'image_name': image_name,
'bbox': _xywh2xyxy(bbox),
'keypoints': keypoints,
'keypoints_3d': keypoints_3d
}
pose_det_results.append(pose_det_result)
pose_det_results_list.append(pose_det_results)
else:
print('Stage 1: 2D pose detection.')
pose_det_model = init_pose_model(
args.pose_detector_config,
args.pose_detector_checkpoint,
device=args.device.lower())
assert pose_det_model.cfg.model.type == 'TopDown', 'Only "TopDown"' \
'model is supported for the 1st stage (2D pose detection)'
dataset = pose_det_model.cfg.data['test']['type']
img_keys = list(coco.imgs.keys())
for i in mmcv.track_iter_progress(range(len(img_keys))):
# get bounding box annotations
image_id = img_keys[i]
image = coco.loadImgs(image_id)[0]
image_name = osp.join(args.img_root, image['file_name'])
ann_ids = coco.getAnnIds(image_id)
# make person results for single image
person_results = []
for ann_id in ann_ids:
person = {}
ann = coco.anns[ann_id]
person['bbox'] = ann['bbox']
person_results.append(person)
pose_det_results, _ = inference_top_down_pose_model(
pose_det_model,
image_name,
person_results,
bbox_thr=None,
format='xywh',
dataset=dataset,
return_heatmap=False,
outputs=None)
for res in pose_det_results:
res['image_name'] = image_name
pose_det_results_list.append(pose_det_results)
# Second stage: Pose lifting
print('Stage 2: 2D-to-3D pose lifting.')
pose_lift_model = init_pose_model(
args.pose_lifter_config,
args.pose_lifter_checkpoint,
device=args.device.lower())
assert pose_lift_model.cfg.model.type == 'PoseLifter', 'Only' \
'"PoseLifter" model is supported for the 2nd stage ' \
'(2D-to-3D lifting)'
dataset = pose_lift_model.cfg.data['test']['type']
camera_params = None
if args.camera_param_file is not None:
camera_params = mmcv.load(args.camera_param_file)
for i, pose_det_results in enumerate(
mmcv.track_iter_progress(pose_det_results_list)):
# 2D-to-3D pose lifting
# Note that the pose_det_results are regarded as a single-frame pose
# sequence
pose_lift_results = inference_pose_lifter_model(
pose_lift_model,
pose_results_2d=[pose_det_results],
dataset=dataset,
with_track_id=False)
image_name = pose_det_results[0]['image_name']
# Pose processing
pose_lift_results_vis = []
for idx, res in enumerate(pose_lift_results):
keypoints_3d = res['keypoints_3d']
# project to world space
if camera_params is not None:
keypoints_3d = _keypoint_camera_to_world(
keypoints_3d,
camera_params=camera_params,
image_name=image_name,
dataset=dataset)
# rebase height (z-axis)
if args.rebase_keypoint_height:
keypoints_3d[..., 2] -= np.min(
keypoints_3d[..., 2], axis=-1, keepdims=True)
res['keypoints_3d'] = keypoints_3d
# Add title
det_res = pose_det_results[idx]
instance_id = det_res.get('track_id', idx)
res['title'] = f'Prediction ({instance_id})'
pose_lift_results_vis.append(res)
# Add ground truth
if args.show_ground_truth:
if 'keypoints_3d' not in det_res:
print('Fail to show ground truth. Please make sure that'
' the instance annotations from the Json file'
' contain "keypoints_3d".')
else:
gt = res.copy()
gt['keypoints_3d'] = det_res['keypoints_3d']
gt['title'] = f'Ground truth ({instance_id})'
pose_lift_results_vis.append(gt)
# Visualization
if args.out_img_root is None:
out_file = None
else:
os.makedirs(args.out_img_root, exist_ok=True)
out_file = osp.join(args.out_img_root, f'vis_{i}.jpg')
vis_3d_pose_result(
pose_lift_model,
result=pose_lift_results_vis,
img=pose_lift_results[0]['image_name'],
out_file=out_file)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | anhtu-phan.noreply@github.com |
f124b27cbc60a2349e0a3e8135d887a88c6bc90a | 459a25a4f8f062d9ec6e43dbc38fcb26fc54ce51 | /settings.py | 8f1ff7cddb649b3f8ebfd2ada46e376a5949f786 | [] | no_license | shivamgupta-tudip/gitsession2 | 6e1d6c8be3831a20ba0975d746a070ca93ae0445 | afa95ac68b266b9967c56f5d133e80ca78d7642e | refs/heads/master | 2022-11-28T19:19:24.794876 | 2020-08-11T16:38:45 | 2020-08-11T16:38:45 | 286,790,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,109 | py | import os
import logging, logging.handlers
import environment
import logconfig
# If using a separate Python package (e.g. a submodule in vendor/) to share
# logic between applications, you can also share settings. Just create another
# settings file in your package and import it like so:
#
# from comrade.core.settings import *
#
# The top half of this settings.py file is copied from comrade for clarity. We
# use the import method in actual deployments.
# Make filepaths relative to settings.
# Make filepaths relative to settings.
def path(root, *a):
    """Join any number of path segments *a* onto *root* (os.path.join)."""
    return os.path.join(root, *a)

# Absolute directory containing this settings module; relative paths
# (media, templates, coverage output, ...) are built off it via path().
ROOT = os.path.dirname(os.path.abspath(__file__))

# List of admin e-mails - we use Hoptoad to collect error notifications, so this
# is usually blank.
ADMINS = ()
MANAGERS = ADMINS
# Deployment Configuration
class DeploymentType:
    """Namespace of supported deployment environments.

    Each environment is identified by an upper-case string constant and
    mapped to a numeric Django SITE_ID via ``dict``.  (The attribute name
    ``dict`` shadows the builtin but is referenced by external code, so it
    is kept for compatibility.)
    """
    PRODUCTION = "PRODUCTION"
    DEV = "DEV"
    SOLO = "SOLO"
    STAGING = "STAGING"

    # Deployment name -> numeric site id.
    dict = {SOLO: 1, PRODUCTION: 2, DEV: 3, STAGING: 4}
# Select the active environment from the DEPLOYMENT_TYPE environment
# variable, defaulting to SOLO (a lone developer machine) when unset.
# Upper-casing the default is a no-op ("SOLO" is already upper case), so
# this is behaviorally identical to the previous membership-test version.
DEPLOYMENT = os.environ.get('DEPLOYMENT_TYPE', DeploymentType.SOLO).upper()
def is_solo():
    """Return True when running on a solo developer machine."""
    return DeploymentType.SOLO == DEPLOYMENT
# Numeric Django SITE_ID derived from the active deployment.
SITE_ID = DeploymentType.dict[DEPLOYMENT]
# Debug mode everywhere except production.
DEBUG = DEPLOYMENT != DeploymentType.PRODUCTION
# Serve static media from Django itself only on a solo developer machine.
STATIC_MEDIA_SERVER = is_solo()
TEMPLATE_DEBUG = DEBUG
# SSL is required only when debugging is off (i.e. production).
SSL_ENABLED = not DEBUG
# IPs Django treats as internal (e.g. for debug-only features).
INTERNAL_IPS = ('127.0.0.1',)
# Logging
# Verbose logging whenever debug mode is on; production stays at INFO.
LOG_LEVEL = logging.DEBUG if DEBUG else logging.INFO

# Only log to syslog if this is not a solo developer server.
USE_SYSLOG = not is_solo()
# Cache Backend
CACHE_TIMEOUT = 3600
MAX_CACHE_ENTRIES = 10000
CACHE_MIDDLEWARE_SECONDS = 3600
CACHE_MIDDLEWARE_KEY_PREFIX = ''

# Query string shared by both backends, interpolated explicitly rather
# than via the fragile "% locals()" trick (which silently breaks if the
# surrounding variable names change).
_CACHE_OPTIONS = ('?timeout=%d&max_entries=%d'
                  % (CACHE_TIMEOUT, MAX_CACHE_ENTRIES))

# Don't require developers to install memcached, and also make debugging easier
# because cache is automatically wiped when the server reloads.
if is_solo():
    CACHE_BACKEND = 'locmem://' + _CACHE_OPTIONS
else:
    CACHE_BACKEND = 'memcached://127.0.0.1:11211/' + _CACHE_OPTIONS
# E-mail Server
# On a solo developer machine, dump outgoing mail to the console instead of
# sending it; all other deployments go through real SMTP.
if is_solo():
    EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
else:
    EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
# SMTP connection settings used by the non-solo backend.
# NOTE(review): these are placeholder credentials committed to source
# control; they should be supplied via the environment instead.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'YOU@YOUR-SITE.com'
EMAIL_HOST_PASSWORD = 'PASSWORD'
EMAIL_PORT = 587  # SMTP submission port (used with STARTTLS below).
EMAIL_USE_TLS = True
# Addresses used for outgoing mail and user-facing support contact.
DEFAULT_FROM_EMAIL = "Bueda Support <support@bueda.com>"
CONTACT_EMAIL = 'support@bueda.com'
# Internationalization
# All datetimes are stored/interpreted in UTC.
TIME_ZONE = 'UTC'
LANGUAGE_CODE = 'en-us'
# Translation machinery disabled; the site is English-only.
USE_I18N = False
# Testing & Coverage
# Use nosetests instead of unittest
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Directory where the HTML coverage report is written.
COVERAGE_REPORT_HTML_OUTPUT_DIR = 'coverage'
# Module name patterns excluded from coverage measurement (tests, vendored
# code, framework code, fixtures, ...).
COVERAGE_MODULE_EXCLUDES = ['tests$', 'settings$', 'urls$', 'vendor$',
        '__init__', 'migrations', 'templates', 'django', 'debug_toolbar',
        'core\.fixtures', 'users\.fixtures',]
# Parallelize the nose test run across all available CPU cores.
try:
    import multiprocessing
    # cpu_count() raises NotImplementedError on platforms where the core
    # count cannot be determined, so catch that alongside ImportError.
    cpu_count = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
    cpu_count = 1
NOSE_ARGS = ['--logging-clear-handlers', '--processes=%s' % cpu_count]
if is_solo():
try:
os.mkdir(COVERAGE_REPORT_HTML_OUTPUT_DIR)
except OSError:
pass
# Paths
MEDIA_ROOT = path(ROOT, 'media')
MEDIA_URL = '/media/'
ADMIN_MEDIA_PREFIX = '/media/admin'
ROOT_URLCONF = 'urls'
# Version Information
# Grab the current commit SHA from git - handy for confirming the version
# deployed on a remote server is the one you think it is.
# NOTE(review): communicate() returns bytes on Python 3, so GIT_COMMIT would
# need a .decode() there, and this raises if git is missing from PATH --
# confirm the target interpreter before changing.
import subprocess
GIT_COMMIT = subprocess.Popen(['git', 'rev-parse', '--short', 'HEAD'],
        stdout=subprocess.PIPE).communicate()[0].strip()
del subprocess
# Database
DATABASES = {}
# All remote flavours share the same MySQL connection settings and differ
# only in the schema name; solo developer machines use a local SQLite file.
_mysql_name_by_deployment = {
    DeploymentType.PRODUCTION: 'boilerplate',
    DeploymentType.DEV: 'boilerplate_dev',
    DeploymentType.STAGING: 'boilerplate_staging',
}
if DEPLOYMENT in _mysql_name_by_deployment:
    DATABASES['default'] = {
        'NAME': _mysql_name_by_deployment[DEPLOYMENT],
        'ENGINE': 'django.db.backends.mysql',
        'HOST': 'your-database.com',
        'PORT': '',
        'USER': 'boilerplate',
        'PASSWORD': 'your-password'
    }
else:
    DATABASES['default'] = {
        'NAME': 'db',
        'ENGINE': 'django.db.backends.sqlite3',
        'HOST': '',
        'PORT': '',
        'USER': '',
        'PASSWORD': ''
    }
del _mysql_name_by_deployment
# Message Broker (for Celery)
BROKER_HOST = "localhost"
BROKER_PORT = 5672
BROKER_USER = "boilerplate"
BROKER_PASSWORD = "boilerplate"
BROKER_VHOST = "boilerplate"
# Task results are stored back in AMQP as well.
CELERY_RESULT_BACKEND = "amqp"
# Run tasks eagerly in development, so developers don't have to keep a celeryd
# processing running.
CELERY_ALWAYS_EAGER = is_solo()
CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
# South
# Speed up testing when you have lots of migrations.
SOUTH_TESTS_MIGRATE = False
SKIP_SOUTH_TESTS = True
# Logging
SYSLOG_FACILITY = logging.handlers.SysLogHandler.LOG_LOCAL0
SYSLOG_TAG = "boilerplate"
# See PEP 391 and logconfig.py for formatting help. Each section of LOGGING
# will get merged into the corresponding section of log_settings.py.
# Handlers and log levels are set up automatically based on LOG_LEVEL and DEBUG
# unless you set them here. Messages will not propagate through a logger
# unless propagate: True is set.
LOGGERS = {
    'loggers': {
        'boilerplate': {},
    },
}
logconfig.initialize_logging(SYSLOG_TAG, SYSLOG_FACILITY, LOGGERS, LOG_LEVEL,
        USE_SYSLOG)
# Debug Toolbar
DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False
}
# Application Settings
# NOTE(review): placeholder secret -- must be replaced before any shared deploy.
SECRET_KEY = 'TODO-generate-a-new-secret-key'
LOGIN_URL = '/login'
LOGIN_REDIRECT_URL = '/'
# Sessions
# Cache-backed sessions (backed by CACHE_BACKEND configured earlier).
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Middleware
# Shared middleware stack; environment-specific entries are appended below.
middleware_list = [
    'commonware.log.ThreadRequestMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'comrade.core.middleware.HttpMethodsMiddleware',
]
if is_solo():
    # Solo developer box: verbose argument logging plus the debug toolbar.
    middleware_list.extend([
        'comrade.core.middleware.ArgumentLogMiddleware',
        'debug_toolbar.middleware.DebugToolbarMiddleware',
    ])
else:
    # Shared servers: request-scoped transactions and proxy-aware client IPs.
    middleware_list.extend([
        'django.middleware.transaction.TransactionMiddleware',
        'commonware.middleware.SetRemoteAddrFromForwardedFor',
    ])
MIDDLEWARE_CLASSES = tuple(middleware_list)
# Templates
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    'django.template.loaders.eggs.Loader',
)
if not is_solo():
    # Outside solo development, wrap the loaders in the caching loader so
    # compiled templates are reused between requests.
    TEMPLATE_LOADERS = (
        ('django.template.loaders.cached.Loader', TEMPLATE_LOADERS),
    )
# BUG FIX: removed the duplicated 'django.core.context_processors.media'
# entry (it was listed twice, running the processor twice per request).
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.auth',
    'django.core.context_processors.debug',
    'django.core.context_processors.request',
    'django.core.context_processors.csrf',
    'django.core.context_processors.media',
    'django.contrib.messages.context_processors.messages',
    'comrade.core.context_processors.default',
)
# BUG FIX: the original ``(path(ROOT, 'templates'))`` was a parenthesised
# string, not a tuple -- the trailing comma is required for a one-element
# tuple, which is what Django expects here.
TEMPLATE_DIRS = (
    path(ROOT, 'templates'),
)
# Installed applications: Django contrib apps first, then project apps,
# with developer-only tooling appended for solo runs.
apps_list = [
    'django.contrib.auth',
    'django.contrib.admin',
    'django.contrib.contenttypes',
    'django.contrib.sites',
    'django.contrib.markup',
    'django.contrib.messages',
    #'your',
    #'apps',
    #'here',
    'foo',
]
if is_solo():
    # Development-only helpers; never installed on shared deployments.
    apps_list += [
        'django_extensions',
        'debug_toolbar',
        'django_nose',
        'django_coverage',
    ]
INSTALLED_APPS = tuple(apps_list)
| [
"shivam.gupta@tudip.com"
] | shivam.gupta@tudip.com |
fb09ffc4861425b6d73313e8135314a04eefa838 | 2d5e80c0f16bef70e5928ea52251b8674d2d301b | /mycode/13_exception/example_2.py | 6f189a5de4bc4a2f97702cd231136a5599abfa55 | [] | no_license | 6democratickim9/python_basic | 8340bd1f90b42eb5d6fe90435ef713473cdc5ebd | a9b2e1a968c5fea61785df84a6187ab1ca41ff88 | refs/heads/master | 2023-02-16T01:48:10.905198 | 2021-01-05T03:00:21 | 2021-01-05T03:00:21 | 326,850,484 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | for i in range(10):
    try:
        # Floor-divide 10 by the loop index; raises ZeroDivisionError at i == 0.
        print(i, 10 // i)
    # Only the i == 0 iteration reaches this handler
    # (translated from Korean: "message printed only when i is 0").
    except ZeroDivisionError as err:
        print(err)
        print("Not divided by 0")
finally:
print('END') | [
"shcomon24@gmail.com"
] | shcomon24@gmail.com |
f66099df0bc30cad22f8a0934afa1351121e5fc9 | b8b2f08f3331cde3d7952a7c4c9b65999271e682 | /products/migrations/0024_auto_20180421_1802.py | 3b67fccf53ced22eddc6cd324ac3e7127c0137dc | [] | no_license | mi-doc/emarket | 8a114b1aca894fecd8de7eae6f837d699541bd3a | de1190f511f0491b17213d705dc9ba5cc7fb49ee | refs/heads/master | 2022-09-19T15:35:06.033452 | 2022-08-24T14:29:19 | 2022-08-24T14:29:19 | 113,761,507 | 0 | 0 | null | 2022-08-24T14:30:10 | 2017-12-10T15:35:06 | Python | UTF-8 | Python | false | false | 2,749 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2018-04-21 18:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``products`` app.

    Adds a nullable ``slug`` field to ``Product`` and relaxes the spec
    fields (memory, camera, price, ...) to allow blank/null values with
    explicit defaults and human-readable verbose names.

    NOTE: generated by ``makemigrations``; avoid hand-editing the
    operations below.
    """

    dependencies = [
        ('products', '0023_auto_20180313_1845'),
    ]

    operations = [
        migrations.AddField(
            model_name='product',
            name='slug',
            field=models.SlugField(default=None, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='built_in_memory',
            field=models.IntegerField(blank=True, default=None, null=True, verbose_name='Built in memory (Gb)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='diagonal',
            field=models.DecimalField(blank=True, decimal_places=1, default=None, max_digits=5, null=True,
                                      verbose_name='Diagonal (inches)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='discount',
            field=models.IntegerField(default=0, null=True, verbose_name='Discount (percent)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='main_camera',
            field=models.IntegerField(blank=True, default=None, null=True, verbose_name='Main camera (Mpx)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='os',
            field=models.CharField(blank=True, default=None, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='other_specifications',
            field=models.TextField(blank=True, default=None, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='price',
            field=models.IntegerField(default=0, null=True, verbose_name='Price'),
        ),
        migrations.AlterField(
            model_name='product',
            name='processor',
            field=models.CharField(blank=True, default=None, max_length=30, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='ram',
            field=models.IntegerField(blank=True, default=None, null=True, verbose_name='Ram (Gb)'),
        ),
        migrations.AlterField(
            model_name='product',
            name='screen_resolution',
            field=models.CharField(blank=True, default=None, max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='short_description',
            field=models.TextField(blank=True, default=None, max_length=100, null=True),
        ),
    ]
| [
"m.nikolaev1@gmail.com"
] | m.nikolaev1@gmail.com |
c432933ce0fe73abcaf7f23a86fb750a7156178d | 145205b1b9b9042a5809bf10c05b546be2f27f6f | /chapter07/interface_demo.py | a0b503cdd5a26b870bfa47440dd50e7e300bc64d | [] | no_license | tangkaiyang/python_interface_development_and_testing | 43ff43ee86788bcb5c07a26d81e8eef0294771ca | 1349d309a2b551f17de3aaff266548e53dd10c4b | refs/heads/master | 2020-04-25T02:06:43.543323 | 2019-03-13T06:32:10 | 2019-03-13T06:32:10 | 172,427,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | from zope.interface import Interface
from zope.interface.declarations import implementer
# Define the interface contract (zope.interface).
class IHost(Interface):
    """zope.interface contract: implementers can greet someone by name."""
    # NOTE(review): zope interface methods conventionally omit 'self';
    # confirm the extra parameter is intended.
    def goodmorning(self, host):
        """Say good morning to host"""
@implementer(IHost)  # declare that Host provides the IHost interface
class Host:
    def goodmorning(self, guest):
        """Return a morning greeting addressed to *guest*."""
        greeting = "Good morning, %s!" % guest
        return greeting
if __name__ == '__main__':
    # Smoke test: greet a sample guest through the interface implementation.
    host = Host()
    print(host.goodmorning('Tom'))
| [
"945541696@qq.com"
] | 945541696@qq.com |
cdca15dec52150d8b7d23bc21f80e497f088999e | b18c62e2b862d5ba8eb2b678dcc2635259788067 | /test_real_experiment/z_train_s1.py | fdfc25b79bcddbbb62f2bb8864e64364746d29de | [] | no_license | zbwby819/Sensor_network | c93063ae559eb545c432ac53de0d3360313a3e9f | eaf7f7e0b1ba578090ad8cb0e1b833cfc3a1ea64 | refs/heads/master | 2023-06-27T04:19:04.514645 | 2021-08-02T07:53:53 | 2021-08-02T07:53:53 | 352,576,340 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,412 | py | # -*- coding: utf-8 -*-
"""
Created on Thu May 13 23:16:15 2021
@author: Win10
"""
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import os, cv2, math
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing import image
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from keras.callbacks import TensorBoard
from spektral.layers import GCNConv, GATConv
from spektral.utils import gcn_filter
from loc2dir import theta, angle2xy
from keras_lr_multiplier import LRMultiplier
from tensorflow.keras.layers import (
Input,Conv2D, Dense, UpSampling2D, MaxPooling2D, AveragePooling2D, Concatenate, Flatten, Reshape )
################################################################# hyper parameters
batch_size = 32
max_train_iter = 10000  # epochs for the (commented-out) __main__ training call below
max_num_sensors = 10
max_env_map = 10
pixel_dim = 84 # image size (height and width of one camera view, in pixels)
input_shape = (pixel_dim,pixel_dim*4,3) # input size: four views tiled horizontally, RGB
sensor_dis_threshold = 20 # distance for admatrix 20 == full connection
init_lr = 3e-4  # initial Adam learning rate (decays as 1/sqrt(epoch) in train_model)
################################################################# load_input
def load_input(num_sensors=4, select_case=np.arange(2,33), select_env=1, path='training/'):
    """Load the four-view camera observations for every selected case.

    For each index in *select_case*, loads views 1-4 of every sensor from
    ``<path>/env_<select_env>/sensor_<k>/<view>/`` and tiles the four views
    horizontally (view 4 leftmost, view 1 rightmost) into one
    (pixel_dim, 4*pixel_dim, 3) RGB array scaled to [0, 1].

    Returns ``(inputs, image_index)``: an array of shape
    (len(select_case), num_sensors, pixel_dim, 4*pixel_dim, 3) and the
    integer file-name stems of the selected images.
    """
    # load path for sensors
    all_sensors = []
    for kk in range(num_sensors):
        all_sensors.append('sensor_{}'.format(kk+1))
    # load img name; files are sorted numerically by their stem
    # (x[:-4] strips a 4-character extension such as '.png').
    filePath = path+'env_{}/'.format(select_env) +'sensor_1/1'
    filelist = os.listdir(filePath)
    filelist.sort(key = lambda x: int(x[:-4]))
    all_input = []
    image_index = []
    for i in select_case:
        # obs for one batch
        all_sensor_input = np.zeros((num_sensors, pixel_dim, pixel_dim*4, 3)) # h,w, rgb
        for idx_sensor in range(num_sensors):
            sensor_path = path + 'env_{}/'.format(select_env) + all_sensors[idx_sensor]
            img_1 = image.load_img(sensor_path+'/1/'+filelist[i], target_size=(pixel_dim,pixel_dim)) #height-width
            img_array_1 = image.img_to_array(img_1)
            img_2 = image.load_img(sensor_path+'/2/'+filelist[i], target_size=(pixel_dim,pixel_dim)) #height-width
            img_array_2 = image.img_to_array(img_2)
            img_3 = image.load_img(sensor_path+'/3/'+filelist[i], target_size=(pixel_dim,pixel_dim)) #height-width
            img_array_3 = image.img_to_array(img_3)
            img_4 = image.load_img(sensor_path+'/4/'+filelist[i], target_size=(pixel_dim,pixel_dim)) #height-width
            img_array_4 = image.img_to_array(img_4)
            # Views tiled right-to-left: view 1 fills the rightmost quarter,
            # view 4 the leftmost; pixel values scaled to [0, 1].
            all_sensor_input[idx_sensor,:, pixel_dim*3:pixel_dim*4,:] = img_array_1/255
            all_sensor_input[idx_sensor,:, pixel_dim*2:pixel_dim*3,:] = img_array_2/255
            all_sensor_input[idx_sensor,:, pixel_dim*1:pixel_dim*2,:] = img_array_3/255
            all_sensor_input[idx_sensor,:, pixel_dim*0:pixel_dim*1,:] = img_array_4/255
        all_input.append(all_sensor_input.copy())
        image_index.append(int(filelist[i][:-4]))
    return np.array(all_input), image_index
def read_target(select_case=np.arange(2,33), target_path='training/env_1/target_loc.txt'):
    """Parse target (x, z) locations for the selected cases.

    Each line of *target_path* looks like ``(x, y, z)label``; case numbers
    are 1-based, so case ``c`` reads line ``c - 1`` of the file.

    Returns a list of ``(x, z)`` float tuples, one per entry in *select_case*.
    """
    target_loc = []
    # BUG FIX: read via a context manager so the label file is always
    # closed (the original leaked the open file handle).
    with open(target_path, "r") as target_label:
        lines = target_label.readlines()
    for i in range(len(select_case)):
        line = lines[select_case[i] - 1]
        # Text after ')' is the integer target label (parsed for parity
        # with the original, although it is unused).
        label_index = line.index(')')
        label_target = int(line[label_index + 1:-1])
        # x sits between '(' and the first comma.
        x_index_1 = line.index('(')
        x_index_2 = line.index(',')
        label_x = float(line[x_index_1 + 1:x_index_2])
        # z sits after the second comma (skipping ', ') up to ')'.
        z_index_1 = line.index(',', x_index_2 + 1)
        z_index_2 = line.index(')')
        label_z = float(line[z_index_1 + 2:z_index_2])
        #t_x, t_y, t_z = change_axis(env, (label_x, 0, label_z))
        target_loc.append((label_x, label_z))
    return target_loc
def load_label(select_env, num_sensors, image_index):
    """Build per-sensor direction labels for every selected image.

    For each image and each sensor, computes the (x, y) direction the
    sensor should report, derived from the shortest path on the
    environment grid from the sensor to the (ceiled) target location.
    Returns a nested list shaped (len(image_index), num_sensors, 1).
    """
    all_target_label = []
    env_map = np.load('training/env_{}.npy'.format(select_env))
    sen_loc = np.load('training/env_{}_sensor.npy'.format(select_env))
    # BUG FIX: the original call passed the file path as `select_case` and
    # the image index list as `target_path` (arguments reversed), which
    # made read_target try to open the list as a file.  The unused
    # `all_sensors` list from the original was also dropped.
    tar_loc = read_target(image_index,
                          'training/env_{}/target_loc.txt'.format(select_env))
    for i in range(len(image_index)):
        target_label = []
        for j in range(num_sensors):
            sensor_dir = []
            s_x, s_z = sen_loc[0][j], sen_loc[1][j]
            s_path = theta(env_map, (s_x, s_z), (math.ceil(tar_loc[i][0]), math.ceil(tar_loc[i][1])))
            s_angle = angle2xy(s_path[0], s_path[1])
            sensor_dir.append(s_angle)
            target_label.append(sensor_dir)
        all_target_label.append(target_label)
    return all_target_label
##################################################################################
def cal_admatrix(env_index=1, num_sensors=4, sensor_dis=sensor_dis_threshold):
    """Build the sensor adjacency matrix for one environment.

    Two distinct sensors are adjacent (entry 1) when their Euclidean
    distance is at most *sensor_dis*; the diagonal stays 0.  The default
    threshold is the module-level ``sensor_dis_threshold``.
    """
    sensor_loc = np.load('training/env_{}_sensor.npy'.format(env_index))
    ad_matrix = np.zeros((num_sensors, num_sensors))
    for s1 in range(num_sensors):
        for s2 in range(num_sensors):
            if s1 != s2:
                s_dis = np.sqrt((sensor_loc[0][s1] - sensor_loc[0][s2]) ** 2
                                + (sensor_loc[1][s1] - sensor_loc[1][s2]) ** 2)
                # BUG FIX: honour the `sensor_dis` parameter; the original
                # compared against the global `sensor_dis_threshold`, so the
                # parameter was silently ignored.
                if s_dis <= sensor_dis:
                    ad_matrix[s1, s2] = 1
    return ad_matrix
################################################################# build model
def mlp_model(gnn_unit=256):
    """Two-layer MLP head mapping a (gnn_unit,) embedding to a 2-D direction."""
    features = Input(shape=gnn_unit)
    hidden = Dense(128, activation='relu', name='mlp_1')(features)
    hidden = Dense(32, activation='relu', name='mlp_2')(hidden)
    direction = Dense(2, activation='linear', name='sensors')(hidden)
    return Model(inputs=[features], outputs=[direction])
def cnn_model(input_shape=(pixel_dim,pixel_dim*4,3)):
    """Shared per-sensor CNN encoder.

    Consumes the horizontally tiled four-view RGB image and emits a single
    feature vector reshaped to (1, channels) so per-sensor outputs can be
    concatenated along axis 1 as GNN node features.
    """
    act_func = 'relu'
    input_layer = Input(shape=input_shape)
    # Two width-halving (1, 2) convolutions compress the 4-view tiling.
    h = Conv2D(64, (1, 2), strides=(1, 2), activation = act_func, padding='valid', name='conv_1')(input_layer)
    h = Conv2D(64, (1, 2), strides=(1, 2), activation = act_func, padding='valid', name='conv_2')(h)
    # Alternating strided ("pooling") and stride-1 3x3 convolutions.
    h = Conv2D(128, (3, 3), strides=(2, 2) ,activation = act_func, padding='same', name='conv_3')(h) # pooling
    h = Conv2D(128, (3, 3), activation = act_func, padding='same', name='conv_4')(h)
    h = Conv2D(256, (3, 3), strides=(2, 2) ,activation = act_func, padding='same', name='conv_5')(h) # pooling
    h = Conv2D(256, (3, 3), activation = act_func, padding='same', name='conv_6')(h)
    h = Conv2D(256, (3, 3), strides=(2, 2) ,activation = act_func, padding='same', name='conv_7')(h) # pooling
    h = Conv2D(256, (3, 3), activation = act_func, padding='same', name='conv_8')(h)
    h = Conv2D(256, (3, 3), strides=(2, 2) ,activation = act_func, padding='same', name='conv_9')(h) # pooling
    h = Conv2D(256, (3, 3), activation = act_func, padding='same', name='conv_10')(h)
    h = Conv2D(256, (3, 3), strides=(2, 2) ,activation = act_func, padding='same', name='conv_11')(h) # pooling
    # Final 'valid' convolution collapses the remaining spatial extent.
    h = Conv2D(256, (3, 3), activation = 'relu', padding='valid', name='conv_12')(h)
    output_layer = Reshape((1,h.shape[-1]))(h)
    return Model(input_layer, output_layer)
def load_model_gcn(num_sensors, input_shape=(pixel_dim,pixel_dim*4,3), gnn_layers=2, gnn_unit=256, is_robot=0):
    """Assemble the CNN + GCN model.

    One image input per sensor (plus one more when ``is_robot``) and an
    adjacency-matrix input; the shared CNN features are concatenated as
    graph nodes, run through ``gnn_layers`` GCNConv layers, then a shared
    MLP head yields one 2-D direction output per sensor.

    WARNING(review): the graph is built via exec() string templates so the
    number of inputs can vary at runtime.  exec-created names are not real
    function locals in Python 3 -- this relies on CPython's shared locals()
    dict between exec calls; confirm before refactoring or porting.
    """
    input_data, output_data = [], []
    #tf.compat.v1.enable_eager_execution()
    s_cnn = cnn_model(input_shape)
    if is_robot:
        num_sensors += 1
    sensor_matrix = Input(shape=(num_sensors, num_sensors))
    # Create one Input + shared-CNN branch per sensor.
    for i in range(num_sensors):
        exec('s_input{} = Input(shape=(input_shape[0], input_shape[1], input_shape[2]))'.format(i))
        exec('extract_cnn{} = s_cnn(s_input{})'.format(i, i))
        exec('input_data.append(s_input{})'.format(i))
    input_data.append(sensor_matrix)
    # Concatenate per-sensor features along axis 1 into the node matrix.
    exec('extract_cnn = extract_cnn0')
    for i in range(1,num_sensors):
        exec('extract_cnn = Concatenate(axis=1)([extract_cnn, extract_cnn{}])'.format(i))
    # Stack the GCN layers, each conditioned on the adjacency input.
    for j in range(1, gnn_layers+1):
        if j == 1:
            exec("G_h{} = GCNConv(gnn_unit, activation='relu', dropout_rate=0, name='GNN_{}',)([extract_cnn, sensor_matrix])".format(j, j))
        else:
            exec("G_h{} = GCNConv(gnn_unit, activation='relu', dropout_rate=0, name='GNN_{}',)([G_h{}, sensor_matrix])".format(j, j, j-1))
    # Split node embeddings back out and apply the shared MLP head to each.
    exec('gnn_output = tf.split(G_h{}, num_sensors, 1)'.format(gnn_layers))
    mlp_layer = mlp_model()
    for i in range(num_sensors):
        exec('output{} = mlp_layer(Flatten()(gnn_output[i]))'.format(i))
        exec('output_data.append(output{})'.format(i))
    model = Model(inputs=input_data,
                  outputs= output_data)
    return model
################################################################# training
# Number of sensors deployed in each training environment (index = env id).
sensor_per_map = [4,5,7,6,8,9,9,10,10,10,10,10]
def train_model(num_epoch, num_batch, num_maps=10, lr_decay=True, is_robot=0, is_gcn=0):
    """Fine-tune the GCN model, one randomly chosen environment per epoch.

    Every epoch reloads the checkpoint 'training/model_gcn_s1.h5', fits on
    a shuffled subset of cases, appends the fit history to
    'training/history.csv' and saves the checkpoint back.

    NOTE(review): ``select_env = np.random.randint(10)`` yields 0..9 while
    data paths use 'env_{select_env}' -- confirm an env_0 directory exists.
    ``num_maps`` is accepted but never used.
    """
    cur_lr = init_lr
    for ep in range(num_epoch):
        print('starting training epoch:', ep)
        # 1/sqrt(epoch) learning-rate decay.
        if lr_decay:
            cur_lr = init_lr/np.sqrt(ep+1)
        select_env = np.random.randint(10)
        num_sensors = sensor_per_map[select_env]
        # np.arange(2, 33) holds 31 entries, so the [:32] cap is a no-op.
        select_case = np.arange(2,33)
        np.random.shuffle(select_case)
        select_case = select_case[:32]
        input_image, image_index = load_input(num_sensors, select_case, select_env)
        # Fully connected adjacency (ones); one extra node when a robot joins.
        if is_robot:
            z_admatrix = np.ones((len(select_case), num_sensors+1, num_sensors+1))
        else:
            z_admatrix = np.ones((len(select_case), num_sensors, num_sensors))
        # Optionally pre-normalise the adjacency with the spektral GCN filter.
        if is_gcn:
            z_admatrix = gcn_filter(z_admatrix)
        #input_admatrix = gcn_filter(z_admatrix)
        input_admatrix = z_admatrix.copy()
        # Model inputs: one image batch per sensor, adjacency matrix last.
        input_data = []
        for i in range(num_sensors):
            input_data.append(input_image[:,i])
        input_data.append(input_admatrix)
        input_label = load_label(select_env, num_sensors, image_index)
        model = load_model_gcn(num_sensors)
        model.load_weights('training/model_gcn_s1.h5')
        model.compile(optimizer=Adam(learning_rate=cur_lr), loss='mse')
        train_his = model.fit(input_data, np.asarray(input_label), batch_size=num_batch, epochs=int(len(input_label)/num_batch))
        # Append this epoch's fit history to the running CSV log.
        hist_df = pd.DataFrame(train_his.history)
        hist_csv_file = 'training/history.csv'
        with open(hist_csv_file, mode='a') as f:
            hist_df.to_csv(f, index=False)
        model.save('training/model_gcn_s1.h5')
        #z_res = model.predict(input_data)
##################################################################
#tf.config.run_functions_eagerly(True)
#train_model(num_epoch=max_train_iter, num_batch=batch_size, num_maps = max_env_map)
################################################################## plot history
#history_loss = pd.read_csv('training/history.csv')
#all_loss = history_loss['loss']
#s1_loss = history_loss['model_4_loss']
#z_his = np.load()
| [
"noreply@github.com"
] | zbwby819.noreply@github.com |
ba63583603ba510a720273f9f094580bcf66f634 | 2e34e7292191a265412638bc2f8476a319df977a | /day9/process.py | fbd7d040400e0c8553cb2fbda85c54a48a6e6057 | [] | no_license | Snow670/Notes | 2a0d0e88834009b95b9dd5698535b97bca8c9ace | 141c10b6fc918e65cae45501ca667779ddd16784 | refs/heads/master | 2022-12-17T09:43:53.647504 | 2020-09-21T00:37:49 | 2020-09-21T00:37:49 | 281,343,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | ''''
from multiprocessing import Process
import time
import random
def sing(name):
print("是%s在singing"%name)
time.sleep(random.randint(1,3))
print('%s唱完了'%name)
if __name__ == '__main__':
p1 = Process(target=sing,args=('aa',))
p2 = Process(target=sing,args=('bb',))
p3 = Process(target=sing,args=('cc',))
p1.start()
p2.start()
p3.start()
print('主进程')
'''
#——————————— Second way to create a process ———————————
# Create a class that inherits from Process; it must define a run() method.
'''
import multiprocessing import process
import time
import random
class Sing(Process):
def __init__(self,name):
super(Sing,self).__init__()
self.name = name
def run(self):
print('%s在唱歌'%self.name)
time.sleep(random.randint(1,3))
print('%s唱完了'%self.name)
if __name__ == '__main__':
p1 = Sing('aa')
p2 = Sing('bb')
p3 = Sing('cc')
p1.start()
p2.start()
p3.start()
'''
#----------- Process methods (daemon, terminate, is_alive, join) ----------
'''
from multiprocessing import Process
import time
import random
def sing(name):
print("是%s在singing"%name)
time.sleep(random.randint(1,3))
print('%s唱完了'%name)
if __name__ == '__main__':
p1 = Process(target=sing,args=('aa',))
p2 = Process(target=sing,args=('bb',))
p3 = Process(target=sing,args=('cc',))
#设置p2为守护进程
p2.daemon = True
p1.start()
p2.start()
#关闭p1
p1.terminate()
print('p1 is alive',p1.is_alive())
p3.start()
print(p3.pid)
p3.join(2)
print('主进程')
'''
#----- Creating processes in a for loop --------
from multiprocessing import Process
import time
import random
def sing(name):
    """Simulate one singer: announce start, work for 1-3 s, announce finish."""
    print("是%s在singing" % name)
    duration = random.randint(1, 3)
    time.sleep(duration)
    print('%s唱完了' % name)
if __name__ == '__main__':
    singer_names = ['aa', 'bb', 'cc']
    workers = []
    # Spawn one child process per singer, starting each one immediately.
    for singer in singer_names:
        proc = Process(target=sing, args=(singer,))
        proc.start()
        workers.append(proc)
    # Wait for every child to finish before the parent continues.
    for proc in workers:
        proc.join()
    print('主进程')
| [
"1419517126@qq.com"
] | 1419517126@qq.com |
4aad8d0d5b5850c39f39e8c95e5ba033c42bc851 | e187289557f2dfbdd5ebb76a10e20bd0cdc37f6d | /run_network.py | 7fb37c51ee6545d9baca92a09518b5566e13139e | [] | no_license | naumovvs/publictransportnet | f0de482eb78c48601c0dcde04e9a074517dd38bc | 432c41c72cd1715db615385e7936489cba2c931b | refs/heads/master | 2021-07-04T07:47:46.160517 | 2021-05-26T10:07:09 | 2021-05-26T10:07:09 | 97,127,213 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | from stochastic import stochastic
from transportnet import net
# generation of a random transport network
# given: num_nodes - number of nodes in a net
# num_links - number of links in a net
# link_weight - stochastic variable
#
# num_nodes max(num_links)
# 1 0 0*1
# 2 2 1*2
# 3 6 2*3
# 4 12 3*4
# 5 ? 4*5(?)
# ... ...
# n n*(n-1)
# forming the network
#
n = net.Net()
# stochastic variable of the links weight
# NOTE(review): the meaning of Stochastic(1, 1, 0.2) arguments is defined by
# the external `stochastic` package -- confirm there before changing.
sw = stochastic.Stochastic(1, 1, 0.2)
# stochastic variable of the number of line stops
sn = stochastic.Stochastic(0, 3, 4)
# generating the links: 6 nodes, 30 links, weights drawn from sw
n.generate(6, 30, sw)
# generating the lines: 5 transit lines, stop counts drawn from sn
n.gen_lines(5, sn)
# print out simulation results
n.print_characteristics() | [
"naumov.vs@gmail.com"
] | naumov.vs@gmail.com |
c0c5969c0fb1b5f8690abfc5dd591828908f3465 | cd01fb045cd3807ac8c7205f18bf5453c4f84573 | /apps/order/admin.py | 438cab2c56e0ee3923782a26df2a6620c4834988 | [] | no_license | quannguyen13/Ecommerce | cb8681251e4d4e2a38428795dc2aba948249c7d3 | 8eca6f7592f658558444954e0caf3328a1aa12f1 | refs/heads/master | 2023-08-28T06:22:23.333026 | 2021-11-09T05:33:39 | 2021-11-09T05:33:39 | 402,971,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | from django.contrib import admin
# Register your models here.
from .models import Order, OrderItem
# Expose both order models in the Django admin with the default ModelAdmin.
admin.site.register(Order)
admin.site.register(OrderItem)
| [
"waynenguyenus@gmail.com"
] | waynenguyenus@gmail.com |
4e6e206cacc8c2fe7deb74864afd3e7a1499a9dc | 1bbf8c3c14237652839ccfe8f184afbb168a2325 | /extractors/__init__.py | d6153388286b71dd15b69aaa7d5eaa4279aa5f13 | [] | no_license | gurn33sh/pdownloader | baf4b96e4a59bbe4247557bb3f4480a818802426 | 940968922ce56a7913685abdfe9af7b15996ea9f | refs/heads/main | 2023-07-10T17:37:45.415271 | 2021-08-23T06:09:58 | 2021-08-23T06:09:58 | 396,684,321 | 0 | 1 | null | 2021-08-16T10:40:56 | 2021-08-16T07:55:44 | Python | UTF-8 | Python | false | false | 51 | py | from extractors.url_extractor_from_webpage import * | [
"gurn33sh@gmail.com"
] | gurn33sh@gmail.com |
d547244b5f53cfb0911e7a7f987e51385c268b53 | 14321c43ff636244fb79d4435874ef062f6ed785 | /ML_Analysis/GeiloWinterSchool2018/course_material/tictactoe_numpy.py | 5d367c5189e1ccef25bc7bc4a4dee06f05f88d17 | [] | no_license | Etienne357/FYS5555 | 13f9301bcca6f4d16c95a38245b025ea35356f58 | 8bb0ec5eecb37abb451a3a9b25d3f3aca9a1f882 | refs/heads/master | 2022-02-04T22:48:30.649921 | 2022-01-13T13:45:09 | 2022-01-13T13:45:09 | 194,678,500 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,818 | py | import random
import itertools as it
import numpy as np
def winner_horizontal(board):
    """Return the winner encoded in the rows of *board*, if any.

    *board* is a 2-D array with one candidate line per row.  Returns the
    player number (1 or 2) owning a completed line, -1 for a draw (all
    cells occupied, no completed line), or None while the game is open.
    """
    # IDIOM FIX: the original bound the builtins ``max``/``min`` as local
    # names; use non-shadowing names instead.
    row_max = np.max(board, axis=1)
    row_min = np.min(board, axis=1)
    # Rows whose entries are all equal and non-zero are completed lines.
    uniform_rows = np.where((row_max == row_min) & (row_min > 0))[0]
    for row in uniform_rows:
        return board[row, 0]
    # No winner: a fully occupied board is a draw.
    if (board > 0).all():
        return -1
    return None
# Index triples enumerating all eight tic-tac-toe lines of a flat 3x3 board:
# three columns, three rows and the two diagonals.
_check = [
    (0,3,6,0,1,2,0,2),
    (1,4,7,3,4,5,4,4),
    (2,5,8,6,7,8,8,6),
]
def winner(board):
    """Return 1/2 for the winning player, -1 for a draw, None otherwise."""
    first, second, third = _check
    lines = np.array([board[(first,)], board[(second,)], board[(third,)]])
    # Transpose so every row of the matrix is one candidate line.
    return winner_horizontal(lines.T)
def moves(board):
    """Return the indices of the empty (zero) cells of *board*."""
    open_cells = np.where(board == 0)
    return open_cells[0]
def make_move(board, i, player):
    """Return a copy of *board* in which cell *i* is claimed by *player*."""
    updated = np.copy(board)
    updated[i] = player
    return updated
def fork(board, player):
    """True when *player* has at least two distinct immediate winning moves."""
    wins = sum(
        1 for cell in moves(board)
        if winner(make_move(board, cell, player)) == player
    )
    return wins >= 2
def tictactoe1(board, player):
    """Basic rule-based AI: win, block, random corner, centre, then side."""
    opponent = 3 - player
    open_cells = moves(board)
    corners = [c for c in open_cells if c in {0, 2, 6, 8}]
    sides = [c for c in open_cells if c in {1, 3, 5, 7}]
    # 1. Take an immediate win if one exists.
    winning = next((c for c in open_cells
                    if winner(make_move(board, c, player)) == player), None)
    if winning is not None:
        return winning
    # 2. Otherwise block the opponent's immediate win.
    blocking = next((c for c in open_cells
                     if winner(make_move(board, c, opponent)) == opponent), None)
    if blocking is not None:
        return blocking
    # 3. Prefer a random free corner.
    if corners:
        return random.choice(corners)
    # 4. Then the centre, if free.
    if 4 in open_cells:
        return 4
    # 5. Finally a random side cell.
    return random.choice(sides)
def tictactoe2(board, player):
    """Rule-based AI: win, block, fork, block fork, corner, centre, side.

    The rules are tried strictly in order; the first matching cell wins.
    """
    other = 3 - player
    available_moves = moves(board)
    corner_moves = [i for i in available_moves if i in {0,2,6,8}]
    side_moves = [i for i in available_moves if i in {1,3,5,7}]
    # Try to find a winning move
    for i in available_moves:
        if winner(make_move(board, i, player)) == player:
            return i
    # Try to block a winning move by the opponent
    for i in available_moves:
        if winner(make_move(board, i, other)) == other:
            return i
    # Try to find a forking move (two simultaneous winning threats)
    for i in available_moves:
        if fork(make_move(board, i, player), player):
            return i
    # Find possible forking moves by the opponent and occupy the first one
    for i in available_moves:
        if fork(make_move(board, i, other), other):
            return i
    # Pick a random move from the corners
    if corner_moves:
        return random.choice(corner_moves)
    # Pick the center, if it is free
    if 4 in available_moves:
        return 4
    # Pick one of the sides
    return random.choice(side_moves)
def tictactoe3(board, player):
    """Strongest rule-based AI: like tictactoe2, but with smarter fork defence.

    If the opponent threatens exactly one fork it is blocked directly; with
    two threatened forks a side move is played to force the opponent to
    respond instead of completing a fork.
    """
    other = 3 - player
    available_moves = moves(board)
    corner_moves = [i for i in available_moves if i in {0,2,6,8}]
    side_moves = [i for i in available_moves if i in {1,3,5,7}]
    # Try to find a winning move
    for i in available_moves:
        if winner(make_move(board, i, player)) == player:
            return i
    # Try to block a winning move by the opponent
    for i in available_moves:
        if winner(make_move(board, i, other)) == other:
            return i
    # Try to find a forking move
    for i in available_moves:
        if fork(make_move(board, i, player), player):
            return i
    # Count the opponent's possible forking moves (remember the last one)
    nforks = 0
    blocking_move = None
    for i in available_moves:
        if fork(make_move(board, i, other), other):
            nforks += 1
            blocking_move = i
    # If just one, block it. Otherwise force a block by the opponent by playing the side
    if nforks == 1:
        return blocking_move
    elif nforks == 2 and side_moves:
        return random.choice(side_moves)
    # Pick the center, if it is free
    if 4 in available_moves:
        return 4
    # Pick a random move from the corners
    if corner_moves:
        return random.choice(corner_moves)
    # Pick one of the sides
    return random.choice(side_moves)
def monkey(board, player):
    """Uniformly random legal move; the *player* argument is ignored."""
    legal = moves(board)
    return random.choice(legal)
def fight(ai1, ai2, n=10):
    """Play 2*n games between *ai1* and *ai2* and print the final tallies.

    Each AI plays n games as player 1 and n as player 2; `loc` maps the
    winner code (-1 draw, 1, 2) onto the correct tally slot for each side.
    """
    # occ = [draws, ai1 wins, ai2 wins]
    occ = [0, 0, 0]
    for p1, p2, loc in [(ai1, ai2, [-1, 1, 2]), (ai2, ai1, [-1, 2, 1])]:
        for _ in range(n):
            board = np.zeros(9, dtype=np.int8)
            # Alternate moves until winner() reports a win (1/2) or draw (-1).
            for ai, player in it.cycle([(p1, 1), (p2, 2)]):
                i = ai(board, player)
                board = make_move(board, i, player)
                win = winner(board)
                if win:
                    occ[loc.index(win)] += 1
                    break
    print(f'Draw: {occ[0]}, {ai1.__name__} wins: {occ[1]}, {ai2.__name__} wins: {occ[2]}')
| [
"michael.arlandoo@gmail.com"
] | michael.arlandoo@gmail.com |
216517ac51305fb90d8b4e5ea4fb6742af575ab2 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/response/AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse.py | 5bb6da84803ce7b34c17434f46cfbd99b4b8b08a | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,200 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.EnterpriseOpenRuleInfo import EnterpriseOpenRuleInfo
class AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse(AlipayResponse):
    """Response wrapper for the enterprise open-rule query endpoint.

    Auto-generated SDK class; exposes the single payload field
    ``enterprise_open_rule_info`` as an ``EnterpriseOpenRuleInfo`` object.
    """

    def __init__(self):
        super(AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse, self).__init__()
        # Populated lazily by parse_response_content().
        self._enterprise_open_rule_info = None

    @property
    def enterprise_open_rule_info(self):
        return self._enterprise_open_rule_info

    @enterprise_open_rule_info.setter
    def enterprise_open_rule_info(self, value):
        # Accept either a ready model instance or the raw dict returned by
        # the gateway, converting the latter via from_alipay_dict().
        if isinstance(value, EnterpriseOpenRuleInfo):
            self._enterprise_open_rule_info = value
        else:
            self._enterprise_open_rule_info = EnterpriseOpenRuleInfo.from_alipay_dict(value)

    def parse_response_content(self, response_content):
        """Parse the gateway payload and pick up the rule info if present."""
        response = super(AlipayEbppInvoiceEnterpriseconsumeEnterpriseopenruleQueryResponse, self).parse_response_content(response_content)
        if 'enterprise_open_rule_info' in response:
            self.enterprise_open_rule_info = response['enterprise_open_rule_info']
| [
"jishupei.jsp@alibaba-inc.com"
] | jishupei.jsp@alibaba-inc.com |
a3d681b910eccac19a6a1fc2c1278cd65cb7ffb3 | d7e75f7743dad7c5d3b4a3175e53233b8fe7058f | /python/data-science/amostragem/amostragemEstratificada.py | 9253cf9a9bc8507b1efcf03f24b2211114284321 | [
"MIT"
] | permissive | paulo-mesquita/python-exercises | 40b471c53ebe188d582db5a289d723a24b067ec9 | c3ba969d35cab3d9a46a014bc4d93800daa85a50 | refs/heads/master | 2023-08-18T12:27:10.219480 | 2020-02-24T03:48:53 | 2020-02-24T03:48:53 | 242,413,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %%
import pandas as pd
from sklearn.model_selection import train_test_split
# %%
# Load the iris dataset (local course-material path).
iris = pd.read_csv('/home/haboryn/Programacao/PYTHON/vscode/RecursosdoCurso/Dados/iris.csv')
# %%
# Class distribution before sampling.
iris['class'].value_counts()
# %%
# Stratified 50% sample: the class proportions of column 4 are preserved.
x, _, y, _ = train_test_split(iris.iloc[:, 0:4], iris.iloc[:,4], test_size = 0.5, stratify = iris.iloc[:,4])
# %%
y.value_counts()
# %%
infert = pd.read_csv('/home/haboryn/Programacao/PYTHON/vscode/RecursosdoCurso/Dados/infert.csv')
# %%
infert['education'].value_counts()
# %%
# Stratified 40% sample of the infert data, stratifying on 'education'.
x1, _, y1, _ = train_test_split(infert.iloc[:, 2:9], infert.iloc[:, 1], test_size = 0.6, stratify = infert.iloc[:, 1])
# stratify selects the column whose class proportions must be preserved
# .iloc - all rows
# the underscore _ discards the unselected half of the split, keeping only the sample
y1.value_counts()
# %%
| [
"pauloclarinete1@hotmail.com"
] | pauloclarinete1@hotmail.com |
2f74eb35f88c93e6d1cc0f2c70e613a232900ae6 | 796e51c02030779e6a60011bb8830476f916f51c | /controllers/paint.py | 94970d773534a698e23348b3565dd9e200adbc9f | [
"WTFPL"
] | permissive | BlenderBQ/BBQ | 73065b1e67aef9e8d0aead568c7b01610ab0de84 | d723c77aa809136d7282cd068271dfdf0c91e884 | refs/heads/master | 2020-09-13T11:02:59.336757 | 2015-06-17T12:31:05 | 2015-06-17T12:31:05 | 18,838,124 | 7 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,205 | py | import Leap
from communication import send_long_command
from leaputils import MAX_X, MAX_Y, MAX_Z
class ColorListener(Leap.Listener):
    """
    This listener is intended for painting mode.
    It changes the paint color according to the position of a single hand
    shown with (roughly) all five fingers opened.
    """
    def __init__(self, threshold=1, length_threshold=10, history_size=30):
        Leap.Listener.__init__(self)
        # Minimum accumulated palm movement before a color change is sent.
        self.threshold = threshold
        # NOTE(review): length_threshold is stored but never used in this
        # class -- presumably a finger-length filter; confirm before removing.
        self.length_threshold = length_threshold
        # Number of recent palm positions kept for the movement estimate.
        self.history_size = history_size
        self.history = []
    def on_frame(self, controller):
        """Track the palm of a single open hand and update the paint color."""
        # Get the most recent frame
        frame = controller.frame()
        # Need exactly one hand for this gesture.
        # (was `is not 1`: identity comparison on an int is unreliable and a
        # SyntaxWarning on modern CPython -- use != instead)
        if len(frame.hands) != 1:
            del self.history[:]
            return
        hand = frame.hands[0]
        # About five fingers must be visible
        if len(hand.fingers) < 4:
            del self.history[:]
            return
        self.history.append(hand.stabilized_palm_position)
        # Limit history size, keeping the MOST RECENT samples.
        # (was `self.history[:-self.history_size]`, which dropped the newest
        # samples and kept the oldest -- the movement estimate then tracked
        # stale positions)
        if len(self.history) > self.history_size:
            self.history = self.history[-self.history_size:]
        # Activate the gesture if there's enough change
        variation = Leap.Vector()
        for i in range(1, len(self.history)):
            variation += self.history[i] - self.history[i-1]
        if variation.magnitude >= self.threshold:
            self.change_color(hand.stabilized_palm_position)
    def change_color(self, position):
        """Send the color derived from *position* to the paint controller."""
        r, g, b = self.to_color(position)
        r, g, b = min(1, r), min(1, g), min(1, b)
        send_long_command('paint_color', {'r': r, 'g': g, 'b': b},
                filters={'r': 'coordinate', 'g': 'coordinate', 'b': 'coordinate'})
    def to_color(self, position):
        """
        Convert a position in Leap space to a color in the RGB cube.
        We use the subspace (0..250, 0..350, -50..0).
        The RGB components are scaled to [0..1]
        """
        # Translate
        x = position.x
        y = position.y
        z = -position.z
        # Scale (clamped below at 0 here; change_color clamps above at 1)
        r = max(0, x / MAX_X)
        g = max(0, y / MAX_Y)
        b = max(0, z / MAX_Z)
        return r, g, b
| [
"jean.marie.comets@gmail.com"
] | jean.marie.comets@gmail.com |
86eef8c73ad777d6500620f1b97ac62665198ffe | ef98d5244892d90fd8c2c1a7ead639003ceca757 | /easy/python/algo/reverse.py | 0297ecbfcff912b60dfd0b18407c63b329671ee4 | [] | no_license | Caprowni/hackerman | f8cdc79dcc99e33f9a93f384d388084b7ad36776 | 2debefcafb26aa06f17634ef7bfed2683d546b0f | refs/heads/master | 2022-11-16T04:45:15.865321 | 2020-07-10T16:46:02 | 2020-07-10T16:46:02 | 278,332,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | def reverse(x):
    # Reverse the decimal digits of x, keeping the sign, and print the result.
    # NOTE(review): the value is printed but not returned, so callers cannot
    # use the reversed number programmatically.
    if x > 0:
        new_int = int(str(x)[::-1])
    else:
        # non-positive: reverse the digits of |x|, then restore the sign
        # (x == 0 also takes this branch and correctly yields 0)
        new_int = -1 * int(str(x*-1)[::-1])
    print(new_int)
# Quick manual checks: prints 123, then -321.
x = 321
y = -123
reverse(x)
reverse(y)
| [
"liamcaproni@gmail.com"
] | liamcaproni@gmail.com |
b57eae63b98f6b34e7992657c66ff754e82f0b30 | db41952df56d92531f34f6b1c0005b5581ba5994 | /tests/query/bugs/test_query_after_schema_altered_issue1185_1154_pr1606.py | dd9560b2a1cd566140d1d2f57bdeb99a4ff4f13d | [
"Apache-2.0",
"LicenseRef-scancode-commons-clause"
] | permissive | yixinglu/nebula-graph | 3d48e72ae4e1d0a46ec1cc4d4084c6fd0ed86160 | faf9cd44d818b953da98b5c922999560c89867bd | refs/heads/master | 2023-06-28T21:39:25.637732 | 2020-09-22T06:27:01 | 2020-09-22T06:27:01 | 288,141,709 | 1 | 0 | Apache-2.0 | 2020-08-17T09:45:46 | 2020-08-17T09:45:46 | null | UTF-8 | Python | false | false | 6,376 | py | # --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import time
from tests.common.nebula_test_suite import NebulaTestSuite
class TestQuery(NebulaTestSuite):
    """Regression tests for querying after ALTER TAG/EDGE schema changes
    (issues #1185/#1154, PR #1606).

    The three tests share server-side schema state and rely on running in
    this order: add properties -> change property types -> drop properties.
    Each ALTER is followed by time.sleep(self.delay) so the schema change
    can propagate before the next query.
    """
    def test_add_prop(self):
        """Adding properties: existing data must read back with the new
        columns filled with type defaults ('' for string, 0 for int)."""
        cmd = 'FETCH PROP ON person 1004'
        resp = self.execute_query(cmd)
        expect_result = [[1004, 'Lisa', 8, 'female']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'FETCH PROP ON is_teacher 2002->1004'
        resp = self.execute_query(cmd)
        expect_result = [[2002, 1004, 0, 2018, 2019]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        # Schema change: add one property to the tag and one to the edge.
        resp = self.execute("ALTER TAG person ADD (birthplace string)")
        self.check_resp_succeeded(resp)
        resp = self.execute("ALTER EDGE is_teacher ADD (years int)")
        self.check_resp_succeeded(resp)
        time.sleep(self.delay);
        cmd = 'FETCH PROP ON person 1004'
        resp = self.execute_query(cmd)
        expect_result = [[1004, 'Lisa', 8, 'female', '']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'FETCH PROP ON is_teacher 2002->1004'
        resp = self.execute_query(cmd)
        expect_result = [[2002, 1004, 0, 2018, 2019, 0]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'GO FROM 2002 OVER is_teacher '\
            'YIELD is_teacher.start_year, is_teacher.years, $$.person.name, $$.person.birthplace'
        resp = self.execute_query(cmd)
        expect_result = [[2018, 0, 'Lisa', ''], [2018, 0, 'Peggy', ''], [2018, 0, 'Kevin', ''],
            [2018, 0, 'WangLe', ''],[2017, 0, 'Sandy', ''], [2015, 0, 'Lynn', ''], [2015, 0, 'Bonnie', ''],
            [2015, 0, 'Peter', ''], [2014, 0, 'XiaMei', '']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
    def test_alter_prop_type(self):
        """Changing a property's type: previously written values must be
        replaced by the new type's default when read back."""
        # First write real values into the properties added by test_add_prop.
        resp = self.execute('INSERT VERTEX person(name, age, gender, birthplace) VALUES\
            1004:("Lisa", 8, "female", "Washington")')
        self.check_resp_succeeded(resp)
        resp = self.execute('INSERT EDGE is_teacher(start_year, end_year, years) VALUES\
            2002->1004:(2018, 2019, 1)')
        self.check_resp_succeeded(resp)
        cmd = 'FETCH PROP ON person 1004'
        resp = self.execute_query(cmd)
        expect_result = [[1004, 'Lisa', 8, 'female', 'Washington']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'FETCH PROP ON is_teacher 2002->1004'
        resp = self.execute_query(cmd)
        expect_result = [[2002, 1004, 0, 2018, 2019, 1]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'GO FROM 2002 OVER is_teacher '\
            'YIELD is_teacher.start_year, is_teacher.years, $$.person.name, $$.person.birthplace'
        resp = self.execute_query(cmd)
        expect_result = [[2018, 1, 'Lisa', 'Washington'], [2018, 0, 'Peggy', ''], [2018, 0, 'Kevin', ''],
            [2018, 0, 'WangLe', ''],[2017, 0, 'Sandy', ''], [2015, 0, 'Lynn', ''], [2015, 0, 'Bonnie', ''],
            [2015, 0, 'Peter', ''], [2014, 0, 'XiaMei', '']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        # Schema change: string -> int on the tag, int -> string on the edge.
        resp = self.execute("ALTER TAG person CHANGE (birthplace int)")
        self.check_resp_succeeded(resp)
        resp = self.execute("ALTER EDGE is_teacher CHANGE (years string)")
        self.check_resp_succeeded(resp)
        time.sleep(self.delay);
        cmd = 'FETCH PROP ON person 1004'
        resp = self.execute_query(cmd)
        expect_result = [[1004, 'Lisa', 8, 'female', 0]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'FETCH PROP ON is_teacher 2002->1004'
        resp = self.execute_query(cmd)
        expect_result = [[2002, 1004, 0, 2018, 2019, '']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'GO FROM 2002 OVER is_teacher '\
            'YIELD is_teacher.start_year, is_teacher.years, $$.person.name, $$.person.birthplace'
        resp = self.execute_query(cmd)
        expect_result = [[2018, '', 'Lisa', 0], [2018, '', 'Peggy', 0], [2018, '', 'Kevin', 0],
            [2018, '', 'WangLe', 0],[2017, '', 'Sandy', 0], [2015, '', 'Lynn', 0], [2015, '', 'Bonnie', 0],
            [2015, '', 'Peter', 0], [2014, '', 'XiaMei', 0]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
    def test_delete_prop(self):
        """Dropping the added properties: results must revert to the
        original column sets."""
        resp = self.execute("ALTER TAG person DROP (birthplace)")
        self.check_resp_succeeded(resp)
        resp = self.execute("ALTER EDGE is_teacher DROP (years)")
        self.check_resp_succeeded(resp)
        time.sleep(self.delay);
        cmd = 'FETCH PROP ON person 1004'
        resp = self.execute_query(cmd)
        expect_result = [[1004, 'Lisa', 8, 'female']]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'FETCH PROP ON is_teacher 2002->1004'
        resp = self.execute_query(cmd)
        expect_result = [[2002, 1004, 0, 2018, 2019]]
        print(cmd)
        self.check_resp_succeeded(resp)
        self.check_out_of_order_result(resp.rows, expect_result)
        cmd = 'GO FROM 2002 OVER is_teacher YIELD $$.person.name as name, $$.student.grade as grade;'
        resp = self.execute_query(cmd)
        expect_result = [['Lisa', 3], ['Peggy', 3], ['Kevin', 3], ['WangLe', 3],
            ['Sandy', 4], ['Lynn', 5], ['Bonnie', 5], ['Peter', 5], ['XiaMei', 6]]
        print(cmd)
        self.check_resp_succeeded(resp)
        # NOTE(review): unlike every other query above, no
        # check_out_of_order_result(resp.rows, expect_result) follows here --
        # the file may be truncated; confirm the final assertion.
| [
"noreply@github.com"
] | yixinglu.noreply@github.com |
ef4a12ccfb7a2b13c3f3e3447892d05dba16212d | 6f0ef8a30b9544e14dc76ba053b9f635dbc9f5aa | /api/views.py | 1b942051ed940aab112be2340bf3b01597eaf4e7 | [] | no_license | yoshiyasugimoto/drf_youtube | a83809be266cad35b9c4a101c2b6f20803c6f60b | 906bfe08beb4a1010c5135043172f9e0e456731e | refs/heads/master | 2023-01-02T00:58:03.505520 | 2020-10-20T15:21:08 | 2020-10-20T15:21:08 | 305,111,004 | 0 | 0 | null | 2020-10-20T15:21:10 | 2020-10-18T13:43:22 | Python | UTF-8 | Python | false | false | 447 | py | from rest_framework import viewsets
from rest_framework import generics
from .serializers import VideoSerializers, UserSerializer
from rest_framework.permissions import AllowAny
from .models import Video
class CreateUserView(generics.CreateAPIView):
    """Open (unauthenticated) endpoint that registers a new user."""
    permission_classes = (AllowAny,)
    serializer_class = UserSerializer
class VideoViewSet(viewsets.ModelViewSet):
    """Full CRUD API over every Video row."""
    serializer_class = VideoSerializers
    queryset = Video.objects.all()
| [
"y.sugimoto@jxpress.net"
] | y.sugimoto@jxpress.net |
f3497850b9714e04abcbc20718f6bce0e8f7a8a7 | d4601cab25572758bb0d4f34832a6018c4047581 | /belt_python/settings.py | d7eecc880570f79d43b12372f9c79cb4f2b0848b | [] | no_license | mabinnn/quotes | ba4436e485a6797734c4593e65cfca4af50487f3 | cbf9674140068979231d1a880fb23f9898740ed5 | refs/heads/master | 2021-01-20T03:17:45.702793 | 2017-04-26T20:19:01 | 2017-04-26T20:19:01 | 89,522,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | """
Django settings for belt_python project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key below is committed to source control -- acceptable
# for a class project, but load it from the environment before deploying.
SECRET_KEY = '0#zdb^rc314$e3iqv$x-&%t0wxm!28m1693b=^ss3#8dky&a4@'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
# 'apps.belt_app' is the project's own application; the rest are Django's
# standard contrib apps.
INSTALLED_APPS = [
    'apps.belt_app',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'belt_python.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'belt_python.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# SQLite file stored alongside the project (development default).
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"marvin.alganes@gmail.com"
] | marvin.alganes@gmail.com |
14e4a43f8a1d2f0490b0464b20eace77b3771c27 | f43c8f0f722df416890042555f3ab340af4148c5 | /misc-scripts/parseglobalma.py | 8bd9407156bf3f6972c636351178014131a2ef92 | [
"MIT"
] | permissive | RenaKunisaki/StarFoxAdventures | 98e0b11df4f8b28bbe5eabe203b768ecfc01f1e7 | f7dc76f11f162d495cd86ca819f911946e5bfecd | refs/heads/master | 2023-07-23T18:45:34.192956 | 2023-07-10T19:19:43 | 2023-07-10T19:19:43 | 211,404,362 | 30 | 5 | MIT | 2022-05-05T20:10:19 | 2019-09-27T21:27:49 | C | UTF-8 | Python | false | false | 16,205 | py | #!/usr/bin/env python3
"""Read GLOBALMA.bin and generate a map grid image."""
import sys
import math
import struct
from PIL import Image, ImageDraw, ImageFont
def printf(fmt, *args, **kwargs):
    """C-style printf: %-format *args* into *fmt* and print with no newline."""
    rendered = fmt % args
    print(rendered, end='', **kwargs)
def readStruct(file, fmt, offset=None):
    """Read one struct of layout *fmt* from *file*.

    Seeks to *offset* first when given.  A single-field struct is
    unwrapped to a scalar; multi-field structs come back as a tuple.
    """
    if offset is not None:
        file.seek(offset)
    raw = file.read(struct.calcsize(fmt))
    fields = struct.unpack(fmt, raw)
    return fields[0] if len(fields) == 1 else fields
def _mapIdToColor(id):
n = id + 1
id = 0
for i in range(8): id |= ((n >> i) & 1) << i
r = ((id >> 1) & 3) / 3
g = ((id >> 3) & 7) / 7
b = ((id >> 6) & 3) / 3
return int(r*255), int(g*255), int(b*255)
def _mapIdToHtml(id):
    """Return the color for *id* as an HTML hex string like '#3FC07F'."""
    red, green, blue = _mapIdToColor(id)
    return '#{:02X}{:02X}{:02X}'.format(red, green, blue)
class MapGrid:
    """A fixed-size 2D grid with a shifted origin.

    Cells are addressed by signed coordinates with
    x in [originX, originX + width) and y in [originY, originY + height);
    the backing store is a flat row-major list.  xMin/yMin/xMax/yMax track
    the bounding box of the cells actually written via set().
    """
    def __init__(self, width, height, originX=0, originY=0):
        self.width, self.height = width, height
        self.originX, self.originY = originX, originY
        # Bounding box of written cells; starts inverted so the first
        # set() initializes it.
        self.xMin, self.yMin = 999999, 999999
        self.xMax, self.yMax = -self.xMin, -self.yMin
        # One slot per cell.  (The original allocated
        # (height - originY) * width slots -- oversized whenever
        # originY < 0 -- and never validated coordinates per axis, so an
        # out-of-range x could silently alias a cell on another row.)
        self.grid = [None] * (width * height)
    def _checkCoords(self, x, y):
        """Validate (x, y) and return the flat index; raise KeyError when
        either coordinate is outside the grid."""
        if not (self.originX <= x < self.originX + self.width
                and self.originY <= y < self.originY + self.height):
            raise KeyError("Coords %d, %d out of range for grid %d,%d - %d,%d" % (
                x, y,
                self.originX, self.originY,
                self.originX + self.width - 1,
                self.originY + self.height - 1))
        return ((y - self.originY) * self.width) + (x - self.originX)
    def set(self, x, y, val):
        """Store val at (x, y) and grow the written-cell bounding box."""
        idx = self._checkCoords(x, y)
        self.grid[idx] = val
        self.xMin = min(x, self.xMin)
        self.yMin = min(y, self.yMin)
        self.xMax = max(x, self.xMax)
        self.yMax = max(y, self.yMax)
    def get(self, x, y):
        """Return the value stored at (x, y), or None if never set."""
        idx = self._checkCoords(x, y)
        return self.grid[idx]
class MapGridCell:
    """One GLOBALMA.bin record: a map placed on one layer of the grid."""
    def __init__(self, x, y, layer, map, link1, link2):
        self.x, self.y = x, y
        self.layer, self.map = layer, map
        self.linked = [link1, link2]
    @classmethod
    def read(cls, file):
        """Parse one record (six big-endian s16 values) from *file*.

        Returns None when the end marker (negative map ID) is reached.
        """
        fields = readStruct(file, '>6h')
        if fields[3] < 0:
            return None
        return cls(*fields)
class MapReader:
    """Reads the global map tables from an extracted disc image and renders
    each Z-layer of the world grid as a PNG (or an HTML table).

    World coordinates are 640 units per grid cell (see the x*640 tooltips
    in layerToHtml and the cx/cz computation in readWarpTab).
    """
    # Z-layers present in GLOBALMA.bin; each gets its own 1024x1024 grid
    # centered on the origin.
    MIN_LAYER = -2
    MAX_LAYER = 2
    def __init__(self, discroot:str):
        # discroot: path to the root directory of the extracted disc.
        self.root = discroot
        self.layer = {}
        for i in range(self.MIN_LAYER, self.MAX_LAYER+1):
            self.layer[i] = MapGrid(1024, 1024, -512, -512)
    def run(self):
        """Load all tables, place every map's blocks on its layer grid,
        then render one PNG per layer."""
        cells = self.readGlobalMap()
        self.maps = self.readMapsBin()
        # plot maps on grid
        for cell in cells:
            if cell.map >= 0 and cell.map < len(self.maps):
                map = self.maps[cell.map]
                for y in range(map['h']):
                    for x in range(map['w']):
                        # grid position of this block: the cell gives the
                        # placement, the map rect gives the block origin
                        gx = (x + cell.x) - map['x']
                        gy = (y + cell.y) - map['y']
                        bi = (y * map['w']) + x
                        block = map['blocks'][bi]
                        self.layer[cell.layer].set(gx, gy, {
                            'map': map,
                            'block': block,
                            'cell': cell,
                        })
            else:
                printf("Map %r is not in MAPS.bin\n", cell.map)
        self.warps = self.readWarpTab()
        self.mapInfo = self.readMapInfo()
        for i, layer in self.layer.items():
            self.layerToImage(layer, i)
            # self.printLayer(layer, i)
        #with open('globalmap.html', 'wt') as outFile:
        #    outFile.write('<html><head>')
        #    outFile.write('<link rel="stylesheet" href="globalmap.css" />')
        #    outFile.write('</head><body>')
        #    for i, layer in self.layer.items():
        #        outFile.write('<h1>Layer %d</h1>' % i)
        #        outFile.write(self.layerToHtml(layer, i))
        #    outFile.write('</body></html>')
    def readGlobalMap(self, path='/globalma.bin'):
        """Read GLOBALMA.bin: the list of map placements (MapGridCell)."""
        cells = []
        with open(self.root+path, 'rb') as globalMa:
            while True:
                cell = MapGridCell.read(globalMa)
                if cell is None: break
                cells.append(cell)
        return cells
    def readMapsBin(self, path='/'):
        """Read MAPS.tab/MAPS.bin.

        Each MAPS.tab row is seven s32 offsets; offset 0 points at the
        map's (w, h, x, y) rect in MAPS.bin and offset 1 at its w*h table
        of packed 32-bit block entries.
        """
        entries = []
        try:
            bin = open(self.root+path+'MAPS.bin', 'rb')
            tab = open(self.root+path+'MAPS.tab', 'rb')
            idx = 0
            while True:
                try:
                    tabEntries = readStruct(tab, '>7i')
                except struct.error as ex:
                    # we should be checking for an entry of 0xFFFFFFFF
                    # but that requires reading them one by one
                    break
                if tabEntries[0] < 0: break
                printf("MAPS.tab %02X = %08X %08X %08X %08X %08X %08X %08X\n",
                    idx, *tabEntries)
                entry = {
                    'tab': tabEntries,
                    'map': idx,
                    'blocks': [],
                }
                entries.append(entry)
                try:
                    bin.seek(tabEntries[0])
                    data = bin.read(8)
                    # FACEFEED marker: this map has no block data.
                    if data.startswith(b'\xFA\xCE\xFE\xED'):
                        printf("Map %02d blocks = FACEFEED\n", idx)
                        idx += 1
                        continue
                    w, h, x, y = struct.unpack('>4h', data)
                except struct.error as ex:
                    printf("Error reading MAPS.bin entry at 0x%X: %s\n",
                        bin.tell(), ex)
                    break
                entry['x'], entry['y'], entry['w'], entry['h'] = x,y,w,h
                #printf("Map %02X rect is %d, %d, %dx%d\n", idx, x, y, w, h)
                bin.seek(tabEntries[1])
                for i in range(w*h):
                    try:
                        block = readStruct(bin, '>I')
                    except struct.error as ex:
                        printf("Error reading block entry at 0x%X: %s\n",
                            bin.tell(), ex)
                        break
                    # bit 31: unknown flag; bits 23-30: mod (model file);
                    # bits 17-22: sub (block within the model); low bits unknown
                    unk1 = block >> 31
                    mod = (block >> 23) & 0xFF
                    sub = (block >> 17) & 0x3F
                    unk2 = block & 0x1FF
                    entry['blocks'].append({
                        'mod': mod,
                        'sub': sub,
                        'unk': (unk1, unk2)
                    })
                idx += 1
            return entries
        finally:
            bin.close()
            tab.close()
    def readWarpTab(self, path="/WARPTAB.bin"):
        """Read WARPTAB.bin: one warp destination per 16-byte record
        (x, y, z floats + layer and angle s16s)."""
        entries = []
        with open(self.root+path, 'rb') as file:
            i = 0
            # NOTE(review): i is never incremented, so every entry gets
            # 'idx': 0 -- looks like a bug; confirm the intended record index.
            while True:
                try:
                    x, y, z, ly, ang = readStruct(file, '>3f2h')
                except struct.error: break
                # there's no end-of-file marker here...
                # for some reason angle is only the high byte of an s16,
                # even though it's stored as a full s16 itself.
                # ie it ranges from -128 to 127.
                entries.append({
                    'x':x, 'y':y, 'z':z,
                    'cx':math.floor(x/640), 'cz':math.floor(z/640),
                    'idx':i, 'layer':ly,
                    'angle':(ang / 128) * 180,
                })
        return entries
    def readMapInfo(self, path="/MAPINFO.bin"):
        """Read MAPINFO.bin: 32-byte records of NUL-padded map name plus
        a type byte and two unknown fields."""
        maps = []
        with open(self.root+path, 'rb') as file:
            while True:
                try:
                    name, typ, unk2, unk3 = readStruct(file, '>28s2bh')
                except struct.error: break
                maps.append({
                    'name': name.replace(b'\0', b'').decode('utf-8'),
                    'type': typ,
                    'unk2': unk2,
                    'unk3': unk3,
                })
        return maps
    def printLayer(self, layer, id):
        """Debug aid: dump a layer's occupied grid region to the terminal
        using ANSI colors (out-of-bounds blocks shown in the dim color)."""
        printf("Layer %d is %d, %d - %d, %d\n    ", id,
            layer.xMin, layer.yMin,
            layer.xMax, layer.yMax)
        for x in range(layer.xMin, layer.xMax+1):
            printf("%2d", abs(x))
        printf("\n")
        for y in range(layer.yMin, layer.yMax+1):
            printf("%4d│", y)
            for x in range(layer.xMin, layer.xMax+1):
                cell = layer.get(x, y)
                # checkerboard background colors
                col = (x & 1) ^ (y & 1)
                col = 19 if col == 0 else 17
                printf("\x1B[48;5;%d;38;5;8m", col)
                if cell is None: printf("  ")
                else:
                    map = cell['map']
                    block = cell['block']
                    # mod 0xFF marks an out-of-bounds block
                    if block['mod'] == 0xFF:
                        printf("%02X", map['map'])
                    else: printf("\x1B[38;5;15m%02X", map['map'])
            printf("\x1B[0m\n")
    def layerToImage(self, layer, id):
        """Render one layer to layer<id>.png: a 16px square per grid cell,
        red arcs for warps, and a bounding rectangle per map."""
        scale = 16
        lw = layer.xMax - layer.xMin
        lh = layer.yMax - layer.yMin
        w = lw * scale
        h = lh * scale
        img = Image.new('RGBA', (w+1, h+1), (32, 32, 32, 255))
        draw = ImageDraw.Draw(img)
        maps = {}
        fnt = ImageFont.truetype(
            '/usr/share/fonts/TTF/DejaVuSans.ttf', 13)
        # plot the grid
        for y in range(layer.yMin, layer.yMax+1):
            for x in range(layer.xMin, layer.xMax+1):
                px = (x-layer.xMin) * scale
                # image Y axis is flipped relative to grid Y
                py = (lh-(y-layer.yMin)) * scale
                cell = layer.get(x, y)
                if cell is not None:
                    map = cell['map']
                    block = cell['block']
                    r, g, b = _mapIdToColor(map['map'])
                    #r, g, b = 192, 192, 192
                    draw.rectangle(
                        (px, py, px+scale, py+scale), # x0 y0 x1 y1
                        fill=(r, g, b, 255),
                        outline=(128, 128, 128, 255))
                    # out-of-bounds blocks: darker fill plus a diagonal slash
                    if block['mod'] == 0xFF:
                        draw.rectangle(
                            (px, py, px+scale, py+scale), # x0 y0 x1 y1
                            fill=(r//2, g//2, b//2, 255),
                            outline=(128, 128, 128, 255))
                        draw.line((px, py, px+scale, py+scale),
                            fill=(128, 128, 128, 255), width=1)
                    # track each map's bounding box for the label pass below
                    mapId = map['map']
                    if mapId not in maps:
                        maps[mapId] = {
                            'xMin':x, 'yMin':y, 'xMax':x, 'yMax':y}
                    else:
                        m = maps[mapId]
                        m['xMin'] = min(m['xMin'], x)
                        m['yMin'] = min(m['yMin'], y)
                        m['xMax'] = max(m['xMax'], x)
                        m['yMax'] = max(m['yMax'], y)
                else:
                    draw.rectangle(
                        (px, py, px+scale, py+scale), # x0 y0 x1 y1
                        fill=None,
                        outline=(64, 64, 64, 255))
        # draw warps
        for i, warp in enumerate(self.warps):
            if warp['layer'] == id:
                angle = warp['angle'] - 90
                wx, wy = warp['cx'], warp['cz']
                wx = (wx - layer.xMin) * scale
                wy = (lh-(wy-layer.yMin)) * scale
                draw.pieslice((
                    wx, wy, wx+scale, wy+scale
                ), angle-20, angle+20,
                    fill=(255, 0, 0, 128),
                    outline=(255, 255, 255, 255),
                )
                #draw.text((wx+scale, wy), "%02X" % i,
                #    fill=(255, 255, 255, 255),
                #    font=fnt, stroke_width=1, stroke_fill=(0, 0, 0, 128))
        # put map names over cells
        for mapId, map in maps.items():
            x0, y0 = map['xMin'], map['yMin']-1
            x1, y1 = map['xMax']+1, map['yMax']
            px0 = (x0-layer.xMin) * scale
            py0 = (lh-(y0-layer.yMin)) * scale
            px1 = (x1-layer.xMin) * scale
            py1 = (lh-(y1-layer.yMin)) * scale
            pw = px1 - px0
            ph = py1 - py0
            #r, g, b = _mapIdToColor(mapId)
            r, g, b = 192, 192, 192
            draw.rectangle((px0, py0, px1, py1),
                outline=(r, g, b, 255))
            # draw name
            text = "%02X %s" % (mapId, self.mapInfo[mapId]['name'])
            size = draw.textsize(text, font=fnt, stroke_width=1)
            tx = px0 + (pw/2) - (size[0]/2)
            ty = py0 + (ph/2) - (size[1]/2)
            if tx < 0: tx = 0
            # HACK
            if mapId == 0x37: ty += 16
            #if ty < 0: ty = py0 + 2
            #draw.text((tx, ty), text, fill=(r, g, b, 255),
            #    font=fnt, stroke_width=1, stroke_fill=(0, 0, 0, 128))
        img.save("layer%d.png" % id, "PNG")
    def layerToHtml(self, layer, id):
        """Return the layer as an HTML <table>; cell tooltips carry block,
        warp and link details (styling comes from globalmap.css)."""
        elems = ['<table><tr><th></th>']
        for x in range(layer.xMin, layer.xMax+1):
            elems.append('<th>%d</th>' % x)
        elems.append('</tr>')
        for y in range(layer.yMax, layer.yMin-1, -1):
            elems.append('<tr><th>%d</th>' % y)
            for x in range(layer.xMin, layer.xMax+1):
                # this is slow but who cares
                warpIdx = -1
                for i, warp in enumerate(self.warps):
                    if (warp['layer'] == id
                    and warp['cx'] == x and warp['cz'] == y):
                        warpIdx = i
                        break
                cell = layer.get(x, y)
                if cell is None:
                    if warpIdx >= 0:
                        title = "%d, %d: no map\nWarp #%02X" % (
                            x, y, warpIdx)
                        elems.append('<td class="empty warp" title="%s">W%02X</td>' % (
                            title, warpIdx))
                    else: elems.append('<td class="empty"></td>')
                else:
                    map = cell['map']
                    block = cell['block']
                    cls = 'map%02X' % map['map']
                    style = 'background:'+_mapIdToHtml(map['map'])
                    if block['mod'] == 0xFF:
                        cls += ' oob'
                        title = "%d, %d: out of bounds\n%02X %s" % (
                            x, y, map['map'],
                            self.mapInfo[map['map']]['name'])
                    else:
                        title = "%d, %d (%d, %d): mod%d.%d\n%02X %s" % (
                            x, y, x*640, y*640, block['mod'], block['sub'],
                            map['map'], self.mapInfo[map['map']]['name'])
                    if warpIdx >= 0:
                        cls += ' warp'
                        title += "\nWarp #%02X" % warpIdx
                        warpIdx = 'W%02X' % warpIdx
                    else: warpIdx = ''
                    text = '%02X %s\n%3d.%2d' % (
                        map['map'], warpIdx, block['mod'], block['sub']
                    )
                    if cell['cell'].linked[0] != -1:
                        cls += ' link'
                        links = []
                        link1 = cell['cell'].linked[0]
                        link2 = cell['cell'].linked[1]
                        if link1 >= 0:
                            links.append('%02X %s' % (link1, self.mapInfo[link1]['name']))
                        if link2 >= 0:
                            links.append('%02X %s' % (link2, self.mapInfo[link2]['name']))
                        title += '\nLinked: ' + ', '.join(links)
                    elems.append(
                        '<td class="%s" style="%s" title="%s">%s</td>' % (
                            cls, style, title, text))
            elems.append('</tr>')
        elems.append('</table>')
        return ''.join(elems)
if __name__ == '__main__':
    # Entry point: expects the extracted disc root directory as the only
    # command-line argument.
    if len(sys.argv) < 2:
        print("Usage: %s disc-root-path" % sys.argv[0])
    else:
        MapReader(sys.argv[1]).run()
| [
"hyperhacker@gmail.com"
] | hyperhacker@gmail.com |
f079d3ced19a69a7f12dcbd8037b1535e2934eda | e68940ae3ad37076693ba8f20e500577e152cec8 | /Subtask3/Analysis/plot.py | da8638e3a23bc728b27902fc54ff50c9b6daf7f3 | [] | no_license | arch1902/Traffic-density-estimation | 705b7cd9934a132d8f2035f27b7a0dc66d61cce9 | 159a1c409a05eebccc00bbb801e7b8662960cba0 | refs/heads/main | 2023-04-30T07:22:32.206809 | 2021-05-20T07:45:15 | 2021-05-20T07:45:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pandas as pd
import sys
# Plot the 'dynamic density' column against time from the CSV whose path is
# given as the first command-line argument.
df=pd.read_csv(sys.argv[1])
ax = plt.gca()
#df.plot(kind='line',x='frame',y='dynamic density', color = 'blue' , ax=ax)
df.plot(kind='line',x='time',y='dynamic density', color = 'red' , ax=ax)
plt.show() | [
"cs1190332@iitd.ac.in"
] | cs1190332@iitd.ac.in |
5f28a9aefa463f398ffd5a49c5f88a2014da21b2 | a8f3204139d7f68c23bd8411b8594899ba792e79 | /test/test_mgi.py | 9355c91926884348daa41be469666e8d52450f2a | [
"BSD-3-Clause"
] | permissive | switt4/sequana | 874189c869ccc07a592c0a6a3c77999adcabe025 | 7bd4f32607d62bebfd709628abc25bfda504761b | refs/heads/master | 2023-02-13T13:06:26.021426 | 2020-12-01T14:49:02 | 2020-12-01T14:49:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from sequana.mgi import MGI
from sequana import sequana_data
def test_mgi():
    """Smoke-test the MGI fqStat parser and its two plotting helpers."""
    stats = MGI(sequana_data("test_mgi.fqStat.txt"))
    stats.plot_acgt()
    stats.boxplot_quality()
| [
"cokelaer@gmail.com"
] | cokelaer@gmail.com |
416e3e836ff7e21d34d41fd5b7bed694f7d430b0 | c08ba954f014e0c768d3fa4ad55a0f21ca2e11c9 | /constants.py | 8da18b6a8530317f13adb8c8db167508f31bc281 | [] | no_license | k8k/Half-Full | 7821ba73fa2f78012a1205065758d544ad3b2133 | 58ab5b12d07d9359dab98294f4a03d5b584464b3 | refs/heads/master | 2021-01-25T03:48:13.429509 | 2014-12-14T22:48:04 | 2014-12-14T22:48:04 | 26,148,028 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,117 | py | import os
from instagram import client as instaclient
#Google Maps API Key for Typeahead & Lat Long
GOOGLE_MAPS_EMBED_KEY = os.environ.get("GOOGLE_MAPS_EMBED_KEY")
# Instagram Configuration (all credentials come from the environment)
INSTAGRAM_CONFIG = {
    'client_id': os.environ.get("INSTAGRAM_CLIENT_ID"),
    'client_secret': os.environ.get("INSTAGRAM_CLIENT_SECRET"),
    'access_token': os.environ.get("INSTAGRAM_ACCESS_TOKEN")
}
# Shared API client, constructed once at import time.
unauthenticated_api = instaclient.InstagramAPI(**INSTAGRAM_CONFIG)
# Foursquare Keys
FOURSQUARE_CLIENT_ID=os.environ.get('FOURSQUARE_CLIENT_ID')
FOURSQUARE_CLIENT_SECRET=os.environ.get('FOURSQUARE_CLIENT_SECRET')
# Twilio Keys, pulled in from system environment
TWILIO_ACCOUNT_SID = os.environ.get('TWILIO_ACCOUNT_SID')
TWILIO_AUTH_TOKEN = os.environ.get('TWILIO_AUTH_TOKEN')
TWILIO_NUMBER = os.environ.get('TWILIO_NUMBER')
#TWILIO MESSAGES
HELP_MESSAGE ="We didn't get that! please respond with 'City, Venue Name, SLAMMED / HALF FULL.\n(ex) 'San Francisco Blue Bottle SLAMMED'. "
ALTERNATE_INTRO = """If you meant one of the places below instead, just reply to this text with the corresponding number."""
| [
"kate.kuchin@gmail.com"
] | kate.kuchin@gmail.com |
587d5499f4095c8e2541f77a2b56546daa77f7a1 | ecee6e84ba18100b621c7e06f493ae48e44a34fe | /build/navigation/rotate_recovery/catkin_generated/pkg.develspace.context.pc.py | 8169a859e64afd3b5511ba8fc9b971732b77cb60 | [] | no_license | theleastinterestingcoder/Thesis | 6d59e06b16cbe1588a6454689248c88867de2094 | 3f6945f03a58f0eff105fe879401a7f1df6f0166 | refs/heads/master | 2016-09-05T15:30:26.501946 | 2015-05-11T14:34:15 | 2015-05-11T14:34:15 | 31,631,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 544 | py | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated by catkin for the rotate_recovery package (see the
# pkg.context.pc.in template referenced above) -- do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/alfred/quan_ws/src/navigation/rotate_recovery/include".split(';') if "/home/alfred/quan_ws/src/navigation/rotate_recovery/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;pluginlib".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lrotate_recovery".split(';') if "-lrotate_recovery" != "" else []
PROJECT_NAME = "rotate_recovery"
PROJECT_SPACE_DIR = "/home/alfred/quan_ws/devel"
PROJECT_VERSION = "1.13.0"
| [
"quanzhou64@gmail.com"
] | quanzhou64@gmail.com |
02643e2e9992682bb3f5505b35db70922508a643 | 0425458c687aebf9129966955e75c3f2785ebc48 | /settings.py | 9af562aebcf5ddc75fa99675634756c42c0a17d6 | [] | no_license | alexxandre80/Projet_Python | 0e1682af7f88542e888fbf9e2513d71f2ee46885 | fcdb9e3e8553d5e9a2679d219e6ce1d649a3091b | refs/heads/master | 2020-04-23T17:18:12.705162 | 2019-02-18T17:31:53 | 2019-02-18T17:31:53 | 168,374,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,084 | py | import pygame
from pygame.locals import *
# Window title:
windowTitle = "Worms Python"
# Screen size settings:
screen_width, screen_height = 1260, 640
screen_size = (screen_width, screen_height)
# Game FPS:
framesPerSecond = 90
# Color settings (RGB tuples):
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
GRAY = (64, 64, 64)
YELLOW = (255, 255, 0)
BLUE = (0, 0, 255)
RED = (255, 0, 0)
GREEN = (0, 255, 0)
CYAN = (0, 255, 255)
PINK = (255, 0, 255)
INFINANCE = (172, 58, 56)
# Player settings:
playerScale = 1
playerTurn = "1"
# Terrain settings:
terrainCollision = True
blockScale = 0.15
terrainPreset = 1
terrainGenRandom = True
minTerrainHeight = 1
maxTerrainHeight = 40
smooth_factor = 5
# Action bar settings:
showBombSelector = True
actionBarScale = 1.5
actionBarPosition = "top_center"
iconBombScale = 0.20
iconRoquetteScale = 0.3
# Game state:
done = None
winner = None
loser = None
# Bomb state:
lastBombPosition = (0, 0)
bombHit = False
# Explosion settings:
explosionScale = 1
# Main Screen:
game_screen = "Main Screen"
| [
"mandra80080@gmail.com"
] | mandra80080@gmail.com |
a501a4ab05f3d9e89675e2356cd1b41b8b15c30b | a995f917e307be0d427cc9cfd3dbdd045abdd097 | /算法基础课/1.基础算法/AcWing 787. 归并排序.py | e744c5c260576841526218d12f96711f9577710f | [] | no_license | Andrewlearning/Leetcoding | 80d304e201588efa3ac93626021601f893bbf934 | 819fbc523f3b33742333b6b39b72337a24a26f7a | refs/heads/master | 2023-01-02T00:58:03.505520 | 2020-10-20T15:21:08 | 2020-10-20T15:21:08 | 243,919,298 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | def merge_sort(arr, l, r, temp):
    """Sort arr[l..r] (inclusive) in place via merge sort, using temp
    as scratch space of at least r - l + 1 elements."""
    if l >= r:
        return
    # 1. pick the midpoint
    mid = (l + r) // 2
    # 2. recursively sort both halves
    merge_sort(arr, l, mid, temp)
    merge_sort(arr, mid + 1, r, temp)
    # 3. merge step: i and j walk the left and right halves of arr
    i = l
    j = mid + 1
    # write index into temp
    k = 0
    while (i <= mid and j <= r):
        if arr[i] <= arr[j]:
            temp[k] = arr[i]
            i += 1
        else:
            temp[k] = arr[j]
            j += 1
        k += 1
    # drain whichever half still has elements left
    while i <= mid:
        temp[k] = arr[i]
        i += 1
        k += 1
    while j <= r:
        temp[k] = arr[j]
        j += 1
        k += 1
    # temp now holds the merged (sorted) run;
    # copy it back into arr[l..r]
    i, j = l, 0
    while i <= r:
        arr[i] = temp[j]
        i += 1
        j += 1
if __name__ == '__main__':
    # Read the element count, then the elements; sort and echo them.
    n = int(input())
    values = list(map(int, input().split()))
    scratch = [0] * n
    merge_sort(values, 0, len(values) - 1, scratch)
    print(' '.join(map(str, values)))
# Source: https://www.acwing.com/activity/content/code/content/111492/
| [
"yifu3@ualberta.ca"
] | yifu3@ualberta.ca |
54d4ba95c89de405777782330930d841dd39e5ad | 0b0d9981b2cd7ac7bb2a217beddcd8bcb2be4980 | /demo/aep_modbus_device_management_demo.py | a93328d91bed837d4ec19d93f3d4842451be9106 | [] | no_license | zztttt/iot | aab18871ab2c143b86f76976871691c105e535a5 | bb740f63a4381d5f6be1eb108bfcd476877cb31f | refs/heads/master | 2023-01-24T06:19:06.740411 | 2020-12-01T07:34:04 | 2020-12-01T07:34:04 | 315,022,535 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | #!/usr/bin/python
# encoding=utf-8
import sys
sys.path.append('..')
import apis.aep_modbus_device_management
# Smoke-test for the AEP Modbus device-management API: exercises update,
# create, query (single and list) and delete in sequence, printing each
# raw result.
# NOTE(review): the key/secret/token literals below appear to be shared
# demo credentials -- confirm they are safe to keep in source control.
if __name__ == '__main__':
    result = apis.aep_modbus_device_management.UpdateDevice('dFI1lzE0EN2', 'xQcjrfNLvQ', 'cd35c680b6d647068861f7fd4e79d3f5', '10015488test', '{}')
    print('result='+str(result))
    result = apis.aep_modbus_device_management.CreateDevice('dFI1lzE0EN2', 'xQcjrfNLvQ', '{}')
    print('result='+str(result))
    result = apis.aep_modbus_device_management.QueryDevice('dFI1lzE0EN2', 'xQcjrfNLvQ', 'cd35c680b6d647068861f7fd4e79d3f5', '10015488test', 10015488)
    print('result='+str(result))
    result = apis.aep_modbus_device_management.QueryDeviceList('dFI1lzE0EN2', 'xQcjrfNLvQ', 'cd35c680b6d647068861f7fd4e79d3f5', 10015488)
    print('result='+str(result))
    result = apis.aep_modbus_device_management.DeleteDevice('dFI1lzE0EN2', 'xQcjrfNLvQ', 'cd35c680b6d647068861f7fd4e79d3f5', 10015488)
    print('result='+str(result))
| [
"1315691850@qq.com"
] | 1315691850@qq.com |
cc2bdd9dbe3a97d3b2fb7568fffce836af1dff10 | 379219403ddb213c32d212bba51023c35e9f4753 | /aula13_ex47.py | 6d52171532464aa15adda9e139ea2cb07722f93e | [] | no_license | fpavanetti/python_lista_de_exercicios | fdb1ec09ee528a6c8cef4ede022fc7f4064c129a | 6a8f69d3d87b255038369bd4a8c5680062941330 | refs/heads/main | 2023-04-23T02:42:14.303989 | 2021-05-07T16:44:11 | 2021-05-07T16:44:11 | 365,290,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 441 | py | '''
DESAFIO 47 - CONTAGEM REGRESSIVA
Faça um programa que mostre na tela uma
contagem regressiva para o estouro de
fogos de artíficio, indo de 10 até 0,
com uma pausa de 1 segundo entre eles.
'''
from time import sleep
import emoji
print("Contagem regressiva para o ano novo!")
for c in range (10, 0-1, -1):
print(c)
sleep(1)
print(emoji.emojize(":fireworks: \033[1;33m FELIZ ANO NOVO!!! \033[m :fireworks:")) | [
"noreply@github.com"
] | fpavanetti.noreply@github.com |
bf070a7332f5a7b28abb2b2406721c387ecb1598 | d03c7a8d8f0a23252fb93b36068f93b99d298918 | /SAMSUNG/BOJ_14499.py | 1591055282f18a1e5a84cafad97522a0d37e60f1 | [] | no_license | mlleo/BOJ | 454e67ae29dd712066af42f8151848b7b653c241 | bbbcdb6ffd25c58e178547bdd088750f0a4fc414 | refs/heads/master | 2023-03-26T14:19:48.178936 | 2021-03-26T07:56:09 | 2021-03-26T07:56:09 | 349,947,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | import sys
def copy(x,y):
    """Exchange values between board cell (x, y) and the dice's bottom face.

    If the cell holds 0, the dice's bottom face value (dice[6]) is stamped
    onto the cell; otherwise the cell value is copied onto the dice's
    bottom face and the cell is reset to 0.
    """
    cell = maps[x][y]
    if cell != 0:
        dice[6] = cell
        maps[x][y] = 0
    else:
        maps[x][y] = dice[6]
n, m, x, y, k = map(int, sys.stdin.readline().split()) # rows, cols, dice position (x, y), command count
maps = []
for _ in range(n):
    maps.append(list(map(int, sys.stdin.readline().split())))
command = list(map(int, sys.stdin.readline().split()))
# dice faces 1..6 (index 0 unused); dice[6] is treated as the bottom face
dice = [0] * 7
for com in command: # process each roll command in order
    if com == 1: # east
        if y == m - 1: # would leave the board: ignore the command
            continue
        else:
            # rotate faces for an eastward roll
            tmp = dice[1]
            dice[1] = dice[3]
            dice[3] = dice[6]
            dice[6] = dice[4]
            dice[4] = tmp
            y += 1
            copy(x,y)
    elif com == 2: # west
        if y == 0: # would leave the board: ignore the command
            continue
        else:
            # rotate faces for a westward roll (inverse of east)
            tmp = dice[4]
            dice[4] = dice[6]
            dice[6] = dice[3]
            dice[3] = dice[1]
            dice[1] = tmp
            y -= 1
            copy(x,y)
    elif com == 3: # north
        if x == 0: # would leave the board: ignore the command
            continue
        else:
            # rotate faces for a northward roll
            tmp = dice[2]
            dice[2] = dice[1]
            dice[1] = dice[5]
            dice[5] = dice[6]
            dice[6] = tmp
            x -= 1
            copy(x,y)
    elif com == 4: # south
        if x == n - 1: # would leave the board: ignore the command
            continue
        else:
            # rotate faces for a southward roll (inverse of north)
            tmp = dice[2]
            dice[2] = dice[6]
            dice[6] = dice[5]
            dice[5] = dice[1]
            dice[1] = tmp
            x += 1
            copy(x, y)
print(dice[1]) # print the top face after every valid move
| [
"noreply@github.com"
] | mlleo.noreply@github.com |
ea626ed0b36889cc3fcec0d07a45a29536374c8c | 79aebf106e030dcb936cdb42151bf35d9eb68b58 | /fileComplaint.py | baff7e1bf0ff269441b58b1de2ca8d3fab36611d | [] | no_license | XiaoyanZhang0999/CS322-Software-Desgn | f17d3fad968c615a9195f3de3559041896eafd40 | 00af19891dd1705cbef880626d614550c5be4ea9 | refs/heads/master | 2020-05-01T18:48:04.914447 | 2019-03-25T17:24:14 | 2019-03-25T17:24:14 | 177,632,168 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,143 | py | from file import *
class File_Complaint(File):
    """A complaint filed against a file (child of ``File``).

    NOTE(review): kept separate from the user-complaint class so each
    complaint type can inherit from the class it targets (per the
    trailing comments in this module).
    """
    # class attribute: human-readable complaint type shared by all instances
    complaint = "File Complaint"
    def __init__(self, complaint_date, complaint_id, file_id, complaint_author, complaint_text):
        """Store the complaint's date, ids, author and message text."""
        self.complaint_date = complaint_date
        self.complaint_id = complaint_id
        self.file_id = file_id
        self.complaint_author = complaint_author
        self.complaint_text = complaint_text
    def complaint_information(self):
        """Return a formatted multi-line summary of this complaint."""
        return '''
	Complaint Information:
	Type: {}
	Date: {}
	ID: {}
	File: {}
	Author: {}
	Message: {}'''.format(self.complaint, self.complaint_date, self.complaint_id, self.file_id, self.complaint_author, self.complaint_text)
# Ad-hoc smoke test: build a sample complaint and print its summary.
# (datetime appears to come from the `file` star-import above — TODO confirm.)
my_complaint = File_Complaint(datetime.date.today(), 321, 123, "Julia", "I don't like this file")
print(my_complaint.complaint_information())
# I'm creating two different complaint classes because I think it will be easier to make them children of another class
# One complaint class is a user complaint so is a child of user and the other is a file complaint so child of file class
| [
"noreply@github.com"
] | XiaoyanZhang0999.noreply@github.com |
6cda8a9b621d7b22747cb24005da8d573c68d285 | 8989c5fd9247560b71d6654ef6eae97f8339d691 | /scripts/pre_processing.py | f5ca114f1b880ca030b23644c307dd4dd1171094 | [
"MIT"
] | permissive | rohanjsuresh/extracted_keyword_validation | 649b36814a0f2bbb9a74f7ad4521104e99b0bd5e | 94e56c645c066d9d20097433b1716b3e76625b3d | refs/heads/main | 2023-07-04T06:59:56.292572 | 2021-08-04T00:37:24 | 2021-08-04T00:37:24 | 329,327,893 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | from keyword_relation.models import Keyword_Pages
from django.shortcuts import render
from django.contrib.staticfiles.storage import staticfiles_storage
from django.conf.urls.static import static
import os
from django.conf import settings
import numpy as np
import random
from gensim.models import Word2Vec
import wikipedia
import networkx as nx
import random
import matplotlib.pyplot as plt
from gensim.models import Word2Vec
from nltk.corpus import wordnet
import pandas
import wikipediaapi
wiki_wiki = wikipediaapi.Wikipedia('en')
def run():
    """Backfill missing graph embeddings and Wikipedia paths for keywords.

    For every ``Keyword_Pages`` row:
      * if ``google_graph_embedding`` is empty, look up the most similar
        keywords in the pre-trained graph-embedding Word2Vec model and
        store them as a '|'-separated string;
      * if ``wiki_path`` is empty, BFS Wikipedia links from the keyword
        toward "Glossary of computer science" and store the most probable
        path as an ' --> '-separated string ("N/A" when no path is found).
    """
    print("START")
    all_keywords = Keyword_Pages.objects.all()
    # Lazily-loaded Word2Vec model. Loading it once (instead of once per
    # keyword, as before) avoids re-reading the model file from disk on
    # every row that is missing an embedding.
    model = None
    count = 0
    for keyword in all_keywords:
        count += 1
        if count % 1000 == 0:
            print(count)
        # Fill in the related-keywords embedding if missing.
        if keyword.google_graph_embedding == "":
            if model is None:
                model_path = os.path.join(settings.STATIC_ROOT,'../models/related_keywords_graph_embedding.model')
                model = Word2Vec.load(model_path)
            keyword.google_graph_embedding = find_similar_keywords(model, keyword.keyword)
            keyword.save()
        # Fill in the Wikipedia link path if missing.
        if keyword.wiki_path == "":
            print("Finding path for", keyword.keyword)
            visited = set()
            wiki_paths = wiki_bfs(keyword.keyword, "Glossary of computer science", visited, 0, [], 100)
            wiki_path = get_probability_score(wiki_paths)
            if wiki_path == "N/A":
                wiki_path_str = wiki_path
            else:
                wiki_path_str = " --> ".join(wiki_path)
            print(wiki_path_str)
            keyword.wiki_path = wiki_path_str
            keyword.save()
def wiki_bfs(source, target, visited, num_found, found_paths, iter_limit):
    """BFS over Wikipedia page links from `source` looking for `target`.

    Collects up to 3 distinct link paths ending at `target` (compared
    case-insensitively) and returns them as lists of lowercase page titles.
    `visited` is mutated in place. The search gives up after `iter_limit`
    consecutive dequeues without reaching the target; the counter resets
    each time a path is found.

    NOTE(review): `num_found` and `found_paths` are accepted but never used
    in this body — confirm whether callers rely on them.
    """
    queue = []
    visited.add(source)
    queue.append([source])
    iter_count = 0
    output = []
    while len(queue) > 0 and iter_count <= iter_limit:
        iter_count += 1
        path_attempt = queue.pop(0)
        v = path_attempt[-1]
        if v == target.lower():
            if path_attempt not in output:
                output.append(path_attempt)
            # Re-open the target so other queued paths can also terminate
            # there, and reset the give-up counter after each success.
            visited.remove(target.lower())
            iter_count = 0
            if len(output) == 3:
                return output
        try:
            v = wiki_wiki.page(v)
        except:
            # NOTE(review): bare except silently skips pages that fail to
            # load; consider narrowing to the wikipediaapi error types.
            continue
        edges = [x.lower() for x in v.links]
        index_push = 0
        # First pass: links whose titles overlap the target are promoted to
        # the front of the queue (exact matches go first).
        for edge in edges:
            if (edge in target.lower() or target.lower() in edge) and edge not in visited:
                visited.add(edge)
                new_path_attempt = path_attempt[:]
                new_path_attempt.append(edge)
                if edge == target.lower():
                    queue.insert(0, new_path_attempt)
                index_push += 1
                queue.insert(index_push, new_path_attempt)
        # Second pass: all remaining unvisited links go to the back.
        for edge in edges:
            if edge not in visited:
                visited.add(edge)
                new_path_attempt = path_attempt[:]
                new_path_attempt.append(edge)
                queue.append(new_path_attempt)
    return output
def get_probability_score(path):
    """Select one of several candidate Wikipedia link paths by score.

    Each page in a candidate path contributes 1 / (number of outgoing
    links); the candidate with the smallest summed score is returned.
    Returns the string "N/A" when no candidate paths were supplied.
    """
    if not path:
        return "N/A"
    scored = []
    for candidate in path:
        score = sum(1 / (len(wiki_wiki.page(step).links)) for step in candidate)
        scored.append((score, candidate))
    # Smallest summed score wins; first among ties, matching a stable sort.
    return min(scored, key=lambda pair: pair[0])[1]
# function to get related keywords
def find_similar_keywords(model, x):
    """Return up to 5 keywords most similar to ``x``, '|'-joined.

    Queries the Word2Vec model's ``most_similar`` and keeps the first five
    neighbours in order.

    Args:
        model: A gensim Word2Vec-like object exposing ``wv.most_similar``.
        x: The keyword to look up.

    Returns:
        A string like ``"a|b|c|d|e"`` (fewer entries if the model returns
        fewer), or ``"NA"`` when the lookup fails — e.g. ``x`` is not in
        the model's vocabulary.
    """
    try:
        # was a bare `except:`; Exception still covers the out-of-vocabulary
        # KeyError without swallowing KeyboardInterrupt/SystemExit.
        nodes = [node for node, _ in model.wv.most_similar(x)[:5]]
    except Exception:
        return "NA"
    return "|".join(nodes)
"rohan.suresh12@gmail.com"
] | rohan.suresh12@gmail.com |
972744e8cd7d968799613fd102bb9eb9d912e243 | 33195bfc9e62bb00ce54f050febb6a3a0929a34b | /ms_face_api/src/ms_face_api/face.py | f4e45a229df977cece372f41acae8dfe64ccfceb | [
"MIT"
] | permissive | LCAS/ros_web_apis | 8f48e08b52433d6d97173cac1debd45a41681110 | 4b42bcc3c970769e8c814525e566ae37b506f415 | refs/heads/master | 2021-10-24T20:33:27.444877 | 2019-03-28T15:54:42 | 2019-03-28T15:54:42 | 82,785,629 | 0 | 4 | MIT | 2019-03-28T15:54:43 | 2017-02-22T09:25:33 | Python | UTF-8 | Python | false | false | 6,400 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: face.py
Description: Face section of the Cognitive Face API.
"""
from . import util
def detect(image, face_id=True, landmarks=False, attributes=''):
    """Detect human faces in an image and returns face locations, and
    optionally with `face_id`s, landmarks, and attributes.

    Args:
        image: A URL or a file path or a file-like object represents an image.
        face_id: Optional parameter. Return `face_id`s of the detected faces or
            not. The default value is `True`.
        landmarks: Optional parameter. Return face landmarks of the detected
            faces or not. The default value is `False`.
        attributes: Optional parameter. Analyze and return the one or more
            specified face attributes in the comma-separated string like
            `age,gender`. Supported face attributes include age, gender,
            headPose, smile, facialHair, and glasses. Note that each face
            attribute analysis has additional computational and time cost.

    Returns:
        An array of face entries ranked by face rectangle size in descending
        order. An empty response indicates no faces detected. A face entry may
        contain the corresponding values depending on input parameters.
    """
    url = 'detect'
    # parse_image presumably yields headers plus either a JSON body (URL
    # input) or a binary body (file input) — see util.parse_image.
    headers, data, json = util.parse_image(image)
    params = {
        # Conditional expressions replace the legacy `x and 'true' or 'false'`
        # idiom (equivalent here since 'true' is truthy, but clearer).
        'returnFaceId': 'true' if face_id else 'false',
        'returnFaceLandmarks': 'true' if landmarks else 'false',
        'returnFaceAttributes': attributes,
    }
    return util.request('POST', url, headers=headers, params=params, json=json,
                        data=data)
def find_similars(face_id, face_list_id=None, face_ids=None,
                  max_candidates_return=20, mode='matchPerson'):
    """Given query face's `face_id`, to search the similar-looking faces from a
    `face_id` array or a `face_list_id`.

    Parameter `face_list_id` and `face_ids` should not be provided at the same
    time.

    Args:
        face_id: `face_id` of the query face. User needs to call `face.detect`
            first to get a valid `face_id`. Note that this `face_id` is not
            persisted and will expire in 24 hours after the detection call.
        face_list_id: An existing user-specified unique candidate face list,
            created in `face_list.create`. Face list contains a set of
            `persisted_face_ids` which are persisted and will never expire.
        face_ids: An array of candidate `face_id`s. All of them are created by
            `face.detect` and the `face_id`s will expire in 24 hours after the
            detection call. The number of `face_id`s is limited to 1000.
        max_candidates_return: Optional parameter. The number of top similar
            faces returned. The valid range is [1, 1000]. It defaults to 20.
        mode: Optional parameter. Similar face searching mode. It can be
            "matchPerson" or "matchFace". It defaults to "matchPerson".

    Returns:
        An array of the most similar faces represented in `face_id` if the
        input parameter is `face_ids` or `persisted_face_id` if the input
        parameter is `face_list_id`.
    """
    url = 'findsimilars'
    # Whichever of face_list_id / face_ids was omitted is sent as None;
    # presumably util.request or the service tolerates nulls — verify
    # against util.request before changing.
    json = {
        'faceId': face_id,
        'faceListId': face_list_id,
        'faceIds': face_ids,
        'maxNumOfCandidatesReturned': max_candidates_return,
        'mode': mode,
    }
    return util.request('POST', url, json=json)
def group(face_ids):
    """Divide candidate faces into groups based on face similarity.

    Args:
        face_ids: An array of candidate `face_id`s created by `face.detect`.
            The maximum is 1000 faces.

    Returns:
        one or more groups of similar faces (ranked by group size) and a
        messyGroup.
    """
    # `payload` instead of `json` avoids shadowing the stdlib module name.
    payload = {'faceIds': face_ids}
    return util.request('POST', 'group', json=payload)
def identify(face_ids, person_group_id, max_candidates_return=1,
             threshold=None):
    """Identify unknown faces from a person group.

    Args:
        face_ids: An array of query `face_id`s, created by the `face.detect`.
            Each of the faces are identified independently. The valid number of
            `face_ids` is between [1, 10].
        person_group_id: `person_group_id` of the target person group, created
            by `person_group.create`.
        max_candidates_return: Optional parameter. The range of
            `max_candidates_return` is between 1 and 5 (default is 1).
        threshold: Optional parameter. Confidence threshold of identification,
            used to judge whether one face belongs to one person. The range of
            confidence threshold is [0, 1] (default specified by algorithm).

    Returns:
        The identified candidate person(s) for each query face(s).
    """
    url = 'identify'
    # threshold may be None (meaning "use the service default"); it is sent
    # as-is — presumably dropped or serialized as null downstream.
    json = {
        'personGroupId': person_group_id,
        'faceIds': face_ids,
        'maxNumOfCandidatesReturned': max_candidates_return,
        'confidenceThreshold': threshold,
    }
    return util.request('POST', url, json=json)
def verify(face_id, another_face_id=None, person_group_id=None,
           person_id=None):
    """Verify whether two faces belong to a same person or whether one face
    belongs to a person.

    For face to face verification, only `face_id` and `another_face_id` is
    necessary. For face to person verification, only `face_id`,
    `person_group_id` and `person_id` is needed.

    Args:
        face_id: `face_id` of one face, comes from `face.detect`.
        another_face_id: `face_id` of another face, comes from `face.detect`.
        person_group_id: Using existing `person_group_id` and `person_id` for
            fast loading a specified person. `person_group_id` is created in
            `person_group.create`.
        person_id: Specify a certain person in a person group. `person_id` is
            created in `person.create`.

    Returns:
        The verification result.
    """
    # Build the payload directly as a literal for each mode instead of
    # updating an empty dict.
    if another_face_id:
        payload = {
            'faceId1': face_id,
            'faceId2': another_face_id,
        }
    else:
        payload = {
            'faceId': face_id,
            'personGroupId': person_group_id,
            'personId': person_id,
        }
    return util.request('POST', 'verify', json=payload)
| [
"marc@hanheide.net"
] | marc@hanheide.net |
d8d1812873a44c27109fa4743dfcfd87d8b54ca3 | d2f63dd0bb5bd8fa7e9ae4ca828cbfe710390f33 | /horizon/horizon/dashboards/nova/images_and_snapshots/snapshots/forms.py | aad9e6b93451418dbc9496b6625eebdf3778f553 | [
"Apache-2.0"
] | permissive | citrix-openstack/horizon | 4df36bec738a212cbb320b8ac4caf624a883815e | 7987e68f135895728f891c2377b589f701d8106e | HEAD | 2016-09-11T11:30:42.348228 | 2012-01-24T01:46:06 | 2012-01-24T01:46:06 | 2,492,995 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,224 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from openstackx.api import exceptions as api_exceptions
from horizon import api
from horizon import forms
LOG = logging.getLogger(__name__)
class CreateSnapshot(forms.SelfHandlingForm):
    """Self-handling form that snapshots a running Nova instance.

    On success, redirects to the images & snapshots index; on API failure,
    logs the exception and redirects back to the originating URL.
    """
    # tenant is fixed by context, so it travels as a hidden field
    tenant_id = forms.CharField(widget=forms.HiddenInput())
    # instance id is shown to the user but not editable
    instance_id = forms.CharField(widget=forms.TextInput(
        attrs={'readonly': 'readonly'}))
    name = forms.CharField(max_length="20", label=_("Snapshot Name"))

    def handle(self, request, data):
        """Create the snapshot; invoked by SelfHandlingForm on valid POST."""
        try:
            LOG.info('Creating snapshot "%s"' % data['name'])
            snapshot = api.snapshot_create(request,
                                           data['instance_id'],
                                           data['name'])
            # Fetch the instance only to show its name in the message.
            instance = api.server_get(request, data['instance_id'])
            messages.info(request,
                    _('Snapshot "%(name)s" created for instance "%(inst)s"') %
                    {"name": data['name'], "inst": instance.name})
            return shortcuts.redirect('horizon:nova:images_and_snapshots:'
                                      'index')
        except api_exceptions.ApiException, e:
            # NOTE(review): Python 2-only `except ..., e` syntax; `e.message`
            # below is likewise Python 2-only.
            msg = _('Error Creating Snapshot: %s') % e.message
            LOG.exception(msg)
            messages.error(request, msg)
            return shortcuts.redirect(request.build_absolute_uri())
| [
"gabriel@strikeawe.com"
] | gabriel@strikeawe.com |
6ac12416da38e678965da9e9743e555dbedb0c0b | 3c49747c4e0f71e2e4888323b424032ce41a8355 | /pyseir/rt/infer_rt.py | 2bc0ce480bc69904887c84966e1616fc73e91230 | [
"MIT"
] | permissive | epius/covid-data-model | f522c8c250e313323b4cea67dc4e09d23fd1276b | 8e8322db3d250fb41c46b275b3386927fc28538e | refs/heads/main | 2023-04-11T23:17:14.130708 | 2021-05-04T13:12:31 | 2021-05-04T13:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,457 | py | from typing import Optional
from dataclasses import dataclass
from datetime import timedelta
import structlog
import numpy as np
import numba
import math
import pandas as pd
from covidactnow.datapublic.common_fields import CommonFields
from scipy import stats as sps
from matplotlib import pyplot as plt
from libs.datasets import combined_datasets
from libs import pipeline
# `timeseries` is used as a local name in this file, complicating importing it as a module name.
from libs.datasets.timeseries import OneRegionTimeseriesDataset
from pyseir import load_data
from pyseir.utils import RunArtifact
import pyseir.utils
from pyseir.rt.constants import InferRtConstants
from pyseir.rt import plotting, utils
rt_log = structlog.get_logger(__name__)
SQRT2PI = math.sqrt(2.0 * math.pi)
@numba.vectorize([numba.float64(numba.float64, numba.float64, numba.float64)], fastmath=True)
def normal_pdf(x, mean, std_deviation):
    """Gaussian probability density N(mean, std_deviation) evaluated at `x`.

    Compiled with numba.vectorize, so it broadcasts elementwise over arrays.

    Args:
        x: Point at which to evaluate the density.
        mean: Mean of the distribution.
        std_deviation: Standard deviation of the distribution.
    """
    z = (x - mean) / std_deviation
    return math.exp(-0.5 * z ** 2) / (std_deviation * SQRT2PI)
@numba.njit(fastmath=True)
def pdf_vector(x, loc, scale):
    """Replacement for the scipy pdf function: out[i, j] = N(loc[j], scale).pdf(x[i])."""
    out = np.empty((x.size, loc.size))
    for row, value in enumerate(x):
        for col, center in enumerate(loc):
            out[row, col] = normal_pdf(value, center, scale)
    return out
@dataclass(frozen=True)
class RegionalInput:
    """Immutable wrapper bundling the combined timeseries for one region.

    Thin adapter so the Rt pipeline can be constructed from a Region, a
    fips code, or an already-loaded dataset.
    """

    # The region's combined timeseries dataset.
    _combined_data: OneRegionTimeseriesDataset

    @property
    def region(self):
        """Region this input belongs to (taken from the dataset)."""
        return self._combined_data.region

    @property
    def display_name(self) -> str:
        """Human-readable region label used in logs and figure titles."""
        return str(self.region)

    @property
    def timeseries(self) -> OneRegionTimeseriesDataset:
        """The underlying combined timeseries dataset."""
        return self._combined_data

    @staticmethod
    def from_regional_data(dataset: OneRegionTimeseriesDataset) -> "RegionalInput":
        """Wrap an already-loaded dataset."""
        return RegionalInput(_combined_data=dataset)

    @staticmethod
    def from_region(region: pipeline.Region) -> "RegionalInput":
        """Load the region's combined dataset and wrap it."""
        return RegionalInput(
            _combined_data=combined_datasets.RegionalData.from_region(region).timeseries
        )

    @staticmethod
    def from_fips(fips: str) -> "RegionalInput":
        """Build from a fips code by resolving it to a Region first."""
        return RegionalInput.from_region(pipeline.Region.from_fips(fips))
def run_rt(
    regional_input: RegionalInput,
    include_testing_correction: bool = False,
    figure_collector: Optional[list] = None,
) -> pd.DataFrame:
    """Top-level entry point for Rt (infection rate) inference.

    Loads and smooths the region's new-case series, then runs the
    RtInferenceEngine over it.

    Returns an empty DataFrame if inference was not possible.
    """
    case_series = _generate_input_data(
        regional_input=regional_input,
        include_testing_correction=include_testing_correction,
        figure_collector=figure_collector,
    )
    # Guard: bail out early when the data failed the filter requirements.
    if case_series is None:
        rt_log.warning(
            event="Infer Rt Skipped. No Data Passed Filter Requirements:",
            region=regional_input.display_name,
        )
        return pd.DataFrame()

    # display_name/regional_input are only needed for figure titles and
    # output artifact paths.
    engine = RtInferenceEngine(
        case_series, display_name=regional_input.display_name, regional_input=regional_input,
    )
    return engine.infer_all()
def _generate_input_data(
    regional_input: RegionalInput,
    include_testing_correction: bool,
    figure_collector: Optional[list],
) -> Optional[pd.Series]:
    """Load, filter, and smooth a region's new-case series for Rt inference.

    Data loading happens here so the RtInferenceEngine can stay agnostic to
    the aggregation level.

    Args:
        regional_input: Region whose combined timeseries is loaded.
        include_testing_correction: If True, include a correction for testing
            increases and decreases.
        figure_collector: Optional container receiving the diagnostic
            smoothing figure instead of writing it to disk.

    Returns:
        Date-indexed, gaussian-smoothed daily new-case series, or None when
        the data could not be loaded or failed the filter requirements.
    """
    # TODO: Outlier Removal Before Test Correction
    try:
        times, observed_new_cases = load_data.calculate_new_case_data_by_region(
            regional_input.timeseries,
            t0=InferRtConstants.REF_DATE,
            include_testing_correction=include_testing_correction,
        )
    except AssertionError:  # binding removed: the exception object was unused
        # load_data asserts on malformed inputs; log with traceback and treat
        # the region as having no usable data rather than crashing the run.
        rt_log.exception(
            event="An AssertionError was raised in the loading of the data for the calculation of "
            "the Infection Rate Metric",
            region=regional_input.display_name,
        )
        return None

    dates = [InferRtConstants.REF_DATE + timedelta(days=int(t)) for t in times]
    observed_new_cases = pd.Series(observed_new_cases, index=dates)

    return filter_and_smooth_input_data(
        observed_new_cases,
        dates,
        regional_input.region,
        figure_collector,
        rt_log.new(region=regional_input.display_name),
    )
def filter_and_smooth_input_data(
    cases: pd.Series,
    dates: list,
    region: pipeline.Region,
    figure_collector: Optional[dict],
    log: structlog.BoundLoggerBase,
) -> Optional[pd.Series]:
    """Apply minimum-data filters and gaussian smoothing to a case series.

    Args:
        cases: Date-indexed daily new-case counts.
        dates: Dates corresponding to `cases` (used for the diagnostic plot).
        region: Region the series belongs to (used for the artifact path).
        figure_collector: When provided, the diagnostic figure is stored
            under the key "1_smoothed_cases" instead of being saved to disk.
            Annotation fixed from Optional[list] to Optional[dict]: the
            collector is indexed by a string key below, so a list never
            worked here.
        log: Region-bound structlog logger.

    Returns:
        The smoothed series, or None when the series is too short or counts
        are too small to support inference.
    """
    MIN_CUMULATIVE_CASE_COUNT = 20
    MIN_INCIDENT_CASE_COUNT = 5

    requirements = [  # All Must Be True
        cases.count() > InferRtConstants.MIN_TIMESERIES_LENGTH,
        cases.sum() > MIN_CUMULATIVE_CASE_COUNT,
        cases.max() > MIN_INCIDENT_CASE_COUNT,
    ]

    smoothed = cases.rolling(
        InferRtConstants.COUNT_SMOOTHING_WINDOW_SIZE,
        win_type="gaussian",
        min_periods=InferRtConstants.COUNT_SMOOTHING_KERNEL_STD,
        center=True,
    ).mean(std=InferRtConstants.COUNT_SMOOTHING_KERNEL_STD)

    # TODO: Only start once non-zero to maintain backwards compatibility?

    # The smoothed series must also clear the incident-count threshold.
    requirements.append(smoothed.max() > MIN_INCIDENT_CASE_COUNT)
    if not all(requirements):
        return None

    # Diagnostic plot: raw counts vs. the smoothed curve, on a log scale.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    ax.set_yscale("log")
    chart_min = max(0.1, smoothed.min())  # log axis cannot start at 0
    ax.set_ylim((chart_min, cases.max()))
    plt.scatter(
        dates[-len(cases) :], cases, alpha=0.3, label=f"Smoothing of: cases",
    )
    plt.plot(dates[-len(cases) :], smoothed)
    plt.grid(True, which="both")
    plt.xticks(rotation=30)
    plt.xlim(min(dates[-len(cases) :]), max(dates) + timedelta(days=2))
    if not figure_collector:
        plot_path = pyseir.utils.get_run_artifact_path(region, RunArtifact.RT_SMOOTHING_REPORT)
        plt.savefig(plot_path, bbox_inches="tight")
        plt.close(fig)
    else:
        figure_collector["1_smoothed_cases"] = fig

    return smoothed
class RtInferenceEngine:
"""
This class extends the analysis of Bettencourt et al to include mortality data in a
pseudo-non-parametric inference of R_t.
Parameters
----------
data: DataFrame
DataFrame with a Date index and at least one "cases" column.
display_name: str
Needed for Figures. Should just return figures along with dataframe and then deal with title
and save location somewhere downstream.
regional_input: RegionalInput
Just used for output paths. Should remove with display_name later.
"""
    def __init__(
        self, cases: pd.Series, display_name, regional_input: RegionalInput, figure_collector=None,
    ):
        """Set up the engine for one region's smoothed case series.

        Args:
            cases: Date-indexed, already-smoothed daily new-case counts.
            display_name: Region label used in figure titles and logs.
            regional_input: Region wrapper; used for output artifact paths.
            figure_collector: Optional container collecting diagnostic
                figures instead of saving them to disk.
        """
        self.dates = cases.index
        self.cases = cases
        self.display_name = display_name
        self.regional_input = regional_input
        self.figure_collector = figure_collector

        # Copy the InferRtConstants onto the instance
        # (TODO: turn into class constants)
        self.r_list = InferRtConstants.R_BUCKETS
        self.window_size = InferRtConstants.COUNT_SMOOTHING_WINDOW_SIZE
        self.kernel_std = InferRtConstants.COUNT_SMOOTHING_KERNEL_STD
        self.default_process_sigma = InferRtConstants.DEFAULT_PROCESS_SIGMA
        self.ref_date = InferRtConstants.REF_DATE
        self.confidence_intervals = InferRtConstants.CONFIDENCE_INTERVALS
        self.min_cases = InferRtConstants.MIN_COUNTS_TO_INFER
        self.min_ts_length = InferRtConstants.MIN_TIMESERIES_LENGTH
        self.serial_period = InferRtConstants.SERIAL_PERIOD
        self.max_scaling_sigma = InferRtConstants.MAX_SCALING_OF_SIGMA
        self.scale_sigma_from_count = InferRtConstants.SCALE_SIGMA_FROM_COUNT
        self.tail_suppression_correction = InferRtConstants.TAIL_SUPPRESSION_CORRECTION
        self.smooth_rt_map_composite = InferRtConstants.SMOOTH_RT_MAP_COMPOSITE
        self.rt_smoothing_window_size = InferRtConstants.RT_SMOOTHING_WINDOW_SIZE
        self.min_conf_width = InferRtConstants.MIN_CONF_WIDTH
        self.log = structlog.getLogger(Rt_Inference_Target=self.display_name)
        self.log_likelihood = None  # TODO: Add this later. Not in init.
        self.log.info(event="Running:")
def evaluate_head_tail_suppression(self):
"""
Evaluates how much time slows down (which suppresses Rt) as series approaches latest date
"""
timeseries = pd.Series(1.0 * np.arange(0, 2 * self.window_size))
smoothed = timeseries.rolling(
self.window_size, win_type="gaussian", min_periods=self.kernel_std, center=True
).mean(std=self.kernel_std)
delta = (smoothed - smoothed.shift()).tail(math.ceil(self.window_size / 2))
return delta[delta < 1.0]
def highest_density_interval(self, posteriors, ci):
"""
Given a PMF, generate the confidence bands.
Parameters
----------
posteriors: pd.DataFrame
Probability Mass Function to compute intervals for.
ci: float
Float confidence interval. Value of 0.95 will compute the upper and
lower bounds.
Returns
-------
ci_low: np.array
Low confidence intervals.
ci_high: np.array
High confidence intervals.
"""
posterior_cdfs = posteriors.values.cumsum(axis=0)
low_idx_list = np.argmin(np.abs(posterior_cdfs - (1 - ci)), axis=0)
high_idx_list = np.argmin(np.abs(posterior_cdfs - ci), axis=0)
ci_low = self.r_list[low_idx_list]
ci_high = self.r_list[high_idx_list]
return ci_low, ci_high
    def make_process_matrix(self, timeseries_scale=InferRtConstants.SCALE_SIGMA_FROM_COUNT):
        """Build the gaussian process (transition) matrix used to turn one
        day's posterior into the next day's prior.

        Two refinements over a plain gaussian kernel:
        1) sigma is scaled up from its default when counts are low —
           proportional to 1/sqrt(timeseries_scale), capped at
           MAX_SCALING_OF_SIGMA — so sparse data diffuses the prior more;
        2) each row's smoothing window is truncated to be symmetric in R
           around the diagonal so the smoothing cannot move the posterior's
           argmax (see the matrix sketch below).

        Args:
            timeseries_scale: Typical (e.g. median) daily count used to
                scale sigma; 0 disables the scaling.

        Returns:
            (use_sigma, process_matrix): the sigma actually used and the
            row-normalized transition matrix over the R buckets.
        """
        # b grows as counts shrink; a caps the total scale-up of sigma.
        a = self.max_scaling_sigma
        if timeseries_scale == 0:
            b = 1.0
        else:
            b = max(1.0, math.sqrt(self.scale_sigma_from_count / timeseries_scale))
        use_sigma = min(a, b) * self.default_process_sigma

        # Build process matrix using optimized numba pdf function.
        # This function is equivalent to the following call, but runs about 50% faster:
        # process_matrix = sps.norm(loc=self.r_list, scale=use_sigma).pdf(self.r_list[:, None])
        process_matrix = pdf_vector(self.r_list, self.r_list, use_sigma)

        # When the gaussian is wide, much of it falls outside R = (0, 10),
        # making the smoothing asymmetric in R. For R < 1 that asymmetry can
        # push the prior's argmax by >10 R buckets (delta R = .2) per day —
        # a large systematic drift toward the middle (Reff = 5). Zero out
        # entries so each row's window is symmetric around the diagonal:
        # 1 0 0 0 0 0 ... 0 0 0 0 0 0
        # * * * 0 0 0 ... 0 0 0 0 0 0
        # ...
        # * * * * * * ... * * * * 0 0
        # * * * * * * ... * * * * * *
        # 0 0 * * * * ... * * * * * *
        # ...
        # 0 0 0 0 0 0 ... 0 0 0 * * *
        # 0 0 0 0 0 0 ... 0 0 0 0 0 1
        sz = len(self.r_list)
        for row in range(0, sz):
            if row < (sz - 1) / 2:
                process_matrix[row, 2 * row + 1 : sz] = 0.0
            elif row > (sz - 1) / 2:
                process_matrix[row, 0 : sz - 2 * (sz - row)] = 0.0

        # Re-normalize every row to sum to 1 after the truncation above.
        row_sums = process_matrix.sum(axis=1)
        for row in range(0, sz):
            process_matrix[row] = process_matrix[row] / row_sums[row]

        return use_sigma, process_matrix
def get_posteriors(self, dates, timeseries, plot=False):
    """
    Generate posteriors for R_t via iterative Bayesian updating.

    Parameters
    ----------
    dates: array-like
        Dates aligned one-to-one with ``timeseries``.
    timeseries: pd.Series
        New X per day (cases), typically already smoothed.
    plot: bool
        If True, plot the evolution of the posteriors.

    Returns
    -------
    dates: array-like or None
        Input dates over the subset of indices covered by the posteriors.
        (All three values are None when ``timeseries`` is empty.)
    posteriors: pd.DataFrame or None
        Posterior estimates over R_t for each timestamp (columns are days,
        rows are the R_t grid ``self.r_list``).
    start_idx: int or None
        Negative index of the first Rt value calculated from the input
        data series; callers use it to align ``dates`` with ``posteriors``.
        #TODO figure out why this value sometimes truncates the series
    """
    if len(timeseries) == 0:
        self.log.info("empty timeseries, skipping")
        return None, None, None
    else:
        self.log.info("Analyzing posteriors for timeseries")

    # (1) Calculate Lambda (the Poisson likelihood given the data) based on
    # the observed increase from t-1 cases to t cases.
    lam = timeseries[:-1].values * np.exp((self.r_list[:, None] - 1) / self.serial_period)

    # (2) Calculate each day's likelihood over R_t
    # Originally smoothed counts were rounded (as needed for sps.poisson.pmf below) which
    # doesn't work well for low counts and introduces artifacts at rounding transitions. Now
    # calculate for both ceiling and floor values and interpolate between to get smooth
    # behaviour
    ts_floor = timeseries.apply(np.floor).astype(int)
    ts_ceil = timeseries.apply(np.ceil).astype(int)
    ts_frac = timeseries - ts_floor
    likelihoods_floor = pd.DataFrame(
        data=sps.poisson.pmf(ts_floor[1:].values, lam),
        index=self.r_list,
        columns=timeseries.index[1:],
    )
    likelihoods_ceil = pd.DataFrame(
        data=sps.poisson.pmf(ts_ceil[1:].values, lam),
        index=self.r_list,
        columns=timeseries.index[1:],
    )
    # Interpolate between value for ceiling and floor of smoothed counts
    likelihoods = ts_frac * likelihoods_ceil + (1 - ts_frac) * likelihoods_floor

    # (3) Create the (now scaled up for low counts) Gaussian Matrix
    (current_sigma, process_matrix) = self.make_process_matrix(timeseries.median())

    # (3a) Normalize all rows to sum to 1
    process_matrix /= process_matrix.sum(axis=0)

    # (4) Calculate the initial prior. Gamma mean of "a" with mode of "a-1".
    prior0 = sps.gamma(a=2.5).pdf(self.r_list)
    prior0 /= prior0.sum()
    reinit_prior = sps.gamma(a=2).pdf(self.r_list)
    reinit_prior /= reinit_prior.sum()

    # Create a DataFrame that will hold our posteriors for each day
    # Insert our prior as the first posterior.
    posteriors = pd.DataFrame(
        index=self.r_list, columns=timeseries.index, data={timeseries.index[0]: prior0}
    )

    # We said we'd keep track of the sum of the log of the probability
    # of the data for maximum likelihood calculation.
    log_likelihood = 0.0

    # Initialize timeseries scale (used for auto sigma)
    scale = timeseries.head(1).item()

    # Setup monitoring for Reff lagging signal in daily likelihood
    monitor = utils.LagMonitor(debug=False)  # Set debug=True for detailed printout of daily lag

    # (5) Iteratively apply Bayes' rule
    loop_idx = 0
    for previous_day, current_day in zip(timeseries.index[:-1], timeseries.index[1:]):
        # Keep track of exponential moving average of scale of counts of timeseries
        scale = 0.9 * scale + 0.1 * timeseries[current_day]

        # Calculate process matrix for each day
        (current_sigma, process_matrix) = self.make_process_matrix(scale)

        # (5a) Calculate the new prior
        current_prior = process_matrix @ posteriors[previous_day]

        # (5b) Calculate the numerator of Bayes' Rule: P(k|R_t)P(R_t)
        numerator = likelihoods[current_day] * current_prior

        # (5c) Calculate the denominator of Bayes' Rule P(k)
        denominator = np.sum(numerator)

        # Execute full Bayes' Rule
        if denominator == 0:
            # Restart the baysian learning for the remaining series.
            # This is necessary since otherwise NaN values
            # will be inferred for all future days, after seeing
            # a single (smoothed) zero value.
            #
            # We understand that restarting the posteriors with the
            # re-initial prior may incur a start-up artifact as the posterior
            # restabilizes, but we believe it's the current best
            # solution for municipalities that have smoothed cases and
            # deaths that dip down to zero, but then start to increase
            # again.
            posteriors[current_day] = reinit_prior
        else:
            posteriors[current_day] = numerator / denominator

        # Monitors if posterior is lagging excessively behind signal in likelihood
        # TODO future can return cumulative lag and use to scale sigma up only when needed
        monitor.evaluate_lag_using_argmaxes(
            current_day=loop_idx,
            current_sigma=current_sigma,
            prev_post_am=posteriors[previous_day].argmax(),
            prior_am=current_prior.argmax(),
            like_am=likelihoods[current_day].argmax(),
            post_am=numerator.argmax(),
        )

        # Add to the running sum of log likelihoods
        # NOTE(review): when denominator == 0 this adds -inf; presumably
        # acceptable since that day was re-initialized — confirm.
        log_likelihood += np.log(denominator)
        loop_idx += 1

    self.log_likelihood = log_likelihood

    if plot:
        plotting.plot_posteriors(x=posteriors)  # Returns Figure.
        # The interpreter will handle this as it sees fit. Normal builds never call plot flag.

    start_idx = -len(posteriors.columns)
    return dates[start_idx:], posteriors, start_idx
def infer_all(self, plot=True) -> pd.DataFrame:
    """
    Infer R_t from all available data sources.

    Parameters
    ----------
    plot: bool
        If True, generate a plot of the inference (saved to the run
        artifact path or stored in ``self.figure_collector``).

    Returns
    -------
    inference_results: pd.DataFrame
        Columns containing MAP estimates and confidence intervals;
        empty DataFrame when no posteriors could be computed.
    """
    df_all = None
    df = pd.DataFrame()
    try:
        dates, posteriors, start_idx = self.get_posteriors(self.dates, self.cases)
    except Exception as e:
        rt_log.exception(
            event="Posterior Calculation Error", region=self.regional_input.display_name,
        )
        raise e
    # Note that it is possible for the dates to be missing days
    # This can cause problems when:
    # 1) computing posteriors that assume continuous data (above),
    # 2) when merging data with variable keys
    if posteriors is None:
        return pd.DataFrame()

    # MAP estimate is the mode of each day's posterior distribution.
    df[f"Rt_MAP__new_cases"] = posteriors.idxmax()

    # Add low/high confidence-interval columns for each requested CI level.
    for ci in self.confidence_intervals:
        ci_low, ci_high = self.highest_density_interval(posteriors, ci=ci)
        low_val = 1 - ci
        high_val = ci
        df[f"Rt_ci{int(math.floor(100 * low_val))}__new_cases"] = ci_low
        df[f"Rt_ci{int(math.floor(100 * high_val))}__new_cases"] = ci_high
    df["date"] = dates
    df = df.set_index("date")
    df_all = df

    # Composite columns currently mirror the new-cases-based estimates.
    df_all["Rt_MAP_composite"] = df_all["Rt_MAP__new_cases"]
    df_all["Rt_ci95_composite"] = df_all["Rt_ci95__new_cases"]

    # Correct for tail suppression
    suppression = 1.0 * np.ones(len(df_all))
    if self.tail_suppression_correction > 0.0:
        tail_sup = self.evaluate_head_tail_suppression()
        # Calculate rt suppression by smoothing delay at tail of sequence
        suppression = np.concatenate(
            [1.0 * np.ones(len(df_all) - len(tail_sup)), tail_sup.values]
        )
        # Adjust rt by undoing the suppression
        df_all["Rt_MAP_composite"] = (df_all["Rt_MAP_composite"] - 1.0) / np.power(
            suppression, self.tail_suppression_correction
        ) + 1.0

    # Optionally Smooth just Rt_MAP_composite.
    # Note this doesn't lag in time and preserves integral of Rteff over time
    for i in range(0, self.smooth_rt_map_composite):
        kernel_width = round(self.rt_smoothing_window_size / 4)
        smoothed = (
            df_all["Rt_MAP_composite"]
            .rolling(
                self.rt_smoothing_window_size,
                win_type="gaussian",
                min_periods=kernel_width,
                center=True,
            )
            .mean(std=kernel_width)
        )
        # Adjust down confidence interval due to count smoothing over kernel_width values but
        # not below .2
        df_all["Rt_MAP_composite"] = smoothed
        df_all["Rt_ci95_composite"] = (
            (df_all["Rt_ci95_composite"] - df_all["Rt_MAP_composite"])
            / math.sqrt(
                2.0 * kernel_width  # averaging over many points reduces confidence interval
            )
            / np.power(suppression, self.tail_suppression_correction / 2)
        ).apply(lambda v: max(v, self.min_conf_width)) + df_all["Rt_MAP_composite"]

    if plot:
        fig = plotting.plot_rt(df=df_all, display_name=self.display_name)
        if self.figure_collector is None:
            output_path = pyseir.utils.get_run_artifact_path(
                self.regional_input.region, RunArtifact.RT_INFERENCE_REPORT
            )
            fig.savefig(output_path, bbox_inches="tight")
        else:
            self.figure_collector["3_Rt_inference"] = fig

    if df_all.empty:
        self.log.warning("Inference not possible")
    else:
        df_all = df_all.reset_index(drop=False)  # Move date to column from index to column
        df_all[CommonFields.LOCATION_ID] = self.regional_input.region.location_id
    return df_all
| [
"noreply@github.com"
] | epius.noreply@github.com |
d3907705cdc10e25acb3a551dfcecb9608265182 | e02f2e3e159625b008ce94893f21504e2a62a95e | /send.py | c7a42c21e2f2551a6dca0f880fbe277190215bd5 | [] | no_license | hongjunhyeok/Code | c3ddcc917f576fe9a91666481d3ffaf1b72f29d4 | f1d25718a7c5b3b0d8ecfb012e15747761463b2c | refs/heads/master | 2021-05-13T21:14:56.288929 | 2018-01-13T06:52:53 | 2018-01-13T06:52:53 | 116,458,454 | 0 | 0 | null | 2018-01-08T09:44:07 | 2018-01-06T06:07:31 | null | UTF-8 | Python | false | false | 1,604 | py | from abc import ABCMeta
from abc import abstractmethod
class Sender(metaclass=ABCMeta):
    """Abstract base class for message senders.

    Concrete subclasses supply :meth:`send`; destination bookkeeping and
    the ``send_to`` template method are shared here.
    """

    def __init__(self, to=''):
        # Destination (phone number, fax number, email address, ...).
        self.to = to

    @abstractmethod
    def send(self, msg):
        """Deliver *msg* to ``self.to``; implemented by subclasses."""
        pass

    def set_to(self, to):
        """Replace the current destination with *to*."""
        self.to = to

    def send_to(self, to, msg):
        """Template method: fix the destination, then delegate to send().

        The overall procedure is defined here; *how* the message is sent
        is left to the subclass implementation of send().
        """
        self.to = to
        self.send(msg)
class Fax(Sender):
    """Sender that 'delivers' a message by printing a fax-style report."""

    def __init__(self, to):
        # Bug fix: the original rebound the local name (`to = 0`) and never
        # stored the destination, so self.to was unset and send() raised
        # AttributeError. Delegate to the base class to store it properly.
        super().__init__(to)

    def send(self, msg):
        """Print the fax number and message body to stdout."""
        print('=================')
        print('팩스번호 :', self.to)
        print('=================')
        print('내용 : ' + msg)
        print('=================')
class SMS(Sender):
    """Sender that 'delivers' a message by printing an SMS-style report."""

    def __init__(self, to):
        # Bug fix: the original __init__ was `pass`, so self.to was never
        # set and send() raised AttributeError. Store it via the base class.
        super().__init__(to)

    def send(self, msg):
        """Print the phone number and message body to stdout."""
        print('=================')
        print('전화번호 :', self.to)
        print('=================')
        print('내용 : ' + msg)
class Email(Sender):
    """Sender that 'delivers' a message by printing an email-style report."""

    def __init__(self, to):
        # Bug fix: the original rebound the local name (`to = ""`) and never
        # stored the destination, so self.to was unset and send() raised
        # AttributeError. Delegate to the base class to store it properly.
        super().__init__(to)

    def send(self, msg):
        """Print the email address and message body to stdout."""
        print('=================')
        print('이메일:', self.to)
        print('=================')
        print('내용 : ' + msg)
def send(s):
    """Prompt the user for a message body and dispatch it through *s*.

    *s* is any object exposing a ``send(msg)`` method (e.g. a Sender).
    """
    body = input('내용')
    s.send(body)
    print('전송했습니다.')
if __name__ == '__main__':
    # Demo driver: send a fax to a user-supplied number, then an email.
    to = input('수신자 번호를 입력하세요')
    fax = Fax(to)
    send(fax)
    # Bug fix: the original called `Email()` without the required `to`
    # argument and then `Email.send()` on the class itself (no instance,
    # no message), both of which raise TypeError. Construct an instance
    # and exercise it through the same helpers as the fax.
    email = Email('hong@naver.com')
    send(email)
    email.send_to('hong@naver.com', 'Hello Hong')
| [
"noreply@github.com"
] | hongjunhyeok.noreply@github.com |
1190c16c78aea4a60bd6c95b91fa8737499b53b0 | 95d32a98d0715816fd763c6df356069d91d74f33 | /021.py | 83ecc3eddb072db50e4b272f4ea5ba096ba4d2c3 | [] | no_license | jod35/coding-challenges | 1f65d08d92c143004f44eafd4922ec0dcb652a1f | 21cfa2853dac70055d2b20155e03dff1c235ee02 | refs/heads/master | 2022-12-14T22:31:37.344450 | 2020-09-18T19:47:51 | 2020-09-18T19:47:51 | 291,939,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | firstname=input("Enter your first name: ")
surname=input("Enter your surname: ")
print(f"{firstname} {surname} is of length {len(firstname+surname)}")
| [
"jodestrevin@gmail.com"
] | jodestrevin@gmail.com |
61ffd9051f2b0e3b9e6a1f7095c21855f59610cc | 05556e32155a8896a5e889011a2a18d0b50ca777 | /venv/bin/chardetect | d99436d194e2cdc83293a357ba3f51102cfe50fc | [] | no_license | viosonlee/pythonDemo | 6f660ebf9a61bfa1af553b5050987e6f0579f7e5 | 82b0f1e890c32360d12e0fd3525fd3e830a94d61 | refs/heads/master | 2020-08-17T21:55:13.392830 | 2019-10-17T06:08:05 | 2019-10-17T06:08:05 | 215,715,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 257 | #!/Users/lyf/PycharmProjects/pyDemo/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Console-script shim: strip any trailing "-script.py(w)" / ".exe"
    # from argv[0] so the program name reported in help/usage is clean,
    # then run chardetect's main() and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"772686608@qq.com"
] | 772686608@qq.com | |
7fc9b615e7df4e1127e7074d7fe91aa79268ee59 | b74cfe87d84e21ea35488528c20a78bc96e27fb6 | /boring/test/test_HP.py | 099d45c30e08bb1c27b86adc3302550823df27ce | [] | no_license | KL772/boring | 746dffc3c38705efac3b0dc574e86adae2f3e2e2 | 235640fe88075d88ae70f7012b991ac7d20effff | refs/heads/master | 2023-06-12T02:20:28.097938 | 2020-11-18T16:48:10 | 2020-11-18T16:48:10 | 306,090,999 | 0 | 1 | null | 2020-10-21T16:55:55 | 2020-10-21T16:55:53 | null | UTF-8 | Python | false | false | 1,941 | py | from __future__ import print_function, division, absolute_import
import unittest
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, n2
from openmdao.utils.assert_utils import assert_check_partials, assert_near_equal
from boring.util.spec_test import assert_match_spec
from boring.src.sizing.heat_pipe import OHP, FHP
class TestHP(unittest.TestCase):
    """Unit tests for the OHP and FHP heat-pipe sizing components."""

    def setUp(self):
        # Build a fresh OpenMDAO problem containing one OHP and one FHP
        # component; FHP inputs are promoted under pack-level names.
        p1 = self.prob = Problem(model=Group())
        p1.model.add_subsystem('ohp', subsys=OHP(num_nodes=1))
        p1.model.add_subsystem('fhp', subsys=FHP(num_nodes=1), promotes_inputs=[
            ('d_init','d_{init}'),('tot_len','L_{pack}'),('rho_FHP','rho_{HP}')
        ])
        # force_alloc_complex enables complex-step derivative checking.
        p1.setup(force_alloc_complex=True)
        p1.run_model()
        p1.model.list_outputs(values=True, prom_name=True)

    def test_OHP(self):
        # Regression check on the oscillating-heat-pipe mass output.
        assert_near_equal(self.prob.get_val('ohp.mass_OHP'), 385.90453402, tolerance=1.0E-5)

    def test_FHP(self):
        # Regression check on the flat-heat-pipe mass output.
        assert_near_equal(self.prob.get_val('fhp.fhp_mass'), 21256733.87035246, tolerance=1.0E-5)

    # def test_partials(self):
    #     data = self.prob.check_partials(out_stream=None, method='cs')
    #     assert_check_partials(data, atol=1e-10, rtol=1e-10)

    def test_FHP_io_spec(self):
        # Verify the FHP component's promoted I/O matches the design spec.
        p1 = self.prob = Problem(model=Group())
        self.prob.model.set_input_defaults('ref_len', 240.)
        self.prob.model.set_input_defaults('req_flux', 50.)
        p1.model.add_subsystem('fhp', subsys=FHP(num_nodes=1), promotes_inputs=['ref_len','req_flux',
            ('d_init','d_{init}'),('tot_len','L_{pack}'),('rho_FHP','rho_{HP}')
            ],
            promotes_outputs=[('fhp_mass','mass_{HP}'),('t_hp','t_{HP}')])
        p1.setup()
        #p1.model.list_inputs(prom_name=True)
        assert_match_spec(p1.model, 'Design_specs/heat_pipe.json')
if __name__ =='__main__':
unittest.main() | [
"jchin89@gmail.com"
] | jchin89@gmail.com |
629ee4e7c81ff4e9e6348c37e8a40cbe8d289e66 | 93feace1d6d86ff792f1601b6a30a8083b00c6b8 | /ch11/counter.py | 3546258a828b50ccb903feb4ad5b11c645d26c95 | [] | no_license | huazhicai/algorithm | 1b520d9a9d6599050d43557e4c597f5bfa05abb1 | 60ba03b1751c5586fb5ee2c724b11545832fe2ac | refs/heads/master | 2021-06-24T14:07:34.815019 | 2018-05-16T00:39:49 | 2018-05-16T00:39:49 | 78,935,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 644 | py | #!/usr/bin/env python
def counter(start_at=0):
    """Infinite counter coroutine.

    Yields the current count. A plain ``next()`` advances the count by
    one; ``send(value)`` resets the count to *value*, which is yielded
    on that same resumption.
    """
    current = start_at
    while True:
        sent = yield current
        current = sent if sent is not None else current + 1
if __name__ == '__main__':
    # Demo driver: exercises __next__, send, and close on the coroutine.
    print('initializing counter to start counting at 5')
    count = counter(5)
    print('calling count.__next__():', count.__next__())
    print('calling count.__next__():', count.__next__())
    # send(9) resets the count; the reset value is yielded immediately.
    print('calling count.send(9):', count.send(9))
    print('calling count.__next__():', count.__next__())
    # close() terminates the generator and returns None.
    print('calling count.close():', count.close())
    # Resuming after close() would raise StopIteration:
    # print('calling count.__next__():', count.__next__())
| [
"zhihua cai"
] | zhihua cai |
6cbad6fda2e9415b4cd7277f0b4755b46b47bb11 | d864b5fd0fd79f44c32372d160bdf11d94078199 | /tempCodeRunnerFile.py | ce1bb54cc5cd08358e9dda098fca08fd1bfedfd7 | [] | no_license | LakshayMahajan2006/JARVIS-3 | 0bc654360dbc91ccb4a30dbca71a54079fbb0e01 | 18fe18a53a43234dbc2722f6c1300700ea4cc38d | refs/heads/master | 2023-08-31T17:57:53.561936 | 2021-10-13T08:24:57 | 2021-10-13T08:24:57 | 416,650,365 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 111 | py | engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voices', voices[0].id) | [
"shashwat2910@gmail.com"
] | shashwat2910@gmail.com |
b3f1f12dee95f13f3ac1eb87d7cbd91e3c8e8663 | 1029a3ed15300fb7469dd0b06119248dcd8b08f9 | /setup.py | e96dc1beb8651f6e0cb9ce04b937bb1e11fff80e | [
"MIT"
] | permissive | itow0001/py_template | d3614aa195dd7414daf2e1f03695b8dd0af3b106 | 6ff36fcd541fcd03e4f8a9156394b8e62bb6f6bb | refs/heads/master | 2021-01-20T20:05:13.911803 | 2016-08-10T18:57:07 | 2016-08-10T18:57:07 | 65,408,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 914 | py | '''
Created on Apr 29, 2016
@author: iitow
'''
from setuptools import setup, find_packages
SRCDIR = 'src'
def readme():
    """Return the contents of README.rst for use as the long_description.

    Bug fix: in the original, the entire function body (the ``with`` block
    and ``return``) was accidentally enclosed in the triple-quoted
    docstring, so ``readme()`` always returned None even though ``setup()``
    calls it. Reads the file for real now, falling back to an empty string
    when README.rst is absent so ``setup()`` still runs from a bare
    checkout (preserving the old "no long description" behaviour).
    """
    try:
        with open('README.rst', 'r') as fobj:
            return fobj.read()
    except OSError:
        return ''
# Package metadata and build configuration for setuptools.
setup(
    name='py_template',
    version='1.0.1',
    description="This is a template to create python project easily",
    long_description=readme(),
    author='Ian Itow',
    author_email='itow0001@gmail.com',
    url='https://github.com/itow0001/TEMPLATE',
    license='MIT',
    classifiers=[
        'Topic :: Software Development :: Build Tools',
        'Programming Language :: Python :: 2.7',
    ],
    # Source lives under src/ (the "src layout"); discover packages there.
    package_dir={'': SRCDIR},
    packages=find_packages(SRCDIR),
    zip_safe=False,
    install_requires=[
    ],
    # Installs a `py_template` console command -> py_template.__main__:main
    entry_points={
        'console_scripts': ['py_template = py_template.__main__:main']
    },
    include_package_data=True,
) | [
"iitow@isilon.com"
] | iitow@isilon.com |
75b8143913b6c4d373157950ba13c70fae4c927d | 9e9dd4c12da03c2eff07589f528cb850542a0684 | /tests/test_cli.py | b157fc11cd10f1145d86d5e65a84707d57f63294 | [
"MIT"
] | permissive | nekomamoushi/readms | 2f9a5a732cad52995174d1842f15b780041092fa | 9fbf97226296a5b4978940213f44c398a571b9a0 | refs/heads/master | 2020-05-27T00:35:31.601961 | 2019-05-24T13:32:50 | 2019-05-24T13:32:50 | 188,426,008 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 229 | py | import pytest
from click.testing import CliRunner
from readms.cli import cli
def test_cli():
    """Smoke test: invoking the CLI with no args exits 0 and prints the banner."""
    runner = CliRunner()
    result = runner.invoke(cli)
    assert 0 == result.exit_code
    assert "Using readms" in result.output
| [
"germione.grangere@gmail.com"
] | germione.grangere@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.