blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7698b9375f450dc9b2276802d116b7060c6bade2 | 98066411e2f957ce7c8e74abea17db74bc111943 | /code/app.py | da8726e12552849b520175d741c2e439462c0c33 | [] | no_license | codepatina/incub8-API | 975c492d47b0196ffc982f4fa266fe702d7dc1b3 | 8cf27663a0f29c58a77d44cf1677331e82ed9d33 | refs/heads/master | 2022-12-14T09:13:54.414077 | 2020-06-19T23:57:09 | 2020-06-19T23:57:09 | 206,235,076 | 0 | 0 | null | 2022-12-08T10:47:22 | 2019-09-04T05:00:19 | Python | UTF-8 | Python | false | false | 1,758 | py | from flask import Flask, jsonify
from flask_restful import Api
from flask_jwt import JWT
from flask_cors import CORS
from db import db
from ma import ma
import os
from security import authenticate, identity
from resources.user import UserRegister, User, UserList
from resources.idea import IdeaCreator, Idea, IdeaList
from resources.contributor_idea import ContributorIdea, ContributorList
from resources.external_link import ExternalLinkManager
from resources.link_parser import LinkParser
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DATABASE_URL', 'postgresql://localhost/incub8dev')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
CORS(app)
app.secret_key = 'Nael Saif Khan'
api = Api(app)
db.init_app(app)
@app.before_first_request
def create_tables():
db.create_all()
jwt = JWT(app, authenticate, identity, )
api.add_resource(UserRegister, '/register')
api.add_resource(User, '/user/<string:username>')
api.add_resource(UserList, '/users')
api.add_resource(IdeaCreator, '/idea')
api.add_resource(IdeaList, '/ideas')
api.add_resource(ContributorIdea, '/contributor')
api.add_resource(ContributorList, '/contributors')
api.add_resource(ExternalLinkManager, '/link')
api.add_resource(LinkParser, '/link_parser/')
@jwt.auth_response_handler
def customized_response_handler(access_token, identity):
return jsonify({
'access_token': access_token.decode('utf-8'),
'user_id': identity.id,
'username': identity.username
})
@jwt.jwt_error_handler
def customized_error_handler(error):
return jsonify({
'message': error.description,
'code': error.status_code
}), error.status_code
if __name__ == '__main__':
ma.init_app(app)
app.run(port=5001) | [
"jamesjacobthomas7@gmail.com"
] | jamesjacobthomas7@gmail.com |
2829c7610b87c98b5841e0a328f53342ce230728 | e1e07d1df467c4aa25032c552405b2ec87039c21 | /gui.py | 8a86e2124248daeb92ce6b36cee90377dcd03367 | [] | no_license | Ailsaka/data-site1 | 0b539396d24d529a8e1c9a1367e676475f06b525 | f4408c4745a278df04031eef657b7cd4d89d9c5f | refs/heads/master | 2023-08-11T12:42:38.385328 | 2021-09-18T01:58:08 | 2021-09-18T01:58:08 | 407,728,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | import tkinter
top = tkinter.Tk()
top.mainloop()
| [
"343795009@qq.com"
] | 343795009@qq.com |
34ff077858f4ae33064ecedb7125229e30d88e37 | 2d8ec841d75acb7ca3c3d1117c06d951e9be0169 | /test/X13_TestRomantoInteger.py | 3d16e1d47bbcdcf1ab7b8cd7deb848d1167d0c76 | [] | no_license | mcceTest/leetcode_py | 040aee95ed23674b7e2fea899d22945b12f85981 | eb25b3e5866b51fbac10d4686966f2c546c4696f | refs/heads/master | 2021-06-27T02:04:56.856659 | 2021-01-08T03:14:56 | 2021-01-08T03:14:56 | 205,760,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 461 | py | import unittest
from X13_RomantoInteger import Solution
class TestSum(unittest.TestCase):
    """Unit tests for Solution.romanToInt (LeetCode #13, Roman to Integer)."""

    def test1(self):
        """Check conversions covering plain, subtractive, and mixed numerals."""
        sol = Solution()
        self.assertEqual(sol.romanToInt("III"), 3)
        self.assertEqual(sol.romanToInt("IV"), 4)
        self.assertEqual(sol.romanToInt("IX"), 9)
        self.assertEqual(sol.romanToInt("LVIII"), 58)
        self.assertEqual(sol.romanToInt("MCMXCIV"), 1994)
if __name__ == "__main__":
unittest.main() | [
"zhuxuyu@gmail.com"
] | zhuxuyu@gmail.com |
f3e0683296e93fce2124226a4e3038aa37254127 | 5c665f7c302eb5743b10cab85a6b62560b9ba23e | /myapp/models.py | 7347b5cceb75fbdab1b15524f8cf544b2f0cfc3c | [] | no_license | arabindamahato/Model_project | 45c9051582d0f19352002b6b0fbf53987e456bb7 | 9e59a1095b4a35837985ec464149b7b4b2c98460 | refs/heads/master | 2020-11-27T20:13:54.117267 | 2019-12-22T15:20:18 | 2019-12-22T15:20:18 | 229,587,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 688 | py | from django.db import models
from myapp.models import *
# Create your models here.
class Topic(models.Model):
    """A top-level topic grouping related web pages."""

    # The topic name itself is the primary key, so names must be unique.
    top_name = models.CharField(max_length=120, primary_key=True)

    def __str__(self):
        return self.top_name
class Webpage(models.Model):
    """A web page that belongs to a Topic."""

    # NOTE(review): field is named top_name but holds a Topic foreign key;
    # Django exposes the underlying column as top_name_id.
    top_name = models.ForeignKey(Topic, on_delete=models.CASCADE)
    # Page name is the primary key, so it must be globally unique.
    name = models.CharField(max_length=50, primary_key=True)
    url = models.URLField(unique=True)

    def __str__(self):
        return self.name
class Access_Details(models.Model):
    """Records the date on which a Webpage was accessed."""

    name = models.ForeignKey(Webpage, on_delete=models.CASCADE)
    # NOTE(review): unique=True allows only ONE access record per calendar
    # date across all pages -- confirm this is intended rather than
    # uniqueness per (page, date).
    date = models.DateField(unique=True)

    def __str__(self):
        return str(self.date)
| [
"abm21719@gmail.com"
] | abm21719@gmail.com |
5f077284b9378c25c9b257c5a84f50d6ad901a69 | c66b92cf71b02fbc77451172f8f797c26ac0194b | /inventory/inventory/settings.py | 9c263a57bc1f4a7c39b5c36fa96dcb6eff81ab6e | [] | no_license | partha-sharothi/inventory | 1900da7cf7add259752227d0758d58db7e6eaf6d | ad7f8ad2a835e2e8b81646496c45b82215135285 | refs/heads/main | 2023-04-01T10:24:32.540999 | 2021-03-11T12:47:26 | 2021-03-11T12:47:26 | 343,077,043 | 2 | 1 | null | 2021-02-28T11:26:07 | 2021-02-28T10:30:28 | null | UTF-8 | Python | false | false | 3,071 | py | """
Django settings for inventory project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%sk@paq&xv-*%%yx8rul2#u*p9zo!$r114cmtb5($oo04e4jr%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'inventory.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'inventory.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"p.sharothi@gmail.com"
] | p.sharothi@gmail.com |
8621072d0a559bab0f6b11891c97c6fcc0bbffd3 | 7f423ef4cce787ca1293a3a6eacb73cd8bb75140 | /practical_classes/prat_03/utils.py | 97eb220ba4b03ae321d903e5a63f0ef9c3d1b5dd | [] | no_license | EduRibeiro00/CovidForecast-feup-iart | e684e81e1f84b5f86a4b28910138ee5ee929fac3 | 375eb834e6759024fccedfd38e4ee43796e49e22 | refs/heads/master | 2022-12-24T02:04:32.163574 | 2020-10-03T12:18:03 | 2020-10-03T12:18:03 | 241,327,765 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | def create_initial_board():
board = []
for i in range(6): # 6 cells of height
line = []
for j in range(7): # 7 cells of width
line.append(' ')
board.append(line)
return board
def select_move_from_player(state, player_char):
    """Keep prompting the player for a column until a legal move is entered.

    Returns the successor state obtained by dropping player_char's piece
    into the chosen column of the given state.
    """
    while True:
        try:
            column = int(input("Select a column for the next play: "))
            next_state = state.get_move_for_column(player_char, column)
        except ValueError:
            # Non-numeric input is handled exactly like an illegal column.
            next_state = None
        if next_state is not None:
            return next_state
        print()
        print('Invalid option. Please try again.')
| [
"eribeiro306@gmail.com"
] | eribeiro306@gmail.com |
8c82696cc86a1efb213a7a192a0217afdb093508 | c40af6211b7e70621e505f64fb22597a1fc07753 | /makahiki/urls.py | 844c8e064a7e5039f59774315c449a15635b5579 | [
"MIT"
] | permissive | vijayanandau/KnowledgeShare | 4ab9dbd34f23125b0bda4af2bad8b7839cf05bce | 8b8764231de77cb8ae9ee2f63cc3c81d01af99a2 | refs/heads/master | 2016-08-04T09:13:09.839416 | 2013-05-09T04:52:39 | 2013-05-09T04:52:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,399 | py | """Defines the Main URLS."""
import os
from django.conf import settings
from django.conf.urls.defaults import url, patterns, include
from django.contrib import admin
from django.views.generic.simple import direct_to_template
from apps.admin.admin import sys_admin_site, challenge_designer_site, \
challenge_manager_site, developer_site
admin.autodiscover()
urlpatterns = patterns('',
# Main page.
url(r'^$', "apps.pages.views.root_index", name="root_index"),
# page urls
url(r'^test/$', "apps.pages.views.index", name="test_index"),
url(r'^home/$', "apps.pages.views.index", name="home_index"),
url(r'^help/$', "apps.pages.views.index", name="help_index"),
url(r'^learn/$', "apps.pages.views.index", name="learn_index"),
url(r'^profile/$', "apps.pages.views.index", name="profile_index"),
url(r'^energy/$', "apps.pages.views.index", name="energy_index"),
url(r'^water/$', "apps.pages.views.index", name="water_index"),
url(r'^news/$', "apps.pages.views.index", name="news_index"),
url(r'^win/$', "apps.pages.views.index", name="win_index"),
url(r'^advanced/$', 'apps.pages.views.index', name="advanced_index"),
url(r'^status/$', 'apps.pages.views.index', name="status_index"),
url(r'^pages/clear_cache/$', 'apps.pages.views.clear_cache', name="clear_cache"),
# system level
url(r'^log/', include('apps.managers.log_mgr.urls')),
url(r'^help/', include('apps.widgets.help.urls')),
url(r'^avatar/', include('apps.lib.avatar.urls')),
url(r'^facebook/$', include('apps.lib.facebook_api.urls')),
url(r'^account/cas/login/$', 'apps.lib.django_cas.views.login', name='cas_login'),
url(r'^account/cas/logout/$', 'apps.lib.django_cas.views.logout', name='cas_logout'),
url(r'^account/login/$', 'apps.managers.auth_mgr.views.login', name='account_login'),
url(r'^account/logout/$', 'apps.managers.auth_mgr.views.logout', name='account_logout'),
url(r'^admin/logout/$', 'apps.managers.auth_mgr.views.logout', name='admin_logout'),
#url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^sys_admin/', include(sys_admin_site.urls)),
url(r'^challenge_setting_admin/', include(challenge_designer_site.urls)),
url(r'^challenge_admin/', include(challenge_manager_site.urls)),
url(r'^developer_admin/', include(developer_site.urls)),
url(r'^admin/login-as/(?P<user_id>\d+)/$', 'apps.managers.auth_mgr.views.login_as',
name='account_login_as'),
url(r'^player/bulk_upload_form/$', 'apps.managers.player_mgr.views.bulk_upload_form',
name="bulk_upload_form"),
url(r'^player/bulk_create/$', 'apps.managers.player_mgr.views.bulk_create',
name="bulk_create"),
url(r'^landing/$', direct_to_template, {'template': 'landing.html'}, name='landing'),
url(r'^restricted/$', direct_to_template, {"template": 'restricted.html'}, name="restricted"),
url(r'^about/$', direct_to_template, {'template': 'about.html'}, name='about'),
url(r'^browser-check/$', direct_to_template, {'template': 'browser_check.html'},
name='browser_check'),
url(r'^coming-soon/$', direct_to_template, {'template': 'coming_soon.html'},
name='coming_soon'),
url(r'^badge-display/$', direct_to_template, {'template': 'admin/badge-display.html'},
name='badge-display'),
url(r'^theme-display/$', direct_to_template, {'template': 'theme.html'}, name="theme-display"),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
)
widgets = settings.INSTALLED_DEFAULT_WIDGET_APPS + \
settings.INSTALLED_COMMON_WIDGET_APPS + \
settings.INSTALLED_WIDGET_APPS
for widget in widgets:
if os.path.isfile(
"%s/apps/widgets/%s/urls.py" % (settings.PROJECT_ROOT, widget)):
urlpatterns += patterns('',
(r'^%s/' % widget, include('apps.widgets.%s.urls' % widget)), )
# use django to serve static files FOR NOW
urlpatterns += patterns('',
(r'^' + settings.STATIC_URL[1:] + '(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.STATIC_ROOT}), )
if settings.SERVE_MEDIA:
urlpatterns += patterns('',
(r'^' + settings.MEDIA_URL[1:] + '(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}), )
| [
"vijayanand_au@yahoo.com"
] | vijayanand_au@yahoo.com |
25d6f18b65829aab1932b3fa647b4706a108e3fe | e9ee8fd7cdb458810a978081024c91d974cd51b4 | /hackerrank/prob11.py | e3b20422b18d4a8127667281c45eb8dcc998af84 | [] | no_license | chunchuvishnuvardhan/hackerank-challenges- | aee712fafd535dc20b74e59c492b3a8e8098bd7f | 11328d066dfc8212f5649ec8dc1f8430e952b27b | refs/heads/main | 2023-03-02T15:19:45.209763 | 2021-02-06T10:02:18 | 2021-02-06T10:02:18 | 336,506,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,951 | py | def func(temp, l):
global m_sum, m
if len(l) == 0:
if temp > m_sum: m_sum = temp % m
return
for i in range(len(l[0])):
crap = ((temp % m) + ((l[0][i] ** 2) % m)) % m
func(crap, l[1:])
k, m = map(int, input().split())
l = []
for _ in range(k):
l.append(map(int, input().split())[1:])
m_sum = 0
temp = 0
for i in range(len(l[0])):
temp = (l[0][i] ** 2) % m
func(temp, l[1:])
print(m_sum)
t=int(input("enter the no fo test cases"))
def calculate(values):
    """Print the time difference (in seconds) between two log records.

    values is a list of two records, each a list of whitespace-split string
    fields; fields [0]..[4] appear to be date/time components and field [5]
    an HHMM-style integer.  NOTE(review): the exact field layout is not
    visible from this file -- confirm against the actual input format.
    """
    # Same date on both records: the difference comes only from field [5].
    if(values[0][0]==values[1][0] and values[0][1]==values[1][1] and values[0][2]==values[1][2] and values[0][3]==values[1][3] and values[0][4]==values[1][4]):
        # HHMM arithmetic: integer-divide by 100 for hours, mod 100 for minutes.
        substract=int(values[0][5])-int(values[1][5])
        hours=substract//100
        minutes=substract%100
        totaltime=hours*3600+minutes*60
        print(totaltime)
    else:
        timegap=int(values[0][5])-int(values[1][5])
        # NOTE(review): these two lines look broken -- .split() returns a
        # list, so int(<list slice>) and int(...)[6:8] will raise TypeError
        # at runtime.  The intent appears to be slicing an "HH:MM:SS" string.
        hours1=int((values[0][4].split())[0:2])*3600+int((values[0][4].split())[3:5])*60+int(values[0][4])[6:8]
        hours2=int((values[1][4].split())[0:2])*3600+int((values[1][4].split())[3:5])*60+int(values[1][4])[6:8]
        if(hours1>=hours2):
            hourst=hours1-hours2
        else:
            hourst=hours2-hours1
        # NOTE(review): the fields are strings (built via map(str, ...)), so
        # values[0][1] - values[1][1] subtracts str from str and will raise
        # TypeError -- int() was likely meant around each operand instead.
        if(int(values[0][1])>int(values[1][1])):
            daysgap = int(values[0][1] - values[1][1]) * 24 * 60 * 60
        else:
            daysgap = int(values[1][1] - values[0][1]) * 24 * 60 * 60
        totaltime=daysgap+hourst
        # Convert the HHMM gap from field [5] into seconds.
        hou=timegap//100
        min=timegap%100  # NOTE(review): shadows the builtin min()
        tt=hou*3600+min*60
        print(totaltime-tt)
for i in range(0,t):
values=[]
for j in range(0,2):
arr = list(map(str, input(' ').split()))
values.append(arr)
calculate(values)
dict={1:'jan',2:'feb',3:'mar',4:'apr',5:'may',6:'jun',7:'jul',8:'aug',9:'sep',10:'oct',11:'nov',12:'dec'}
dict1={31:'jan',28:'feb',31:'mar',30:'apr',31:'may',} | [
"noreply@github.com"
] | noreply@github.com |
47aab50071f5fbf7c8baf10497ba93d8d6178a68 | 228e92f8a3e04ae03c26cd15e98aa545336d4841 | /data_analysis_programming/data.py | 60f744b185c27d066b6165b8249f30918a14e1cd | [] | no_license | RiyaMokashi/CourseraCapstone | eb04f72ad441db1797158df8411425e226fa1a52 | 733dfc0eeb68e2d306c117c14ff409ef9c684c55 | refs/heads/master | 2021-07-20T00:38:37.381212 | 2020-06-10T03:41:22 | 2020-06-10T03:41:22 | 176,571,934 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import xml.etree.cElementTree as ET
import pprint
import re
import codecs
import json
import audit as street_name_auditor
lower = re.compile(r'^([a-z]|_)*$')
lower_colon = re.compile(r'^([a-z]|_)*:([a-z]|_)*$')
problemchars = re.compile(r'[=\+/&<>;\'"\?%#$@\,\. \t\r\n]')
CREATED = [ "version", "changeset", "timestamp", "user", "uid"]
def shape_element(element):
    """Shape one OSM XML element into a dict suitable for JSON export.

    Only 'node' and 'way' elements are converted; any other tag yields
    None.  Child tags carrying 'k'/'v' attributes are folded into nested
    dicts via handle_nested_keys(), and 'nd' children of ways are
    collected under 'node_refs'.
    """
    node = {}
    if element.tag == "node" or element.tag == "way" :
        node["id"] = element.get("id")
        node["type"] = element.tag
        node["visible"] = element.get("visible")
        # Group the version-control style attributes under one key.
        node["created"] = {
            "version" : element.get("version"),
            "changeset" : element.get("changeset"),
            "timestamp" : element.get("timestamp"),
            "user" : element.get("user"),
            "uid" : element.get("uid")
        }
        if element.get("lat") and element.get("lon"):
            # Position is stored as a [lat, lon] pair of floats.
            node["pos"] = [float(element.get("lat")), float(element.get("lon"))]
        for child in list(element):
            key_string = child.get("k")
            if key_string:
                # Tag child: normalize the key path and merge the value in.
                keys = process_key_string(key_string)
                node = handle_nested_keys(node, keys, child.get("v"))
            elif element.tag == "way":
                if child.tag == "nd":
                    # Way child referencing a node id; accumulate the refs.
                    if "node_refs" in node:
                        node["node_refs"].append(child.get("ref"))
                    else:
                        node["node_refs"] = [child.get("ref")]
        return node
    else:
        return None
def process_key_string(string):
    """Normalize an OSM tag key and split it on ':' into its path parts.

    Spaces and dots become underscores, '&' becomes '_and_', and the
    whole key is lower-cased before splitting.
    """
    normalized = string.lower()
    for old, new in ((' ', '_'), ('.', '_'), ('&', '_and_')):
        normalized = normalized.replace(old, new)
    return normalized.split(":")
def handle_nested_keys(node, keys, value):
    """Recursively merge a colon-split key path into the node dict.

    keys is the list produced by process_key_string(); the leaf key is
    cleaned through process_key_and_value() before assignment.  Returns
    the (mutated) node.
    """
    if len(keys) == 1:
        # Base case: clean the leaf key/value and store it.
        # Values cleaned to None are dropped.
        key, value = process_key_and_value(keys[0], value)
        if value != None:
            node[key] = value
    else:
        # Recursive case: descend one level, creating a sub-dict if needed.
        key = keys.pop(0)
        if key in node:
            sub_node = node[key]
        else:
            sub_node = {}
        if isinstance(sub_node, dict):
            # NOTE(review): if node[key] already holds a non-dict value,
            # the remaining key path (and its value) is silently discarded.
            node[key] = handle_nested_keys(sub_node, keys, value)
    return node
# Street-name corrections fed to audit.update_name(); kept as a
# single-element list because update_name expects a list of mappings.
street_name_mapping = [
    {'road': 'Road',
     'way': 'Way',
     'St': 'Street',
     # A comma was missing after this entry, which made Python concatenate
     # the two adjacent string literals and produced a SyntaxError at the
     # following ':'.
     'South 32ed Avenue East': 'South 32nd Avenue East',
     'Ave': 'Avenue'}
]
def process_key_and_value(key, value):
    """Return the cleaned (key, value) pair for a leaf OSM tag.

    'addr' keys are renamed to 'address'; street values are normalized
    through the audit module's update_name().  All other pairs pass
    through unchanged.
    """
    if key == 'addr':
        return 'address', value
    if key == 'street':
        return key, street_name_auditor.update_name(value, street_name_mapping)
    return key, value
def process_map(file_in, pretty = False):
    """Convert an OSM XML file into one JSON document per shaped element.

    Writes the documents line-by-line to '<file_in>.json' (indented when
    pretty is True) and returns them as a list of dicts.
    """
    file_out = "{0}.json".format(file_in)
    data = []
    with codecs.open(file_out, "w") as fo:
        for _, element in ET.iterparse(file_in):
            shaped = shape_element(element)
            if not shaped:
                # Elements other than node/way shape to None; skip them.
                continue
            data.append(shaped)
            if pretty:
                fo.write(json.dumps(shaped, indent=2)+"\n")
            else:
                fo.write(json.dumps(shaped) + "\n")
    return data
| [
"noreply@github.com"
] | noreply@github.com |
c6babe31c92579da0bd0978e802bcc919d75cb70 | 627be1a7560b60ecb548a7b901480ab373090b12 | /common/config_http.py | 345d92f1fbb133235b66b37a1b44877027098441 | [] | no_license | huangcaiyan/ai-api-test | b3e06930ac6bf32070896363ef6c31d853fe7878 | 3d6bcec4978403a53f0595f80205ac0b6b01119b | refs/heads/master | 2020-03-13T21:07:57.780205 | 2018-05-11T11:10:18 | 2018-05-11T11:10:18 | 131,288,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,337 | py | from common.common_info import *
import requests
import logging
import json
# from base.Log import MyLog as Log
class ConfigHttp:
    """Small helper that accumulates URL/headers/params/data for an HTTP
    request and issues it via the `requests` library."""

    def __init__(self):
        self.url = None      # full request URL, assembled by set_url()
        self.headers = {}
        self.params = {}     # NOTE(review): stored but not passed to requests.post()
        self.data = {}

    def set_url(self, url):
        """Join the given path onto the service base URL from common_info."""
        self.url = base_url + '/' + url

    def set_headers(self, headers):
        self.headers = headers

    def set_params(self, param):
        self.params = param

    def set_data(self, data):
        self.data = data

    def post(self):
        """POST self.data to self.url.

        Returns the requests Response, or None when the request fails.
        """
        try:
            response = requests.post(self.url, data=self.data, headers=self.headers)
            return response
        except requests.exceptions.RequestException:
            # Bug fix: `requests` raises its own exception hierarchy
            # (Timeout, ConnectionError, ...); the builtin TimeoutError
            # caught previously is never raised by requests.post, so
            # failures escaped the handler.
            logging.error('Time Out!')
            return None
if __name__ == '__main__':
    # Manual smoke test: POST a sample question to the QA service and
    # print the raw response object.
    print ('config_http')
    # NOTE(review): set_url() prepends base_url + '/', so passing a full
    # URL here produces "<base_url>/http://10.75.2.121:..." -- confirm
    # whether a relative path was intended.
    url = 'http://10.75.2.121:9200/api/qa'
    headers = {'Content-Type': 'application/json' , 'Accept': 'application/json'}
    data = json.dumps ({
        "question": '请问怎么选课' ,
        "aiRobotId": 175 ,
    })
    configHttp = ConfigHttp ()
    configHttp.set_url (url)
    configHttp.set_headers (headers)
    configHttp.set_data (data)
    result = configHttp.post ()
    print ('result=' , result)
| [
"374342187@qq.com"
] | 374342187@qq.com |
58ce0f362ccab2adca898c40948bd9c6215599a4 | 5f4c6abc2ef4ab2571a3add8e24627c4f43601c0 | /DCGAN/main.py | 2d3ff23f16b9ba92476a4ce0558028ed1befbf16 | [] | no_license | SmartAI/GAN | 34551b5302ab5f2d3e834c3bddac167208105b15 | 54e95039027a9b142b39f2888dfe69bf88b6cd0c | refs/heads/master | 2021-06-12T02:40:59.745229 | 2017-02-11T13:56:30 | 2017-02-11T13:56:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,824 | py | import os
import pprint
pp = pprint.PrettyPrinter()
import tensorflow as tf
from dcgan import DCGAN
from train import train
import inference
flags = tf.app.flags
# trainning params
flags.DEFINE_integer('epoch', 25, "Number of epochs to train")
flags.DEFINE_float('learning_rate', 0.0002, "Learning rate for Adam optimizer")
flags.DEFINE_float('beta1', 0.5, "Momentum term of Adam optimizer")
flags.DEFINE_integer('batch_size', 64, "Number of images in batch")
# model params
flags.DEFINE_integer("output_size", 64, "size of output images to produce")
flags.DEFINE_integer("z_dim", 100, "dimension of input noise vector")
flags.DEFINE_integer("c_dim", 3, "Dimension of image color")
flags.DEFINE_integer('gf_dim', 64, "Dimension of generator filters in first conv layer")
flags.DEFINE_integer('df_dim', 64, "Dimension of discriminator filters in first conv layer")
# dataset params
flags.DEFINE_string("data_dir", "data", "Path to datasets directory")
flags.DEFINE_string("dataset", "faces", "The name of dataset")
# flags for running
flags.DEFINE_string("experiment_name", "experiment", "Name of experiment for current run")
flags.DEFINE_boolean("train", False, "Train if True, otherwise test")
flags.DEFINE_integer("sample_size", 64, "Number of image to sample")
# directory params
flags.DEFINE_string("checkpoint_dir", "checkpoint", "Path to save the checkpoint data")
flags.DEFINE_string("sample_dir", "samples", "Path to save the image samples")
flags.DEFINE_string("log_dir", "logs", "Path to log for Tensorboard")
flags.DEFINE_string("image_ext", "jpg", "image extension to find")
FLAGS = flags.FLAGS
def main(_):
    """Entry point invoked by tf.app.run() (Python 2 / TF 1.x code).

    Builds the DCGAN model, prepares checkpoint/sample/log directories,
    restores the latest checkpoint when one exists, optionally trains,
    then renders sample images and a z-space visualization.
    """
    pp.pprint(FLAGS.__flags)
    with tf.Session() as sess:
        dcgan = DCGAN(sess, FLAGS)
        # Create the output directories on first run.
        if not os.path.exists(FLAGS.checkpoint_dir):
            os.makedirs(FLAGS.checkpoint_dir)
        if not os.path.exists(os.path.join(FLAGS.sample_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.sample_dir, dcgan.get_model_dir()))
        if not os.path.exists(os.path.join(FLAGS.log_dir, dcgan.get_model_dir())):
            os.makedirs(os.path.join(FLAGS.log_dir, dcgan.get_model_dir()))
        if dcgan.checkpoint_exists():
            print "Loading checkpoints"
            if dcgan.load():
                print "Success"
            else:
                raise IOError("Could not read checkpoints from {}".format(
                    FLAGS.checkpoint_dir))
        else:
            # Without a checkpoint we can only proceed when training was
            # requested; sampling from an untrained model is refused.
            if not FLAGS.train:
                raise IOError("No checkpoints found")
            print "No checkpoints found. Training from scratch"
            dcgan.load()
        if FLAGS.train:
            train(dcgan)
        # Always produce samples at the end, whether trained or restored.
        print "Generating samples..."
        inference.sample_images(dcgan)
        inference.visualize_z(dcgan)
inference.visualize_z(dcgan)
if __name__ == '__main__':
tf.app.run()
| [
"leomicv@gmail.com"
] | leomicv@gmail.com |
fd2a5e63757b0e5b0629c08709741ba588cd8f8b | 1ec5917fc2aa3ba64d3ca4e2e0454ab5e8784642 | /plugins/trezor/cmdline.py | 44b3c3c850b80c8d7e813d7b24f8b72551dd68d2 | [
"MIT"
] | permissive | htcang/CommerciumElectro | 19b5e6d072a0091a91f0d5cc51a0c9a8f92adcec | fc432c216a4c0668643cec7f4955f242b36369ec | refs/heads/master | 2020-03-22T11:37:00.894969 | 2018-07-14T16:44:37 | 2018-07-14T16:44:37 | 138,405,160 | 0 | 0 | MIT | 2018-06-23T13:54:52 | 2018-06-23T13:54:51 | null | UTF-8 | Python | false | false | 341 | py | from commerciumelectro.plugins import hook
from .trezor import TrezorPlugin
from ..hw_wallet import CmdLineHandler
class Plugin(TrezorPlugin):
    """Command-line flavour of the Trezor plugin: shares one
    CmdLineHandler across all keystores of this wallet type."""

    handler = CmdLineHandler()

    @hook
    def init_keystore(self, keystore):
        """Hook callback: attach the CLI handler to matching keystores."""
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler
| [
"imac@iMacs-iMac.local"
] | imac@iMacs-iMac.local |
7f71b8025b0da4af24e6f067a0fed3393f846eb2 | af259acdd0acd341370c9d5386c444da6a7a28a6 | /Supervised-Learning-with-scikit-learn/04-Preprocessing-and-pipelines/04-Dropping-missing-data.py | 438c80f1a8b57e4ed94a02048b0011d9457d637e | [] | no_license | pace-noge/datacamp | fcd544f6478040660f7149b1a37bfd957eef9747 | eeffb8af233e7304c0f122a48e6b4f78ee7c650e | refs/heads/master | 2020-07-04T12:41:29.635167 | 2019-09-17T10:11:39 | 2019-09-17T10:11:39 | 202,289,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,804 | py | """
Dropping missing data
The voting dataset from Chapter 1 contained a bunch of missing values that we dealt with for you behind the scenes. Now, it's time for you to take care of these yourself!
The unprocessed dataset has been loaded into a DataFrame df. Explore it in the IPython Shell with the .head() method. You will see that there are certain data points labeled with a '?'. These denote missing values. As you saw in the video, different datasets encode missing values in different ways. Sometimes it may be a '9999', other times a 0 - real-world data can be very messy! If you're lucky, the missing values will already be encoded as NaN. We use NaN because it is an efficient and simplified way of internally representing missing data, and it lets us take advantage of pandas methods such as .dropna() and .fillna(), as well as scikit-learn's Imputation transformer Imputer().
In this exercise, your job is to convert the '?'s to NaNs, and then drop the rows that contain them from the DataFrame.
INSTRUCTION
-----------
Explore the DataFrame df in the IPython Shell. Notice how the missing value is represented.
Convert all '?' data points to np.nan.
Count the total number of NaNs using the .isnull() and .sum() methods. This has been done for you.
Drop the rows with missing values from df using .dropna().
Hit 'Submit Answer' to see how many rows were lost by dropping the missing values.
"""
# Convert '?' to NaN
df[df == '?'] = np.nan
# Print the number of NaNs
print(df.isnull().sum())
# Print shape of original DataFrame
print("Shape of Original DataFrame: {}".format(df.shape))
# Drop missing values and print shape of new DataFrame
df = df.dropna()
# Print shape of new DataFrame
print("Shape of DataFrame After Dropping All Rows with Missing Values: {}".format(df.shape))
| [
"noreply@github.com"
] | noreply@github.com |
6a5009db67a6650b9b7693d632d057ea3dd70bd0 | cd70f13d69ef249798a4bee64581125b88663987 | /scripts/roscore.py | 38ea7b3b8b6f16c7d6401f116512b7304d4a524f | [] | no_license | Trofleb/ROS-Android | 04afccd7b34e38b29a8fc8846cae43d4f02d5d11 | cc93fef12cc8b26d15153978e9c0cfd1442ceb68 | refs/heads/master | 2021-01-25T10:50:15.496421 | 2017-06-11T15:39:53 | 2017-06-11T15:39:53 | 93,882,124 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,254 | py | #!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys
from optparse import OptionParser
from rosmaster.master_api import NUM_WORKERS
import os
# Set environnement variables
os.environ["ROS_HOME"] = "/sdcard/.ros"
os.environ["ROS_MASTER_URI"] = "http://localhost:1111"
os.environ["ROS_ROOT"] = "/sdcard/.ros"
os.environ["ROS_PACKAGE_PATH"] = "/sdcard/.ros"
NAME = 'roscore'
def _get_optparse():
    """Build the OptionParser describing roscore's command-line options.

    Returns the parser; actual argument parsing happens at module level
    below.  NUM_WORKERS comes from rosmaster.master_api.
    """
    parser = OptionParser(usage="usage: %prog [options]",
                          prog=NAME,
                          description="roscore will start up a ROS Master, a ROS Parameter Server and a rosout logging node",
                          epilog="See http://www.ros.org/wiki/roscore"
                          )
    # -p/--port: master port (only meaningful when the master is launched here).
    parser.add_option("-p", "--port",
                      dest="port", default=None,
                      help="master port. Only valid if master is launched", metavar="PORT")
    parser.add_option("-v", action="store_true",
                      dest="verbose", default=False,
                      help="verbose printing")
    # -w: size of the master's worker thread pool.
    parser.add_option("-w", "--numworkers",
                      dest="num_workers", default=NUM_WORKERS, type=int,
                      help="override number of worker threads", metavar="NUM_WORKERS")
    parser.add_option("-t", "--timeout",
                      dest="timeout",
                      help="override the socket connection timeout (in seconds).", metavar="TIMEOUT")
    return parser
parser = _get_optparse()
(options, args) = parser.parse_args(sys.argv[1:])
if len(args) > 0:
parser.error("roscore does not take arguments")
import roslaunch
roslaunch.main(['roscore', '--core'] + sys.argv[1:])
| [
"nicolas.casademont@epfl.ch"
] | nicolas.casademont@epfl.ch |
e83bb1733b029cb548d60aede85ca7d9119a0266 | 7d838dd9394c5d71055fb318bf4ddc0f3f6cfebc | /users/migrations/0001_initial.py | f3bb9443dbb5848b9f0457eb62584831303be7a8 | [] | no_license | zack294/django-blog | 1f10f2c08a169bed91b6289a116547744dbc5984 | 88de2d2186bb236c5be49b9d7dd5b66c011d6b0e | refs/heads/master | 2022-11-30T22:15:39.718577 | 2020-08-14T08:21:42 | 2020-08-14T08:21:42 | 287,505,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 777 | py | # Generated by Django 3.0.6 on 2020-07-30 12:19
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration for the users app: creates the Profile table."""

    # First migration of this app.
    initial = True

    dependencies = [
        # Depends on whichever model is configured as AUTH_USER_MODEL.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Profile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(default='default.jpg', upload_to='profile_pics')),
                # One profile per user; deleting the user deletes the profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| [
"59382089+zack294@users.noreply.github.com"
] | 59382089+zack294@users.noreply.github.com |
7cb848a19ea732db2c8ff988e3d899b87c141487 | 0ad82dcb39e3c35978b7923fffdbefaa0f86a9e2 | /a3.py | 8f4432666ba02b4d1743cd8cbc72c87a319bad27 | [] | no_license | pjc0618/ColorConversion | 6641fc614624fd8884a1722fd094a11695060622 | 760d789301f4da3e3fbffe6ef742795bb1badf3a | refs/heads/main | 2023-08-04T07:14:26.923196 | 2021-09-22T18:52:00 | 2021-09-22T18:52:00 | 409,320,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,110 | py | """
Functions for Assignment A3
This file contains the functions for the assignment. You should replace the stubs
with your own implementations.
Phil Cipollina(pjc272) and Luke Marcinkiewicz
October 4th, 2017
"""
import cornell
import math
def complement_rgb(rgb):
    """
    Returns: the complement of color rgb.

    Each channel of the complement is 255 minus the original channel.

    Parameter rgb: the color to complement
    Precondition: rgb is an RGB object
    """
    inverted = [255 - channel for channel in (rgb.red, rgb.green, rgb.blue)]
    return cornell.RGB(inverted[0], inverted[1], inverted[2])
def round(number, places):
    """
    Returns: the number rounded to the given number of decimal places, as a float.

    Unlike the built-in round, any value whose digit just past the rounding
    position is 5 is rounded UP (the built-in uses round-half-to-even).

    Parameter number: the number to round to the given decimal place
    Precondition: number is an int or float

    Parameter places: the decimal place to round to
    Precondition: places is an int; 0 <= places <= 3
    """
    # Shift left so the rounding position sits just left of the decimal
    # point, nudge by one half, truncate, then shift back to the right.
    scale = 10 ** places
    truncated = int(number * scale + 0.5)
    return truncated / scale
def str5(value):
    """
    Returns: value as a string, expanded or rounded to exactly 5 characters.

    The decimal point counts as one of the five characters.

    Examples:
        str5(1.3546)  is '1.355'.
        str5(21.9954) is '22.00'.
        str5(130.59)  is '130.6'.
        str5(1)       is '1.000'.

    Parameter value: the number to conver to a 5 character string.
    Precondition: value is a number (int or float), 0 <= value <= 360.
    """
    text = str(value)
    point = text.find('.')
    # Round at whichever position leaves exactly 5 characters overall:
    # short whole numbers keep 4-len decimals, otherwise 4-point decimals.
    if point == -1 and len(text) < 5:
        text = str(round(value, 4 - len(text)))
    else:
        text = str(round(value, 4 - point))
    # Pad short results with trailing zeros up to 5 characters.
    if len(text) in (2, 3, 4):
        text = text + '0' * (5 - len(text))
    return text
def str5_cmyk(cmyk):
    """
    Returns: String representation of cmyk in the form "(C, M, Y, K)".

    Each of C, M, Y, and K is rendered by str5 and so is exactly 5 characters
    long; hence the output of this function is not the same as str(cmyk).

    Example: '(0.000, 31.37, 31.37, 0.000)' -- note the spaces after commas.

    Parameter cmyk: the color to convert to a string
    Precondition: cmyk is an CMYK object.
    """
    channels = (cmyk.cyan, cmyk.magenta, cmyk.yellow, cmyk.black)
    return '(' + ', '.join(str5(channel) for channel in channels) + ')'
def str5_hsv(hsv):
    """
    Returns: String representation of hsv in the form "(H, S, V)".

    Each of H, S, and V is rendered by str5 and so is exactly 5 characters
    long; hence the output of this function is not the same as str(hsv).

    Example: '(0.000, 0.314, 1.000)' -- note the spaces after the commas.

    Parameter hsv: the color to convert to a string
    Precondition: hsv is an HSV object.
    """
    channels = (hsv.hue, hsv.saturation, hsv.value)
    return '(' + ', '.join(str5(channel) for channel in channels) + ')'
def rgb_to_cmyk(rgb):
    """
    Returns: color rgb in space CMYK, with the most black possible.

    Formulae from en.wikipedia.org/wiki/CMYK_color_model.

    Parameter rgb: the color to convert to a CMYK object
    Precondition: rgb is an RGB object
    """
    # Normalize each channel from 0..255 to 0..1, then invert it.
    cyan = 1 - rgb.red / 255.0
    magenta = 1 - rgb.green / 255.0
    yellow = 1 - rgb.blue / 255.0
    if cyan == 1 and magenta == 1 and yellow == 1:
        # Pure black: all pigment comes from the K channel alone.
        cyan, magenta, yellow, black = 0, 0, 0, 1
    else:
        # Pull as much black as possible out of the colored channels.
        black = min(cyan, magenta, yellow)
        cyan = (cyan - black) / (1 - black)
        magenta = (magenta - black) / (1 - black)
        yellow = (yellow - black) / (1 - black)
    # Scale everything from 0..1 back to the CMYK 0..100 range.
    return cornell.CMYK(cyan * 100.0, magenta * 100.0, yellow * 100.0, black * 100.0)
def cmyk_to_rgb(cmyk):
    """
    Returns : color CMYK in space RGB.

    Formulae from en.wikipedia.org/wiki/CMYK_color_model.

    Parameter cmyk: the color to convert to a RGB object
    Precondition: cmyk is an CMYK object.
    """
    # Normalize each channel from 0..100 to 0..1.
    cyan = cmyk.cyan / 100.0
    magenta = cmyk.magenta / 100.0
    yellow = cmyk.yellow / 100.0
    black = cmyk.black / 100.0
    red = (1 - cyan) * (1 - black)
    green = (1 - magenta) * (1 - black)
    blue = (1 - yellow) * (1 - black)
    # Scale back to 0..255, rounding half-up via the module-level round().
    return cornell.RGB(int(round(red * 255.0, 0)),
                       int(round(green * 255.0, 0)),
                       int(round(blue * 255.0, 0)))
def rgb_to_hsv(rgb):
    """
    Return: color rgb in HSV color space.

    Formulae from wikipedia.org/wiki/HSV_color_space.

    Parameter rgb: the color to convert to a HSV object
    Precondition: rgb is an RGB object
    """
    # Normalize each channel from 0..255 to 0..1.
    red = rgb.red / 255.0
    green = rgb.green / 255.0
    blue = rgb.blue / 255.0
    largest = max(red, green, blue)
    smallest = min(red, green, blue)
    if largest == smallest:
        # Grey/achromatic: hue is undefined, use 0 by convention.
        hue = 0
    elif largest == red:
        hue = 60.0 * (green - blue) / (largest - smallest)
        if green < blue:
            hue = hue + 360.0
    elif largest == green:
        hue = 60 * (blue - red) / (largest - smallest) + 120.0
    else:
        hue = 60 * (red - green) / (largest - smallest) + 240.0
    # Saturation is 0 for black, otherwise the spread relative to the max.
    saturation = 0 if largest == 0 else 1 - (smallest / largest)
    value = largest
    return cornell.HSV(hue, saturation, value)
def hsv_to_rgb(hsv):
    """
    Returns: color in RGB color space.

    Formulae from http://en.wikipedia.org/wiki/HSV_color_space.

    Fix: the color-wheel sector is reduced modulo 6, so a hue of exactly 360
    (within the 0..360 range this module uses elsewhere) maps to the same
    color as hue 0.  Previously floor(360/60) == 6 matched no branch and
    left R, G, B unbound, raising NameError.

    Parameter hsv: the color to convert to a RGB object
    Precondition: hsv is an HSV object.
    """
    H = hsv.hue
    S = hsv.saturation
    V = hsv.value
    # Sector 0..5 of the color wheel; % 6 folds H == 360 back onto sector 0.
    Hi = math.floor(H / 60) % 6
    # Fractional position within the sector (computed from the raw floor so
    # f stays correct after the modulo reduction above).
    f = H / 60 - math.floor(H / 60)
    # The three candidate channel values for this sector.
    p = V * (1 - S)
    q = V * (1 - f * S)
    t = V * (1 - (1 - f) * S)
    if Hi == 0:
        R, G, B = V, t, p
    elif Hi == 1:
        R, G, B = q, V, p
    elif Hi == 2:
        R, G, B = p, V, t
    elif Hi == 3:
        R, G, B = p, q, V
    elif Hi == 4:
        R, G, B = t, p, V
    else:
        R, G, B = V, p, q
    # Scale to 0..255 with half-up rounding via the module-level round().
    return cornell.RGB(int(round(R * 255.0, 0)),
                       int(round(G * 255.0, 0)),
                       int(round(B * 255.0, 0)))
| [
"noreply@github.com"
] | noreply@github.com |
1597a6c9fe1277455f4aa1409ffa3eb31778d767 | 8403aa031ab51ebff345321bba1f16b6f70f4c69 | /vk.py | 4ab0ba837965815a1869a3e2a92711ba420b9b71 | [] | no_license | funkyOne/vk_group_stats | 4239a503ef0a496180cd416b4e72372be2b6b8fc | 97bc9a921c252575996b671eee2967bc31230c85 | refs/heads/master | 2020-12-24T13:29:08.297655 | 2020-03-10T13:38:17 | 2020-03-10T13:38:17 | 9,445,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | # -*- coding: utf-8 -*-
import ConfigParser
import shelve
import sqlite3 as lite
import json
import pyzmail
from datetime import datetime, timedelta
from vk_api import ApiClient
fileName = 'vk.cfg'
# shelve_file = "vk.shelf"
# db_name = "vk.db"
# api_method_url_base = "https://api.vk.com/method/"
# app_id = 3362438
# app_secret = 'x4hGokusolKNorGHyoZA'
# username = 'bumashechka@gmail.com'
# password = 'xasiYezix'
'''
event_type:
0 - joined
1 - left
'''
def main():
    """One synchronization cycle: diff the group's current member list from
    VK against the local database, record joins/leaves, and e-mail a report.
    (Python 2 script: note the print statements.)"""
    settings = read_settings()
    init_db(settings['db_name'])
    api = ApiClient()
    # Members as reported by VK right now vs. what the database last saw.
    current_members_list = api.get_members()
    last_members_list = get_current_members(settings['db_name'])
    new_users = list(set(current_members_list) - set(last_members_list))
    left_users = list(set(last_members_list) - set(current_members_list))
    print "NEW USERS"
    print new_users
    print "LEFT USERS"
    print left_users
    # Persist the diff, then report it by e-mail.
    add_joined(new_users, settings['db_name'])
    removed_users = remove_left(left_users, settings['db_name'])
    send_mail(settings,new_users,removed_users)
    return
def init_db(db_name):
    """Create the users/events tables and their indexes if they do not exist.

    Idempotent: safe to call on every run thanks to IF NOT EXISTS.
    """
    con = lite.connect(db_name)
    # `with con` wraps the script in a transaction committed on success.
    with con:
        cur = con.cursor()
        cur.executescript("""
        CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY, joined_at DATE, left_at DATE, is_in_group INT DEFAULT 1);
        CREATE TABLE IF NOT EXISTS events
        (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            user_id INTEGER REFERENCES Users(id),
            happened_at DATE,
            event_type INTEGER
        );
        CREATE INDEX IF NOT EXISTS main.events_by_time ON events (happened_at, event_type);
        CREATE INDEX IF NOT EXISTS main.current_users ON users (is_in_group DESC);
        """)
# current_members = read_members()
# add_joined(current_members)
def members_to_tuple(members):
    """Wrap every member id in a 1-tuple, the row shape executemany() expects."""
    return tuple((member,) for member in members)
def get_current_members(db_name):
    """Return the ids of all users currently flagged as group members."""
    connection = lite.connect(db_name)
    connection.row_factory = lite.Row
    with connection:
        cursor = connection.cursor()
        cursor.execute("SELECT id from users where is_in_group=1")
        return [record['id'] for record in cursor.fetchall()]
def add_joined(members, db_name):
    """Record newly joined members: insert a Users row and a 'joined' (type 0)
    Events row, both stamped with today's date."""
    con = lite.connect(db_name)
    members_tuple = members_to_tuple(members)
    # One transaction for both inserts; committed when the with-block exits.
    with con:
        cur = con.cursor()
        cur.executemany("INSERT INTO Users(id,joined_at) VALUES(?, date('now'))", members_tuple)
        cur.executemany("INSERT INTO Events(user_id,happened_at,event_type) VALUES(?, date('now'),0)", members_tuple)
def remove_left(members,db_name):
    """Record members who left: insert a 'left' (type 1) Events row for each,
    flag the matching Users rows as out of the group, and return the affected
    rows (id, joined_at) for reporting.

    PARSE_DECLTYPES makes sqlite convert DATE columns to datetime.date objects
    in the returned rows.
    """
    con = lite.connect(db_name,detect_types=lite.PARSE_DECLTYPES)
    con.row_factory = lite.Row
    members_tuple = members_to_tuple(members)
    with con:
        cur = con.cursor()
        cur.executemany("INSERT INTO Events(user_id,happened_at,event_type) VALUES(?,date('now'),1)", members_tuple)
        # Users with a 'left' event dated today are marked as gone.
        cur.execute("UPDATE Users SET is_in_group=0, left_at=date('now') WHERE id IN (SELECT u.id FROM Users u JOIN Events e ON e.happened_at = date('now') AND e.user_id=u.id AND event_type=1)")
        cur.execute("SELECT u.id, u.joined_at FROM Users u JOIN Events e ON e.happened_at = date('now') AND e.user_id=u.id AND event_type=1")
        rows = cur.fetchall()
    return rows
def read_settings():
    """Read vk.cfg (module-level fileName) and return a settings dict with
    storage paths ([main]) and SMTP/mail parameters ([mail]).

    'recipients' is stored as a single comma-space separated string and is
    split into a list here.
    """
    config = ConfigParser.RawConfigParser()
    config.read(fileName)
    settings = {
        'shelve_file' : config.get('main','shelve_file'),
        'db_name' : config.get('main','db_name'),
        'smtp_host':config.get('mail','smtp_host'),
        'smtp_port':config.getint('mail','smtp_port'),
        'smtp_mode':config.get('mail','smtp_mode'),
        'smtp_login':config.get('mail','smtp_login'),
        'smtp_password':config.get('mail','smtp_password'),
        'recipients' : config.get('mail','recipients').split(', '),
    }
    return settings
def build_profile_url(id):
    """Return the public VK profile URL for the given numeric user id."""
    return 'http://vk.com/id' + str(id)
def profile_li(id):
    """Return an HTML <li> linking to the user's VK profile.

    Fix: the URL is now the anchor's text as well as its target; previously
    the <a> element was empty, producing an unclickable link with the URL
    rendered as plain text after it.
    """
    url = build_profile_url(id)
    return '<li><a href="{0}">{0}</a></li>'.format(url)
def left_profile_li(row):
    """Return an HTML <li> for a user who left the group, with tenure in days.

    Fixes:
    (1) the URL is now the anchor's text (the <a> element was empty before);
    (2) tenure is date - date: row['joined_at'] comes from a DATE column read
        with PARSE_DECLTYPES (see remove_left), i.e. a datetime.date, and the
        old `datetime.now() - joined_at` mixed datetime with date, which
        raises TypeError.
    """
    url = build_profile_url(row['id'])
    joined_at = row['joined_at']
    days_in_group = (datetime.now().date() - joined_at).days
    return '<li><a href="{0}">{0}</a> (был(а) в группе {1} дней)</li>'.format(url, days_in_group)
def send_mail(settings,joined, removed):
    """Compose and send the join/leave report e-mail (delegates to compose_mail,
    which both builds and sends the message)."""
    compose_mail(settings,joined, removed)
    #process_mail(payload)
def unshelve_or_none(key, shelve_file):
    """Return the value stored under key in the shelf file, or None if absent.

    Fixes: uses `key in shelf` instead of dict.has_key (removed in Python 3,
    `in` works on both 2 and 3), and closes the shelf in a finally block so
    it is released even if the lookup raises.
    """
    shelf = shelve.open(shelve_file)
    try:
        if key in shelf:
            return shelf[key]
        return None
    finally:
        shelf.close()
def put_to_shelve(key, val, shelve_file):
    """Persist val under key in the given shelf file, creating it if needed."""
    store = shelve.open(shelve_file)
    store[key] = val
    store.close()
def compose_mail(settings, joined, left):
    """Build the HTML join/leave report and send it via pyzmail.

    The subject is '<N> in, <M> out'; the body lists profile links for joined
    and departed users.  The last run timestamp is shelved so the report can
    show the covered time window.  (Python 2 script: note print statements.)
    """
    sender=(u'Питон', 'robot@stalobaloba.ru')
    recipients=settings['recipients']
    subject=u'{0} in, {1} out'.format(len(joined),len(left))
    text_content=u'' # u'Bonjour aux Fran\xe7ais'
    encoding='utf-8'
    # Render each user as an <li>; `left` rows carry (id, joined_at).
    joined_html = ''.join (map(profile_li,joined))
    left_html = ''.join (map(left_profile_li,left))
    now = datetime.now()
    # Remember this run's timestamp; previous one defines the report window.
    last_run = unshelve_or_none('last_run', settings['shelve_file'])
    put_to_shelve('last_run', now, settings['shelve_file'])
    datestring = '{0:%d/%m/%y}'.format(now)
    if last_run:
        delta = now - last_run
        # Same-day window shows only the end time; longer gaps show both dates.
        if delta.days < 1:
            datestring = '{0:%d/%m/%y %H:%M} - {1:%H:%M}'.format(last_run, now)
        elif delta.seconds>100:
            datestring = '{0:%d/%m/%y %H:%M} - {1:%d/%m/%y %H:%M}'.format(last_run, now)
    # NOTE(review): the heading tag is never closed ('<h1>...{0}<h1>') -- confirm intended markup.
    html_content=u'<html><body><h1>Отчет о группе {0}<h1><h2>Новые пользователи</h2><ul>{1}</ul>\
<h2>Вышедшие пользователи</h2><ul>{2}</ul></body></html>'.format(datestring,joined_html,left_html)
    payload, mail_from, rcpt_to, msg_id=pyzmail.compose_mail(\
        sender, \
        recipients, \
        subject, \
        encoding, \
        None\
        ,(html_content, encoding))
    ret=pyzmail.send_mail(payload, mail_from, rcpt_to, settings['smtp_host'], \
        smtp_port=settings['smtp_port'], smtp_mode=settings['smtp_mode'], \
        smtp_login=settings['smtp_login'], smtp_password=settings['smtp_password'])
    # pyzmail returns a dict of failed recipients (empty means success) or an error.
    if isinstance(ret, dict):
        if ret:
            print 'failed recipients:', ', '.join(ret.keys())
        else:
            print 'success'
    else:
        print 'error:', ret
# Entry point: run one sync/report cycle when executed as a script.
if __name__ == "__main__":
    main()
"oleg.novikdichko@gmail.com"
] | oleg.novikdichko@gmail.com |
86483ffded970770240b3c917772fa03d7a4cbf4 | a08effe848688ff9e0c234972674ca810eeae0c8 | /src/networking/client.py | a1e2220b1ac19839fb5932c38f17d0d85f9049d4 | [
"MIT"
] | permissive | Reathe/Qubic | 65a9bad9440e16345cb62492b1b7f3fe8dc7f6e1 | 7ee18eb6cb67ff3637f664dd225273c51ae19847 | refs/heads/master | 2021-05-21T05:48:30.686200 | 2021-01-21T11:28:58 | 2021-01-21T11:28:58 | 252,571,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,096 | py | import socket
from typing import Dict, Any, List
import jsonpickle
from model.qubic import Qubic
from networking.rooms import Room
class Client:
    """Thin TCP client for the Qubic game server.

    Every public method sends one jsonpickle-encoded request dictionary to
    the server and decodes the single response it gets back.  On a malformed
    response the method logs a diagnostic and returns None instead of
    raising; that shared pattern now lives in the private _unwrap helper
    (previously copy-pasted eight times).
    """
    HOST, PORT = "qubicgame.ddns.net", 9999
    # HOST, PORT = "localhost", 9999

    def __init__(self, name, id=None, *args, **kwargs):
        """Create a client; registers as a new player when no id is given."""
        super().__init__(*args, **kwargs)
        self.name = None
        self.room_id = None
        self.id = id
        if self.id is None:
            print(f'No id, could be found, registering as new player')
            self._register(name)

    def send(self, data: Dict[str, Any], size=48000) -> Dict[str, Any]:
        """
        Sends data to server

        Args:
            data: the data to be sent (jsonpickle-encoded before transmission)
            size: max size of the received data in bytes

        Returns:
            the decoded response data
        """
        data = jsonpickle.encode(data)
        # Create a TCP socket; the with-block closes it on exit.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            # Connect to server and send the newline-terminated request.
            sock.connect((self.HOST, self.PORT))
            sock.sendall(bytes(data + "\n", "utf-8"))
            received = sock.recv(size).decode()
        return jsonpickle.decode(received)

    def _unwrap(self, result, key, action):
        """Return result[key]; on failure print 'Could not <action> <error>'
        and fall through to returning None."""
        try:
            return result[key]
        except Exception as ex:
            print(f'Could not {action} {ex}')

    def _register(self, player_name: str):
        """
        registers the player in the server and sets the id and name attributes

        Args:
            player_name: the players name
        """
        request = {
            'type': 'register',
            'player_name': player_name
        }
        result = self.send(request)
        try:
            self.id = result['player_id']
            self.name = result['player_name']
        except Exception as ex:
            print(f'Client could not register:{ex}')
            print(result)

    def create(self, room_name: str) -> str:
        """
        creates a room

        Args:
            room_name: the name

        Returns:
            the rooms id (None on failure)
        """
        request = {
            'type': 'create',
            'room_name': room_name
        }
        result = self.send(request)
        return self._unwrap(result, 'room_id', 'create')

    def join(self, room_id: str, spectator: bool):
        """
        joins the room

        Args:
            room_id: the room to be joined
            spectator: if you're joining as a spectator

        Returns:
            the id of the joined room (None on failure)
        """
        request = {
            'type': 'join',
            'player_id': self.id,
            'room_id': room_id,
            'spectator': spectator
        }
        result = self.send(request)
        joined = self._unwrap(result, 'room_id', 'join')
        if joined is None:
            return None
        self.room_id = joined
        return self.room_id

    def leave(self) -> bool:
        """Leave the current room; returns False when not in a room."""
        if not self.room_id:
            return False
        request = {
            'type': 'leave',
            'player_id': self.id,
            'room_id': self.room_id
        }
        result = self.send(request)
        return self._unwrap(result, 'success', 'leave')

    def room_id_list(self) -> List[str]:
        """Return the ids of all rooms on the server (None on failure)."""
        request = {
            'type': 'room_id_list',
            'player_id': self.id,
        }
        result = self.send(request)
        return self._unwrap(result, 'room_id_list', 'get room id list')

    def room_list(self) -> List[Room]:
        """Return all rooms on the server (None on failure)."""
        request = {
            'type': 'room_list',
            'player_id': self.id,
        }
        result = self.send(request)
        return self._unwrap(result, 'room_list', 'get room list')

    def get_room(self) -> Room:
        """Return the Room the client is currently in (None on failure)."""
        request = {
            'type': 'room_get_by_id',
            'player_id': self.id,
            'room_id': self.room_id
        }
        result = self.send(request)
        return self._unwrap(result, 'room', 'get room')

    def get_qubic(self) -> Qubic:
        """
        Has to be called when in a room

        Returns:
            the Qubic (None on failure)
        """
        request = {
            'type': 'get_qubic',
            'player_id': self.id,
            'room_id': self.room_id
        }
        result = self.send(request)
        return self._unwrap(result, 'qubic', 'get qubic')

    def qubic_place(self, pos):
        """Place a piece at pos in the current room's qubic; returns the
        updated Qubic (None on failure)."""
        request = {
            'type': 'qubic_place',
            'player_id': self.id,
            'room_id': self.room_id,
            'pos': pos
        }
        result = self.send(request)
        return self._unwrap(result, 'qubic', 'place piece in qubic')
| [
"bachourian@gmail.com"
] | bachourian@gmail.com |
6757338d0b65931a2f906bdc7f2b1f72184ecadb | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_schoolteachers.py | a97e1eb48efc47de30e4d91ff8c1c06736a2b31a | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py |
#calss header
class _SCHOOLTEACHERS():
def __init__(self,):
self.name = "SCHOOLTEACHERS"
self.definitions = schoolteacher
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['schoolteacher']
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
2bfb43e857a21235bcc195e7bed4c0f379692a15 | 1faeafcacf32c3e750bc766eae8ad8598b25e885 | /venv/bin/easy_install-3.8 | b82ec5078679d7e4b8dacb5225b0369b52fcd212 | [] | no_license | noahjillson/Graphing-a-Sinusoidal-Function | 0029327eb22a2ca6c024b753d165cc765fa1b80c | 806d081beb429255734a12f611cd90e67373cf93 | refs/heads/master | 2021-01-08T17:01:34.709546 | 2020-02-21T15:38:38 | 2020-02-21T15:38:38 | 242,087,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | 8 | #!/Users/noahjillson/Desktop/data_analysis_test/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
# Auto-generated setuptools console-script shim: delegates to the
# 'easy_install-3.8' entry point declared by setuptools==40.8.0.
if __name__ == '__main__':
    # Strip the '-script.py'/'.exe' suffix Windows launchers append to argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
| [
"noahjillson@noahs-mbp.attlocal.net"
] | noahjillson@noahs-mbp.attlocal.net |
e2c1cf25c5a96bbea3825bb8afdbd8158e40a8b7 | 628d00c9b8dcd36861c35895ed9a5d3305facf5a | /tf.py | 09116b852fe934f460249df014b7df18a179507a | [] | no_license | skykobe/Reinforcement-learning | 6b17cd193d561790b7b52ad0e8a51acec42c665d | 49f57a6b66973fe0d1983818d69a9d1652d0fa25 | refs/heads/master | 2020-04-04T19:22:40.280920 | 2018-11-06T09:04:59 | 2018-11-06T09:04:59 | 156,203,470 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | import tensorflow as tf
# Build a tiny TF1-style graph that multiplies two int32 placeholders.
i1 = tf.placeholder(tf.int32)
i2 = tf.placeholder(tf.int32)
out = tf.multiply(i1, i2)
sess = tf.Session()
# Fix: keep the computed value instead of rebinding `out` to print()'s
# return value -- print always returns None, which clobbered the tensor.
result = sess.run(out, feed_dict={i1: [100], i2: [432]})
print(result)
| [
"zhuxiaohui@shin-yokohama.local"
] | zhuxiaohui@shin-yokohama.local |
e879ce09d32dd3e25d2b5d012e29d601fd22420c | b05646c751f97af09165496febd2a23a0f5e283a | /algorithm/moea.py | 57ccc3fcc851f574717e79f232eefd83448b016d | [] | no_license | outofstyle/MOBSA_MRP | d96b1a478426f7606cd604caa48d2c412036f888 | 97fe2902f00b144fd3b9fce135e0a61d8ebffa1b | refs/heads/master | 2021-09-12T07:13:02.370033 | 2018-04-15T08:07:38 | 2018-04-15T08:10:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 377 | py | class MultiObjectiveEvolutionaryAlgorithm(object):
def __init__(self, problem):
    """Set up the algorithm for one optimization problem.

    problem: the multi-objective problem instance to be solved.
    """
    self.problem = problem
    # Candidate solutions of the current generation.
    self.current_population = []
    # Solutions preserved across generations (presumably the
    # non-dominated set -- confirm once update_archive is implemented).
    self.external_archive = []
def init_population(self):
    """Create the initial population.  Stub: not yet implemented."""
    pass
def update_archive(self):
    """Refresh the external archive from the current population.  Stub."""
    pass
def run(self):
    """Execute the evolutionary loop.  Stub: not yet implemented."""
    pass
def name(self):
    """Return a human-readable algorithm name.  Stub."""
    pass
def show(self):
    """Display or report the algorithm's results.  Stub."""
    pass
"quanwenming@outlook.com"
] | quanwenming@outlook.com |
0b503253976c7ea5c0c3f9e0600789d35962bb8c | 9f206c2741163881f37a8123d0bedc34359da4ca | /app/models.py | 4a54cd395972b3700abed0cdeb0493f2118930a7 | [
"MIT"
] | permissive | OsmanMariam/On-the-blog | 6eaa41a04b330669fe4535b52668cb9dd227e400 | 455bdb22d6768ca607afaff3b8587e710738e85d | refs/heads/master | 2022-10-17T20:07:15.233294 | 2019-12-03T08:38:25 | 2019-12-03T08:38:25 | 224,815,084 | 0 | 0 | null | 2022-09-16T18:13:55 | 2019-11-29T08:54:23 | Python | UTF-8 | Python | false | false | 3,290 | py | from . import db
from werkzeug.security import generate_password_hash,check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load the User for the id stored in the session."""
    return User.query.get(int(user_id))
# Flask-Login passes the session's user id to this function; the function
# queries the database and returns the matching User record.
class User(UserMixin,db.Model):
    """Account model: login credentials, profile info and content relations."""
    __tablename__ = 'users'
    id = db.Column(db.Integer,primary_key = True)
    username = db.Column(db.String(255), index = True)
    email = db.Column(db.String(255), unique=True, index=True)
    bio = db.Column(db.String(255))
    profile_pic_path = db.Column(db.String())
    # Only the salted hash is stored; see the password property below.
    password_hash = db.Column(db.String(255))
    blogpost = db.relationship('Blogpost', backref='user', lazy="dynamic")
    comments = db.relationship("Comment", backref="user", lazy="dynamic")
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    is_admin = db.Column(db.Boolean, default=False)

    @property
    def password(self):
        # Reading back the plaintext password is deliberately forbidden.
        raise AttributeError('You cannot read the password attribute')

    @password.setter
    def password(self, password):
        # Hash on assignment so plaintext never reaches the database.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when password matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return f'User {self.username}'
class Role(db.Model):
    """
    Create a Role table
    """
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(60), unique=True)
    description = db.Column(db.String(200))
    # One role can be shared by many users (User.role_id foreign key).
    users = db.relationship('User', backref='role',
                            lazy='dynamic')

    def __repr__(self):
        return '<Role: {}>'.format(self.name)
class Blogpost(db.Model):
    """A blog post written by a user, carrying a category and its comments."""
    __tablename__ = 'blogpost'
    id = db.Column(db.Integer, primary_key=True)
    title= db.Column(db.String(300), index=True)
    content = db.Column(db.String(300), index=True)
    category_id = db.Column(db.String)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    comments = db.relationship('Comment', backref='blogpost', lazy="dynamic")
    time = db.Column(db.DateTime, default=datetime.utcnow)

    def save_blogpost(self, blogpost):
        ''' Save the blogpost '''
        # NOTE: persists the *argument*, not self -- kept as-is so existing
        # call sites continue to work; confirm whether self was intended.
        db.session.add(blogpost)
        db.session.commit()

    # display blogposts of a category
    @classmethod
    def get_blogposts(cls, id):
        """Return all posts in the given category.

        Fix: the classmethod was declared as `def get_blogposts(id)`, so the
        implicit class argument was bound to `id` and any real call such as
        Blogpost.get_blogposts(3) raised TypeError.  The signature now takes
        `cls` first, matching Comment.get_comments below.
        """
        blogposts = cls.query.filter_by(category_id=id).all()
        return blogposts

    def __repr__(self):
        return f"Blogpost('{self.id}', '{self.time}')"
class Comment(db.Model):
    """A user's comment on a blog post."""
    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True)
    post_comment = db.Column(db.String(255), index=True)
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"))
    # Foreign key to the commented post (column named 'blogposts').
    blogposts = db.Column(db.Integer, db.ForeignKey('blogpost.id'))
    time = db.Column(db.DateTime, default=datetime.utcnow)

    def save_comment(self):
        ''' Save the comments '''
        db.session.add(self)
        db.session.commit()

    # display comments of a blog post
    @classmethod
    def get_comments(cls, id):
        """Return all comments attached to the blog post with the given id."""
        comments = Comment.query.filter_by(blogposts=id).all()
        return comments
| [
"sufimariam8@gmail.com"
] | sufimariam8@gmail.com |
eadff18d5826fe8c172b84e1af89abe6bc1bc53a | 9c6dad048e5d2be07e262967c065a152f52c3095 | /3-largest-prime-factor.py | 927f041ab3409114dd47fb615a8455a6955c8abf | [] | no_license | anandman03/project-euler | 9c6ef3de8a2a510820023894b851b7708e1546db | 2d26afaf0577976cb83eb16a7d81bed38af9c74a | refs/heads/main | 2023-08-18T01:54:37.472484 | 2021-10-03T06:25:39 | 2021-10-03T06:25:39 | 410,455,141 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | def main():
    # Project Euler #3: largest prime factor of 600851475143.
    target = 600851475143
    # Sieve bound: sqrt(600851475143) < 10**6, so every prime factor we need
    # to test is below this limit.
    limit = 10 ** 6 + 5
    # Sieve of Eratosthenes: sieve[i] is True iff i is prime.
    sieve = [True for i in range(limit)]
    sieve[0] = sieve[1] = False
    for i in range(2, limit):
        if i * i > limit:
            break
        if sieve[i]:
            # Mark composites, starting from i*i (smaller ones already marked).
            for j in range(i * i, limit, i):
                sieve[j] = False
    # The last prime divisor found while scanning upward is the largest.
    ans = 0
    for i in range(2, limit):
        if sieve[i] and target % i == 0:
            ans = i
    print(ans)
if __name__ == '__main__':
    main()
| [
"anandmansimar@gmail.com"
] | anandmansimar@gmail.com |
33937e6f1596f97dfca590e08588cf3f50c3644c | def311ab3c35db43f832516f4af39c5455ee4eee | /Computational Logic/domain/rapid_conversions.py | 8960824cea521b87aabc22c5f9bf9347501b9e96 | [] | no_license | VladCincean/homework | 598de65fc94ce5f2f0da4afee971041f0986836b | 83a1945bef30bd8b353d9c601b55c38bd6cbf404 | refs/heads/master | 2021-06-18T07:08:18.112450 | 2017-05-31T17:56:53 | 2017-05-31T17:56:53 | 73,409,436 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,535 | py | '''
Created on 7 dec. 2015
@author: Vlad Cincean
'''
from math import log2
def to_base_2(n, s):
    '''
    Performs a rapid conversion from base s to base 2
    Input:  n (string)  - the number's representation in base s to convert
            s (integer) - the source base (can be 2, 4, 8 or 16)
    Output: out (string) - the representation in base 2 of the number
    '''
    if s not in (2, 4, 8, 16):
        raise ValueError("Error! Cannot perform a rapid conversion from the source base " + str(s))
    NIBBLES = {'0':'0000', '1':'0001', '2':'0010', '3':'0011', '4':'0100', '5':'0101', '6':'0110', '7':'0111',
               '8':'1000', '9':'1001', 'A':'1010', 'B':'1011', 'C':'1100', 'D':'1101', 'E':'1110', 'F':'1111'}
    # each source digit contributes exactly log2(s) bits
    width = int(log2(s))
    bits = ''.join(NIBBLES[digit][-width:] for digit in n)
    # drop insignificant leading zeros, but keep at least one digit
    return bits.lstrip('0') or '0'
def from_base_2(n, d):
    '''
    Performs a rapid conversion from base 2 to base d
    Input:  n (string)  - the number's representation in base 2 to convert
            d (integer) - the destination base (can be 2, 4, 8 or 16)
    Output: out (string) - the representation in base d of the number
    '''
    if d not in (2, 4, 8, 16):
        raise ValueError("Error! Cannot perform a rapid conversion to the destination base " + str(d))
    DIGITS = {'0000':'0', '0001':'1', '0010':'2', '0011':'3', '0100':'4', '0101':'5', '0110':'6', '0111':'7',
              '1000':'8', '1001':'9', '1010':'A', '1011':'B', '1100':'C', '1101':'D', '1110':'E', '1111':'F'}
    width = int(log2(d))
    # left-pad with zeros so the bits split evenly into groups of log2(d)
    n = '0' * ((-len(n)) % width) + n
    # translate each group via the table (keys are zero-padded to 4 bits)
    out = ''.join(DIGITS['0' * (4 - width) + n[i:i + width]]
                  for i in range(0, len(n), width))
    # drop insignificant leading zeros, but keep at least one digit
    return out.lstrip('0') or '0'
def rapid_convert(n, s, d):
    '''
    Performs a rapid conversion between two bases as powers of 2 (2, 4, 8 or 16)
    Input:  n (string)  - the number's representation in base s
            s (integer) - the source base
            d (integer) - the destination base
    Output: out (string) - the representation in base d of the number
    '''
    # go through base 2 as the common intermediate representation
    binary = to_base_2(n, s)
    return from_base_2(binary, d)
| [
"cvie1883@scs.ubbcluj.ro"
] | cvie1883@scs.ubbcluj.ro |
533164524bd7e942d8f7afa1f3078004c5cea911 | b759e0ae4bd2ac72b97e35ceb7036d9b3c3a361a | /i-kijiji/views.py | a0b89a7ceb1f777f23853bdd48c998e9aa7d453b | [] | no_license | Agana/i-Kijiji | 9aee7d33bb819fb10e3d5eb640ba84d568e6d538 | 8a195944579e92a7d44790a42a06fadf3a8f0886 | refs/heads/master | 2021-01-19T05:50:50.861554 | 2011-08-02T07:35:41 | 2011-08-02T07:35:41 | 2,100,729 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,897 | py | from django.template import Context, loader
from django.http import HttpResponse
from models import *
from django import forms
from django.forms import ModelForm
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.core.mail import send_mail
from django.core.files.uploadedfile import SimpleUploadedFile
def site_list(request, limit=15):
    """Render the full tourist-site list plus one unsaved TouristSite per
    site_type, which the template uses to populate the type filter.

    NOTE(review): `limit` is accepted but never applied to the queryset.
    """
    site_list=TouristSite.objects.all()
    # Unsaved placeholder instances, one per site type, for the template only.
    type_hotel = TouristSite(site_type='Hotel')
    type_park = TouristSite(site_type='Park')
    type_forest_reserve = TouristSite(site_type='Forest Reserve')
    type_wetland = TouristSite(site_type='Wetland')
    type_heritage_site = TouristSite(site_type='Heritage Site')
    type_castle = TouristSite(site_type='Fort/Castle')
    type_museum = TouristSite(site_type='Museum')
    t = loader.get_template('i-kijiji/sitelist.html')
    c = Context({'site_list':site_list,'user':str(request.user), 'type_hotel':type_hotel, 'type_park':type_park, 'type_forest_reserve':type_forest_reserve, 'type_wetland':type_wetland, 'type_heritage_site':type_heritage_site, 'type_castle':type_castle, 'type_museum':type_museum})
    return HttpResponse(t.render(c))
# For Villages
@csrf_exempt
def v_region_list(request, limit=15):
    """Render the village region-selection page with one unsaved MyVillage
    per region for the template's region picker.

    NOTE(review): `limit` is accepted but never applied to the queryset.
    """
    v_region_list=MyVillage.objects.all()
    # Unsaved placeholder instances, one per region, for the template only.
    northern= MyVillage(v_region='Northern')
    western = MyVillage(v_region='Western')
    uer = MyVillage(v_region='Upper East Region')
    uwr = MyVillage(v_region='Upper West Region')
    central = MyVillage(v_region='Central')
    eastern = MyVillage(v_region='Eastern')
    ga = MyVillage(v_region='Greater Accra')
    ashanti = MyVillage(v_region='Ashanti')
    ba = MyVillage(v_region='Brong Ahafo')
    volta = MyVillage(v_region='Volta Region')
    t = loader.get_template('i-kijiji/selectvillage.html')
    c = Context({'v_region_list':v_region_list,'user':str(request.user), 'northern':northern, 'western':western, 'uer':uer, 'uwr':uwr, 'central':central, 'eastern':eastern, 'western':western,
    'ga':ga, 'ashanti':ashanti, 'ba':ba, 'volta':volta})
    return HttpResponse(t.render(c))
def _village_list(request, region, template_name, context_key):
    """Shared body for the ten per-region village views: fetch the region's
    villages, newest first, and render them under the view-specific key.
    (The previous ten views were byte-for-byte copies of this logic.)

    NOTE(review): each view accepts `limit` but never applies it."""
    villages = MyVillage.objects.filter(v_region=region).order_by('-v_updated')
    return render_to_response(template_name, {context_key: villages})

def v_northern_list(request, limit=15):
    """Villages in the Northern region."""
    return _village_list(request, 'northern', 'i-kijiji/v_northern.html', 'v_northern_list')

def v_uer_list(request, limit=15):
    """Villages in the Upper East region."""
    return _village_list(request, 'upper_east', 'i-kijiji/v_uer.html', 'v_uer_list')

def v_uwr_list(request, limit=15):
    """Villages in the Upper West region."""
    return _village_list(request, 'upper_west', 'i-kijiji/v_uwr.html', 'v_uwr_list')

def v_volta_list(request, limit=15):
    """Villages in the Volta region."""
    return _village_list(request, 'volta', 'i-kijiji/v_volta.html', 'v_volta_list')

def v_central_list(request, limit=15):
    """Villages in the Central region."""
    return _village_list(request, 'central', 'i-kijiji/v_central.html', 'v_central_list')

def v_ga_list(request, limit=15):
    """Villages in the Greater Accra region."""
    return _village_list(request, 'greater_accra', 'i-kijiji/v_ga.html', 'v_ga_list')

def v_ba_list(request, limit=15):
    """Villages in the Brong Ahafo region."""
    return _village_list(request, 'brong_ahafo', 'i-kijiji/v_ba.html', 'v_ba_list')

def v_eastern_list(request, limit=15):
    """Villages in the Eastern region."""
    return _village_list(request, 'eastern', 'i-kijiji/v_eastern.html', 'v_eastern_list')

def v_western_list(request, limit=15):
    """Villages in the Western region."""
    return _village_list(request, 'western', 'i-kijiji/v_western.html', 'v_western_list')

def v_ashanti_list(request, limit=15):
    """Villages in the Ashanti region."""
    return _village_list(request, 'ashanti', 'i-kijiji/v_ashanti.html', 'v_ashanti_list')
# For Tourist Sites
@csrf_exempt
def region_list(request, limit=15):
    """Render the tourist-site region-selection page.

    Mirrors v_region_list: unsaved TouristSite instances act as region labels.
    ``limit`` is unused. Fix: removed the duplicated 'western' key from the
    original context dict literal.
    """
    region_list = TouristSite.objects.all()
    # Unsaved placeholder objects: only their region label is rendered.
    northern = TouristSite(region='Northern')
    western = TouristSite(region='Western')
    uer = TouristSite(region='Upper East Region')
    uwr = TouristSite(region='Upper West Region')
    central = TouristSite(region='Central')
    eastern = TouristSite(region='Eastern')
    ga = TouristSite(region='Greater Accra')
    ashanti = TouristSite(region='Ashanti')
    ba = TouristSite(region='Brong Ahafo')
    volta = TouristSite(region='Volta Region')
    t = loader.get_template('i-kijiji/selectsite.html')
    c = Context({'region_list': region_list, 'user': str(request.user),
                 'northern': northern, 'western': western, 'uer': uer,
                 'uwr': uwr, 'central': central, 'eastern': eastern,
                 'ga': ga, 'ashanti': ashanti, 'ba': ba, 'volta': volta})
    return HttpResponse(t.render(c))
def _site_list_by_region(request, region, slug):
    """Shared implementation for the per-region tourist-site list views.

    Filters TouristSite by ``region`` (most recently modified first) and
    renders ``i-kijiji/<slug>.html`` with context key ``<slug>_list``.
    Replaces ten copy-pasted three-line views; public names are unchanged.
    """
    sites = TouristSite.objects.filter(region=region).order_by('-last_modified')
    return render_to_response('i-kijiji/%s.html' % slug,
                              {'%s_list' % slug: sites})

def northern_list(request, limit=15):
    return _site_list_by_region(request, 'northern', 'northern')

def uer_list(request, limit=15):
    return _site_list_by_region(request, 'upper_east', 'uer')

def uwr_list(request, limit=15):
    return _site_list_by_region(request, 'upper_west', 'uwr')

def volta_list(request, limit=15):
    return _site_list_by_region(request, 'volta', 'volta')

def central_list(request, limit=15):
    return _site_list_by_region(request, 'central', 'central')

def ga_list(request, limit=15):
    return _site_list_by_region(request, 'greater_accra', 'ga')

def ba_list(request, limit=15):
    return _site_list_by_region(request, 'brong_ahafo', 'ba')

def eastern_list(request, limit=15):
    return _site_list_by_region(request, 'eastern', 'eastern')

def western_list(request, limit=15):
    return _site_list_by_region(request, 'western', 'western')

def ashanti_list(request, limit=15):
    return _site_list_by_region(request, 'ashanti', 'ashanti')
def login(request, limit=15):
    """Render the static login page. ``limit`` is unused.

    (Removed dead commented-out query code left over from copy-paste.)
    """
    return render_to_response('i-kijiji/login.html')

def register(request, limit=15):
    """Render the static registration page. ``limit`` is unused."""
    return render_to_response('i-kijiji/register.html')

def welcome(request, limit=15):
    """Render the static welcome page. ``limit`` is unused."""
    return render_to_response('i-kijiji/welcome.html')
class SiteForm(ModelForm):
    """Review submission form.

    ``review_title`` and ``review_author`` are excluded because the views fill
    them in programmatically before saving.
    """
    class Meta:
        exclude = ['review_title', 'review_author']
        model = Review

    def revform(self):
        """Return the form's field mapping.

        Bug fix: the original body executed ``print review``, referencing an
        undefined name and raising NameError whenever the method was called.
        The broken debug statement has been removed.
        """
        return self.fields
class UploadedFileForm(ModelForm):
    """Form for creating/uploading a MyVillage entry (all model fields)."""

    class Meta:
        model = MyVillage

    def village_form(self):
        """Expose the bound field mapping for template-side iteration."""
        return self.fields
@csrf_exempt
def site_detail(request, id, showReviews=False):
    """Show one tourist site, its three newest reviews, and a review form.

    POST creates a new Review bound to this site; on success the user is
    redirected back to the same URL (post/redirect/get).
    """
    site_detail = TouristSite.objects.get(id=id)
    site_detail_iterable = TouristSite.objects.filter(id=id)
    review_items = Review.objects.filter(review_title__id=id).order_by('-review_updated')[:3]
    if showReviews:
        # NOTE(review): 'reviews' is only printed (debug) and never passed to
        # the template -- appears vestigial; confirm before removing.
        reviews = Review.objects.filter(review_title__id=id)
        print reviews
    if request.method == 'POST':
        review = Review(review_title=site_detail)
        form = SiteForm(request.POST, instance=review)
        # NOTE(review): on very old Django, is_authenticated was a method and
        # this truth-test would always pass -- confirm the Django version.
        if request.user.is_authenticated:
            review.review_author = request.user.username
        if form.is_valid():
            form.save()
            print form  # debug output
            return HttpResponseRedirect(request.path)
        # Invalid POST falls through and re-renders with the bound form.
    else:
        form = SiteForm()
    # Sidebar data: five random villages and the five most recently updated.
    feature_list = MyVillage.objects.all().order_by('?')[:5]
    top_villages = MyVillage.objects.all().order_by('-v_updated')[:5]
    return render_to_response('i-kijiji/sitedetail.html', {'request':request, 'site_detail':site_detail, 'site_detail_iterable':site_detail_iterable, 'review_items':review_items, 'form':form.as_p(), 'feature_list':feature_list, 'top_villages':top_villages})
@csrf_exempt
def village_detail(request, id, showReviews=False):
village_detail=MyVillage.objects.get(id=id)
village_detail_iterable=MyVillage.objects.filter(id=id)
v_review_items=Review.objects.filter(review_title__id=id).order_by('-review_updated')
if showReviews:
v_reviews = Review.objects.filter(review_title__id=id)
print reviews
if request.method == 'POST':
v_review = Review(review_title=village_detail)
form = SiteForm(request.POST, instance=review)
if request.user.is_authenticated:
v_review.review_author = request.user.username
if form.is_valid():
form.save()
print form
return HttpResponseRedirect(request.path)
else:
form = SiteForm()
feature_list=MyVillage.objects.all().order_by('?')[:5]
top_villages=MyVillage.objects.all().order_by('-v_updated')[:5]
return render_to_response('i-kijiji/villagedetail.html', {'request':request, 'village_detail':village_detail, 'village_detail_iterable':village_detail_iterable, 'v_review_items':v_review_items, 'form':form.as_p(), 'feature_list':feature_list, 'top_villages':top_villages})
@csrf_exempt
def my_village(request, limit=15):
    """List villages newest-first and handle the 'add my village' upload form.

    On a valid POST the new village is saved and the region-selection page is
    rendered; on GET (or an invalid POST) the form page is rendered instead.
    ``limit`` is unused.
    """
    village_myform = MyVillage.objects.all().order_by('-v_updated')
    if request.method == 'POST':
        form = UploadedFileForm(request.POST, request.FILES)
        if form.is_valid():
            form.save()
            # NOTE(review): queryset above was evaluated before the save on
            # old Django lazily -- confirm the new entry appears in this page.
            return render_to_response('i-kijiji/selectvillage.html', {'request':request, 'village_myform':village_myform})
        # Invalid POST falls through and re-renders the bound form below so
        # the user sees validation errors.
    else:
        form = UploadedFileForm()
    return render_to_response('i-kijiji/myvillage.html', {'form': form.as_p(), 'request':request, 'village_myform':village_myform})
@csrf_exempt
def detail_review(request, id, limit=15, showReviews=False):
    """Edit an existing Review identified by ``id``.

    NOTE(review): when ``showReviews`` is falsy, ``editreview`` is never
    assigned and the code below raises NameError -- callers apparently always
    pass showReviews=True; confirm and consider making the lookup
    unconditional. ``limit`` is unused.
    """
    if showReviews:
        editreview = Review.objects.get(id=id)
        print editreview  # debug output
    if request.method == 'POST':
        form = SiteForm(request.POST, instance=editreview)
        if form.is_valid():
            form.save()
            print form  # debug output
            # Back to the parent site's detail page with reviews shown.
            return HttpResponseRedirect('/i-kijiji/sitedetail/'+ str(editreview.review_title.id)+'/True')
    else:
        form = SiteForm(instance=editreview)
    # NOTE(review): 'site_detail' here resolves to the module-level view
    # *function*, not a TouristSite instance -- likely a latent bug; also the
    # bound 'form' is never passed to the template. Confirm intent.
    return render_to_response('i-kijiji/reviewsite.html', {'editreview':editreview, 'request':request, 'site_detail': site_detail, 'user':str(request.user)})
def feature_list(id):
    """Render five random villages as 'features'.

    Fix: the module originally defined this function twice; the second
    (shadowing) definition had its query commented out, so the template
    context value 'feature_list' resolved to the function object itself.
    The two definitions are merged into this single, working version.
    The unused ``id`` parameter is kept for caller compatibility.
    """
    feature_list = MyVillage.objects.all().order_by('?')[:5]
    return render_to_response('i-kijiji/features.html', {'feature_list': feature_list})
def index(request):
    """Render the landing page.

    NOTE(review): relies on a module-level ``site_list`` defined earlier in
    this file (not visible in this chunk) -- verify it exists, otherwise this
    raises NameError at request time.
    """
    t = loader.get_template('i-kijiji/index.html')
    c = Context({'site_list':site_list,'user':str(request.user)})
    return HttpResponse(t.render(c))
| [
"nsiire@gmail.com"
] | nsiire@gmail.com |
c40aad25bea3a654dd4c3fd8365a86a2bb84bb49 | 81dce1d4687c0591c944037e78a1c119c57868b9 | /EPF.py | 9baf36a2317b60d9589d2532c0e406e0adab345e | [] | no_license | RichardChangCA/Python-OpenCV-Practice | b7c0ea0abc50446612a7b6716c480b6fddc104c1 | 3db6323b814bcbcb6c01f14af0cf0c0370855e81 | refs/heads/master | 2021-07-20T20:48:35.570871 | 2020-05-24T23:31:01 | 2020-05-24T23:31:01 | 172,720,013 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 605 | py | """
Edge-preserving filter (EPF) demo: when two neighboring pixels differ greatly they
likely sit on an edge, so smoothing should not blur across it. Gaussian bilateral blur.
"""
import numpy as np
import cv2 as cv
def bi_demo(image):
    """Display an edge-preserving bilateral blur of *image* in its own window."""
    filtered = cv.bilateralFilter(image, 0, 100, 15)
    cv.imshow("bi_demo", filtered)
def shift_demo(image):
    """Display a mean-shift filtered version of *image* in its own window."""
    filtered = cv.pyrMeanShiftFiltering(image, 10, 50)
    cv.imshow("shift_demo", filtered)
print("---hello python---")
# NOTE(review): hard-coded user-specific path -- fails on any other machine.
src = cv.imread("C:/Users/46507/Desktop/zlf.jpg")
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)
# Show both filtered variants alongside the original.
bi_demo(src)
shift_demo(src)
cv.waitKey(0)  # block until any key is pressed
cv.destroyAllWindows()
| [
"zlf465074419@gmail.com"
] | zlf465074419@gmail.com |
639812435ff737ef572d5857613e1dc73121356a | 090aacd45fc1b2c9910a5a2e91aa236e0ae13bce | /storage-clients/model-manager/src/main/resources/z3/allocz3Solver.py | 32edc2fe3bdcc5e9adc74859b26e8eba76471c33 | [] | no_license | BLasan/k8s-monitoring | 03767f709e338feb6215f00abaf278baa2e4f0e3 | 6f65bcdd40ef2c4cd334949a4be674cfa620aaad | refs/heads/master | 2023-03-31T02:56:03.145329 | 2020-02-12T17:30:22 | 2020-02-12T17:30:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,699 | py | #Sam Bayless, 2017 <https://github.com/sambayless>
#License: CC0 and/or MIT
from z3 import *
import random
import json

# Deterministic runs: seed the RNG (it was used by the removed random
# test-data generators; harmless to keep).
random.seed(0)

# Create a Z3 solver instance. Switch to Optimize() if the affinity objective
# below should actually be maximized rather than just bounded.
s = Solver()
class Job:
    """A schedulable job with fixed CPU and memory demands.

    The demands are stored as explicit Z3 integer constants (IntVal) rather
    than plain python ints: Z3 sometimes coerces python values implicitly,
    but being explicit avoids subtle bugs.
    """

    def __init__(self, name, required_cpu, required_memory):
        self.name = name
        self.required_cpu = IntVal(required_cpu)
        self.required_memory = IntVal(required_memory)
class Node:
    """A placement target with CPU/memory capacity; identity is its name.

    Equality and hashing are by name so Node objects can be de-duplicated in
    sets and used as dict keys.
    """

    def __init__(self, name, available_cpu, available_memory):
        self.name = name
        self.available_cpu = IntVal(available_cpu)
        self.available_memory = IntVal(available_memory)

    def __hash__(self):
        return hash(self.name)

    def __eq__(self, other):
        if not isinstance(other, Node):
            return False
        return self.name == other.name
expected_runtimes = dict()  # NOTE(review): never written below -- appears vestigial.
# Input: a JSON list of affinity records (source job, target job, weight).
# NOTE(review): absolute path -- breaks outside the original dev machine.
data = \
    json.load(open('/home/adalrsjr1/Code/ibm-stack/storage-clients/model-manager/src/main/resources/z3/allocz3.json'))
def dictToNode(d):
    """Build a Node from a JSON host record with 'name'/'cpu'/'memory' keys."""
    name, cpu, memory = d['name'], d['cpu'], d['memory']
    return Node(name, cpu, memory)

def dictToJob(d):
    """Build a Job from a JSON job record with 'name'/'cpu'/'memory' keys."""
    name, cpu, memory = d['name'], d['cpu'], d['memory']
    return Job(name, cpu, memory)
# Parse the affinity records: collect jobs, hosts (nodes) and pairwise
# affinity weights keyed by (job, job) tuples.
affinities = dict()
jobs = []
nodes = set()
for affinity in data:
    j1 = dictToJob(affinity['source'])
    j2 = dictToJob(affinity['target'])
    jobs.append(j1)
    jobs.append(j2)
    assert (j1 != j2)
    affinities[(j1, j2)] = affinity['affinity']
    n1 = dictToNode(affinity['source']['host'])
    n2 = dictToNode(affinity['target']['host'])
    if n1 not in nodes:
        nodes.add(n1)
    if n2 not in nodes:
        nodes.add(n2)

# (The original random node/job/affinity generators that used to live here,
# commented out, have been removed: real data now comes from the JSON above.)

# The following constraints force Z3 to find a valid placement of jobs to
# nodes (but do not yet attempt to maximize affinity).
job_placements = dict()
for j in jobs:
    job_placements[j] = dict()

node_placements = dict()
for n in nodes:
    node_placements[n] = []

for j in jobs:
    # Each job has to be placed on exactly one node.
    node_choices = []
    node_choices_pb = []  # NOTE(review): only fed the commented-out PbEq encoding below
    for n in nodes:
        # One Boolean per (job, node) pair; true means "job placed on node".
        # Every Z3 variable needs a globally unique name.
        p = Bool("place_%s_on_%s" % (j.name, n.name));
        node_choices.append(p)
        node_choices_pb.append((p, 1))
        node_placements[n].append((p, j))
        job_placements[j][n] = p
    # Exactly-one constraint. The arithmetic sum encoding is used because the
    # pseudo-Boolean one misbehaved with a single node:
    #s.add(z3.PbEq(node_choices_pb, 1)) # this not work for just one node
    s.add(Sum([If(b, 1, 0) for b in node_choices]) == 1)

# Hard capacity constraint: per node, total CPU demand of placed jobs must
# not exceed the node's capacity; Z3 refuses to answer sat otherwise.
for n in nodes:
    placements = node_placements[n]
    sum_used_cpu = Sum([If(p, j.required_cpu, 0) for p, j in placements])
    s.add(sum_used_cpu <= n.available_cpu)
    n.sum_used_cpu = sum_used_cpu  # kept for the post-solve sanity checks

# Same hard constraint for memory.
for n in nodes:
    placements = node_placements[n]
    sum_used_memory = Sum([If(p, j.required_memory, 0) for p, j in placements])
    s.add(sum_used_memory <= n.available_memory)
    n.sum_used_memory = sum_used_memory

# Total affinity score: for every affine pair placed on the same node, add
# that pair's weight. Other objective formulations are possible.
affinity_score = IntVal(0)
for (j1, j2), val in affinities.items():
    both_jobs_on_same_node = []
    for n in nodes:
        both_jobs_on_this_node = And(job_placements[j1][n], job_placements[j2][n])
        both_jobs_on_same_node.append(both_jobs_on_this_node)
    # If both jobs land on the same node, add their affinity to the score.
    affinity_score = If(Or(both_jobs_on_same_node), affinity_score + val, affinity_score)

#s.maximize(affinity_score ) # requires s = Optimize() above
# NOTE(review): with a plain Solver we can only *bound* the score, not
# maximize it -- any placement with total affinity > 1 is accepted.
s.add(affinity_score > 1)

r = s.check()
if r == sat:  # a valid placement exists
    m = s.model()  # the model holds the concrete assignment Z3 found
    # m.evaluate(x, True) extracts the solver's value; as_long() makes it a python int.
    a = m.evaluate(affinity_score, model_completion=True).as_long()
    assert(a >= 0)
    # Emit the placement as a JSON array of {"job","host"} records; a final
    # empty object keeps the trailing comma valid JSON.
    print("[")
    for j in jobs:
        placements = job_placements[j]
        n_found = 0
        for n, p in placements.items():
            val = m.evaluate(p, True)
            if val:
                assert(n_found == 0)
                n_found += 1
                print('{"job":"%s", "host":"%s"},' % (j.name, n.name))
        assert(n_found == 1)  # sanity check: each job placed on exactly one node
    print("{}]")
    # Sanity-check the cpu/ram capacity constraints against the model.
    for n in nodes:
        cpu_usage = m.evaluate(n.sum_used_cpu, True).as_long()
        available_cpu = m.evaluate(n.available_cpu, True).as_long()
        assert(cpu_usage <= available_cpu)
        memory_usage = m.evaluate(n.sum_used_memory, True).as_long()
        available_memory = m.evaluate(n.available_memory, True).as_long()
        assert (memory_usage <= available_memory)
else:
    # unsat (or unknown): report an empty placement.
    print("[]")
| [
"adalrsjr1@gmail.com"
] | adalrsjr1@gmail.com |
106e3d6aafafc929974d310d2d52e5eb63047a10 | 2b84f1161c2a75adf14957f4b24f3bf8d7d230e5 | /4_Socio_economic/5_Roads.py | f4c53d4ffc1622fc18fc4e5744720b822f44c02c | [] | no_license | SophiePlassin/GeodatabaseRGB | 4d24efc7d70db704968e42559a19c1a0aed670ce | d0229e28a9a320d0cf87507778692df982ccff8c | refs/heads/master | 2020-11-27T21:56:50.645202 | 2020-01-08T14:39:34 | 2020-01-08T14:39:34 | 229,616,711 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,803 | py | ## Name: Roads.py
## Created on: 2019-05-09
## By: Sophie Plassin
## Description: Preparation of the roads shapefiles for the Rio Grande/Bravo basin (RGB)
##              1. Project the shapefiles to North America Albers Equal Area Conic
##              2. Clip the shapefiles
## ---------------------------------------------------------------------------
## Import packages and define the path name of the directories
# Import arcpy module
import datetime
import arcpy
import os
from arcpy import env
from arcpy.sa import *

print "Geoprocess starts at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")

### Set environment settings
# NOTE(review): hard-coded absolute Windows paths -- must be edited per machine.
env.workspace = "C:\\GIS_RGB\\Geodatabase\\Socio_Economics\\5_Road\\original_input\\"
dirpath = env.workspace  # NOTE(review): never used below
interFolder = "C:\\GIS_RGB\\Geodatabase\\Socio_Economics\\5_Road\\inter_output\\"
finalFolder = "C:\\GIS_RGB\\Geodatabase\\Socio_Economics\\5_Road\\final_output\\"

# Local variables:
in_files = arcpy.ListFeatureClasses()
projList = []
clip_features = []

## ---------------------------------------------------------------------------
## 1. Project the polygon
## Description: Project ecoregion polygon to North America Albers Equal Area Conic
print "\nStep 1 Project the polygon starts at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")

outCS = arcpy.SpatialReference("North America Albers Equal Area Conic")

# Project
# NOTE(review): every input is written to the same "Roads_pr.shp" name, so
# with more than one input feature class later files overwrite earlier ones.
for fc in in_files:
    projName = os.path.join(interFolder, "Roads")
    projFile = projName + "_pr.shp"
    arcpy.Project_management(fc, projFile, outCS)
    projList.append(projFile)
    print "Projection" , fc, "completed at" , datetime.datetime.now().strftime("%I:%M:%S%p")

## ---------------------------------------------------------------------------
## 2. Clip
## Description: Clip the waterbody polygon for the study area.
print "\nStep 2 starts at", datetime.datetime.now().strftime("%A, %B %d %Y %I:%M:%S%p")

# Create the clip_features (basin and study-area boundary shapefiles)
folderShapefiles = "C:\\GIS_RGB\\Geodatabase\\rgb_bound\\"
listfc = ["RGB_Basin_na_albers.shp", "RGB_Ses_na_albers.shp"]
xy_tolerance = ""
for fc in listfc:
    out_shp = os.path.join(folderShapefiles, fc)
    clip_features.append(out_shp)

#Execute Clip
for fc in projList:
    print fc
    for clip in clip_features:
        temp = os.path.split(clip)[-1]
        name = os.path.split(fc)[1]
        new_name = name.split('_pr.shp')[0]
        # Basin clip -> *_bas.shp; study-area (Ses) clip -> *_ses.shp
        if temp.startswith("RGB_Basin"):
            output = finalFolder + new_name + "_bas.shp"
        else:
            output = finalFolder + new_name + "_ses.shp"
        arcpy.Clip_analysis(fc, clip, output, xy_tolerance)
        print "Clip" , fc, "completed at" , datetime.datetime.now().strftime("%I:%M:%S%p")

print "Step 2 Clip completed at", datetime.datetime.now().strftime("%I:%M:%S%p")
| [
"sophie.plassin@ou.edu"
] | sophie.plassin@ou.edu |
4645afbd217aaeb12a8d11bbcccd5f7dff4e404f | e6dea1ae250496e489f2bb1ac10a597947adb38f | /semana 1/ejemplos/continue.py | 5125b4decaf0ba126782cbc00243b27ba1d5bb1e | [] | no_license | frankvasquez94/curso-python-django | 68df07dc0762bdf03c85c3489680385e6212659c | da7bb1172f590ef1ec1040a65c71b8556b24c80c | refs/heads/master | 2021-01-18T05:15:12.834162 | 2016-05-11T19:12:07 | 2016-05-11T19:12:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | while True:
s=input('enter something: ')
if s=='quit':
break
if len(s)<3:
print('too small')
continue
print('it is sufficient length')
print('Done')
| [
"kevinchristo_garcia@hotmail.com"
] | kevinchristo_garcia@hotmail.com |
eda6de58d6e0cf0a1b21783d539bcf06a3737610 | 13d79af5600aacddb90c0848fe5ce566ec3cba77 | /azsentinel/ui/widget/tabs.py | 5f1a89e9f4c1ec1390c36cff8e93b652480568cc | [
"MIT"
] | permissive | FrodeHus/azsctl | f6316f3934233f80e35bb73f1e8d2367080ffdb3 | bc7486770b707e6e33dfd6f0c9b6382731dd1b57 | refs/heads/main | 2023-02-22T14:32:32.906115 | 2021-01-28T06:44:35 | 2021-01-28T06:44:35 | 325,632,011 | 1 | 0 | MIT | 2021-01-27T09:44:17 | 2020-12-30T19:30:11 | Python | UTF-8 | Python | false | false | 1,796 | py | import urwid
class TabItem:
    """Pairs a tab's display label with the widget it reveals when selected."""

    def __init__(self, label, widget):
        self.widget = widget
        self.label = label
class Tab(urwid.WidgetWrap):
    """A single clickable tab header rendered as centered, styled text."""

    def __init__(self, offset, label, attr, onclick):
        w = urwid.Text(label, align="center")
        w = urwid.Padding(w, align="center")
        w = urwid.AttrMap(w, attr)
        urwid.WidgetWrap.__init__(self, w)
        self.tab_offset = offset
        self.onclick = onclick

    def mouse_event(self, size, event, button, col, row, focus):
        # NOTE(review): urwid usually reports events like "mouse press";
        # confirm the equality test against "mouse" ever matches.
        if event == "mouse" and button == 1:
            # Bug fix: the original read self.offset, which is never set
            # (the constructor stores tab_offset), raising AttributeError
            # on every click.
            self.onclick(self.tab_offset)
            return True
class TabPanel(urwid.WidgetWrap):
    """Container rendering a row of tab headers above the active tab's body.

    Left/right arrow keys cycle through tabs; clicking a header selects it.
    """

    def __init__(self, tabs, offset=0):
        super().__init__("")
        self.tabs = tabs
        self.tab_offset = offset
        self.show()

    def keypress(self, size, key):
        count = len(self.tabs)
        if key == "left":
            self.set_active_tab((self.tab_offset - 1) % count)
        elif key == "right":
            self.set_active_tab((self.tab_offset + 1) % count)
        else:
            # Not a tab-switching key: let the wrapped frame handle it.
            return self._w.keypress(size, key)

    def set_active_tab(self, offset):
        self.tab_offset = offset
        self.show()

    def show(self):
        """Rebuild the wrapped frame for the currently active tab."""
        if not self.tabs:
            return
        headers = []
        for idx, tab in enumerate(self.tabs):
            attr = "heading" if idx == self.tab_offset else "heading inactive"
            headers.append(Tab(idx, tab.label, attr, self.set_active_tab))
        header_row = urwid.Columns(headers, dividechars=1)
        body = self.tabs[self.tab_offset].widget
        self._w = urwid.Frame(body, header=header_row)
        self._w.set_focus("body")
"noreply@github.com"
] | noreply@github.com |
c594fc5656c2269a77d24486c0c09a04bf5a7f2f | 650b25db432df4e8b9f6560e23d7c96402eef821 | /Scripts/console/edition.py | eb6bf06a3a4728940133dcdbe4d1f9278fbb2314 | [
"BSD-2-Clause"
] | permissive | hrbrmstr/graphiti | 1bd00870e9f8075fb7dd6b5a119f03326c159612 | 56d85e5262b76dc6ce4f213a4d80486e015de1b7 | refs/heads/master | 2023-03-17T20:21:11.832339 | 2016-11-03T22:17:19 | 2016-11-03T22:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | import script
from script import *
class Info(script.Script):
    """Console command: log a one-line summary of the current graph size."""

    def run(self, args):
        nodes, edges = og.count_nodes(), og.count_edges()
        self.console.log("{0} nodes, {1} edges.".format(nodes, edges))
class Load(script.Script):
    """Console command: load a graph from JSON, 'load <filename>'.

    The filename may contain spaces; everything after the command is joined.
    """

    def run(self, args):
        if len(args) < 2:
            self.console.log("Usage: {0} <filename>".format(args[0]))
            return
        filename = " ".join(args[1:])
        std.load_json(filename)
class Save(script.Script):
    """Console command: save the graph to a new JSON file, 'save <filename>'."""

    def run(self, args):
        if len(args) != 2:
            self.console.log("Usage: {0} <filename>".format(args[0]))
            return
        filename = args[1]
        # Refuse to clobber an existing file.
        if os.path.isfile(filename):
            self.console.log("Error: File already exists!")
            return
        std.save_json(filename)
        self.console.log("File saved in '{0}'.".format(filename))
class Screenshot(script.Script):
    """Console command: write a TGA screenshot, 'screenshot <filename> [<factor>]'."""

    def run(self, args):
        if len(args) != 2 and len(args) != 3:
            self.console.log("Usage: {0} <filename> [<factor>]".format(args[0]))
            return
        if os.path.isfile(args[1]):
            self.console.log("Error: File {0} already exists!".format(args[1]))
            return
        filename = args[1]
        try:
            factor = float(args[2])
        except (IndexError, ValueError, TypeError):
            # No factor given, or it wasn't numeric: default to 1:1.
            # (Was a bare 'except:', which would also have swallowed
            # KeyboardInterrupt/SystemExit.)
            factor = 1.0
        if not filename.lower().endswith('.tga'):
            self.console.log("Extension not recognized, needs to be TGA")
            return
        og.screenshot(filename, factor)
        self.console.log("Screenshot with factor {0} saved in '{1}'.".format(factor, filename))
class Clear(script.Script):
    """Console command: reset parts of the graph, 'clear [graph|colors|icons|activity|lod]'."""

    def clear_graph(self):
        """Delete every node from the graph.

        NOTE(review): assumes og.remove_node also drops incident edges -- confirm.
        """
        for id in og.get_node_ids():  # NOTE(review): 'id' shadows the builtin
            og.remove_node(id)

    def clear_colors(self):
        """Reset edge coloring mode and paint every node opaque white."""
        og.set_attribute("og:space:edgemode", "string", "node_color")
        for n in og.get_node_ids():
            og.set_node_attribute(n, "og:space:color", "vec4", "1.0 1.0 1.0 1.0")

    def clear_icons(self):
        """Reset every node's icon to the default disk shape."""
        for n in og.get_node_ids():
            og.set_node_attribute(n, "og:space:icon", "string", "shapes/disk")

    def clear_activity(self):
        """Zero the activity attribute on all nodes and edges."""
        for n in og.get_node_ids():
            og.set_node_attribute(n, "og:space:activity", "float", "0.0")
        for e in og.get_edge_ids():
            og.set_edge_attribute(e, "og:space:activity", "float", "0.0")

    def clear_lod(self):
        """Reset the level-of-detail attribute to 1.0 on all nodes and edges."""
        for n in og.get_node_ids():
            og.set_node_attribute(n, "og:space:lod", "float", "1.0")
        for e in og.get_edge_ids():
            og.set_edge_attribute(e, "og:space:lod", "float", "1.0")

    def run(self, args):
        # Dispatch on the single sub-command argument; anything else prints usage.
        if len(args) == 2 and args[1] == "graph":
            self.clear_graph()
        elif len(args) == 2 and args[1] == "colors":
            self.clear_colors()
        elif len(args) == 2 and args[1] == "icons":
            self.clear_icons()
        elif len(args) == 2 and args[1] == "activity":
            self.clear_activity()
        elif len(args) == 2 and args[1] == "lod":
            self.clear_lod()
        else:
            self.console.log("Usage: {0} [graph|colors|icons|activity|lod]".format(args[0]))
class Set(script.Script):
    """Console command: assign an attribute on every entity in the selection,
    'set <type> <name> <value>'."""

    def __init__(self, console):
        super(Set, self).__init__(console)

    def run(self, args):
        if len(args) < 3:
            self.console.log("Usage: {0} <type> <name> <value>".format(args[0]))
            return
        value = " ".join(args[3:])
        for key in self.console.query.keys():
            # Query keys are plural ('nodes'/'edges'); the API wants singular.
            entity_type = key[:-1]
            for entity_id in self.console.query[key]:
                self.console.api.set_attribute(entity_type, entity_id, args[2], args[1], value)
class Get(script.Script):
    """Console command: log an attribute's value for every selected entity,
    'get <name>'."""

    def __init__(self, console):
        super(Get, self).__init__(console)

    def run(self, args):
        if len(args) < 2:
            self.console.log("Usage: {0} <name>".format(args[0]))
            return
        attribute = args[1]
        for key in self.console.query.keys():
            entity_type = key[:-1]  # strip plural 's' -> API entity type
            values = dict()
            for entity_id in self.console.query[key]:
                values[entity_id] = self.console.api.get_attribute(entity_type, entity_id, attribute)
            self.console.log("{0}: {1}".format(key, json.dumps(values)))
class Remove(script.Script):
    """Console command: delete every edge, then every node, in the selection."""

    def __init__(self, console):
        super(Remove, self).__init__(console)

    def run(self, args):
        # Plain loops instead of throwaway list comprehensions: the originals
        # were used purely for side effects and allocated a discarded list on
        # every run (PERF401-style smell).
        if 'edges' in self.console.query:
            for eid in self.console.query['edges']:
                og.remove_edge(eid)
        if 'nodes' in self.console.query:
            for nid in self.console.query['nodes']:
                og.remove_node(nid)
class Map(script.Script):
    """Console command: copy one attribute onto another across the selection,
    'map <src type> <src attribute> to <dst type> <dst attribute> [options]'."""

    def __init__(self, console):
        super(Map, self).__init__(console)

    def attr_convert(self, src_type, src_value, dst_type, options):
        """Convert src_value for storage as dst_type.

        Only same-type mappings are supported; vector types are serialized
        via the std helpers, everything else is formatted as a string
        (optionally through a '--format <spec>' option).
        """
        if src_type != dst_type:
            raise Exception("Mapping from {0} to {1} not supported!".format(src_type, dst_type))
        if dst_type == "vec2":
            return std.vec2_to_str(src_value)
        elif dst_type == "vec3":
            # Bug fix: the vec3/vec4 branches referenced the undefined name
            # 'value' and raised NameError; they now convert src_value.
            return std.vec3_to_str(src_value)
        elif dst_type == "vec4":
            return std.vec4_to_str(src_value)
        else:
            if len(options) == 2 and options[0] == "--format":
                return options[1].format(src_value)
            return "{0}".format(src_value)

    def lambda_map(self, element_type, element_id, src_type, src_name, dst_type, dst_name, options=None):
        """Read src_name from one node/edge, convert it, write it to dst_name."""
        if element_type == "node":
            source = og.get_node_attribute(element_id, src_name)
            target = self.attr_convert(src_type, source, dst_type, options)
            self.console.log("og.set_node_attribute({0}, {1}, {2}, {3})".format(element_id, dst_name, dst_type, target))
            og.set_node_attribute(element_id, dst_name, dst_type, target)
        elif element_type == "edge":
            source = og.get_edge_attribute(element_id, src_name)
            target = self.attr_convert(src_type, source, dst_type, options)
            og.set_edge_attribute(element_id, dst_name, dst_type, target)

    def run(self, args):
        # Bug fix: the original guard was 'len(args) < 6 and args[3] == "to"',
        # which (a) crashed with IndexError for fewer than 4 arguments and
        # (b) let malformed 4-5 argument commands fall through to args[4]/[5].
        if len(args) < 6 or args[3] != 'to':
            self.console.log("Usage: {0} <src type> <src attribute> to <dst type> <dst attribute> [options]".format(args[0]))
            return
        if 'nodes' in self.console.query:
            for nid in self.console.query['nodes']:
                self.lambda_map("node", nid, args[1], args[2], args[4], args[5], args[6:])
        if 'edges' in self.console.query:
            for eid in self.console.query['edges']:
                self.lambda_map("edge", eid, args[1], args[2], args[4], args[5], args[6:])
"thibault@opendns.com"
] | thibault@opendns.com |
85f1ae52a67e4550b2f5f813d49ec399e7cea2dd | a142a6914d5b2e0c1281f76cd1530cce43fdcf79 | /example.py | f2cd088420ef57e07756fb820922b3f91c024ef2 | [
"BSD-3-Clause"
] | permissive | Sauci/pydbc | b45a930d4ef52dbff18c04248706e38db48fd6c4 | da80e0b143f0dbb1ed63ae5d277128c723071454 | refs/heads/master | 2021-03-14T17:40:46.184649 | 2020-04-16T12:41:42 | 2020-04-16T12:41:42 | 246,780,237 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | from pydbc.parser import DbcParser
# Minimal in-memory DBC fixture: one 2-byte message (id 1234) carrying two
# 8-bit little-endian unsigned signals. Parsed and smoke-checked below.
dbc_data = """
VERSION "my_version"
NS_ :
BU_EV_REL_
BU_BO_REL_
BO_ 1234 my_message_name: 2 my_transmitter
 SG_ my_first_signal_name : 56|8@1+ (16,0) [0|4000] "my_first_signal_unit" my_first_signal_transmitter
 SG_ my_second_signal_name : 48|8@1+ (1,0) [0|250] "my_second_signal_unit" my_first_signal_transmitter
"""

if __name__ == '__main__':
    # Each assert checks one parsed field of the first signal/message.
    p = DbcParser(dbc_data)
    assert p.ast.version == 'my_version'
    assert p.ast.messages[0].identifier == 1234
    first_signal = p.ast.messages[0].signals[0]
    assert first_signal.name == 'my_first_signal_name'
    assert first_signal.signal_size == 8
    assert first_signal.start_bit == 56
    assert first_signal.value_type == '+'
    assert first_signal.factor == 16
    assert first_signal.offset == 0
    assert first_signal.minimum == 0
    assert first_signal.maximum == 4000
| [
"guillaume.sottas@liebherr.com"
] | guillaume.sottas@liebherr.com |
197325c25fc711a72451d8bc53cb0a09c5fb9388 | be78578cd0a31f524629b2f937e28474309c4493 | /app/auth/views.py | 98b9605b4a6f40c11ac86443192e654e24507cdf | [] | no_license | sys3948/Tutorial-Flask-ACD | c6111bb46ee2900dad71e36d14fc685d7df2db88 | 19b1b63054c4fe8a4a074729dad25fd644d4697f | refs/heads/master | 2023-07-13T11:41:47.070651 | 2021-08-18T08:03:00 | 2021-08-18T08:03:00 | 379,833,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,565 | py | from flask import render_template, redirect, request, url_for, flash, current_app, session
from wtforms.validators import Email
from . import auth
from .forms import LoginForm, RegistrationForm, ChangePasswordForm, PasswordResetForm, PasswordResetRequestForm, ChangeEmailForm
from ..email import send_email
import pymysql
from werkzeug.security import generate_password_hash, check_password_hash # module for password hashing
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from datetime import date, datetime
@auth.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in by checking the submitted email/password against MySQL.

    On success stores the user's id and name in the session and honours an
    optional ``?next=`` redirect target.

    Security fix: the email was previously interpolated into the SQL string
    with %-formatting, allowing SQL injection; the query is now parameterized.
    """
    form = LoginForm()
    if form.validate_on_submit():
        # NOTE(review): hard-coded DB host; consider moving to config.
        conn = pymysql.connect(host='192.168.111.133', port=3306,
                               user=current_app.config['DB_USER'],
                               passwd=current_app.config['DB_PASSWD'],
                               database='flasky')
        cur = conn.cursor()
        cur.execute('select id, password_hash, username from user where email = %s',
                    (form.email.data,))
        user = cur.fetchone()
        cur.close()
        conn.close()
        if user is not None and check_password_hash(user[1], form.password.data):
            session['id'] = user[0]
            session['name'] = user[2]
            # NOTE(review): redirecting to an unvalidated ?next= is an open
            # redirect; consider checking the target is a local URL.
            return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password')
    return render_template('auth/login.html', form=form)
@auth.route('/logout')
def logout():
    """Drop all session state and send the user back to the main page."""
    target = url_for('main.index')
    session.clear()
    flash('You have been logged out.')
    return redirect(target)
@auth.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new account and e-mail a confirmation token.

    On a valid POST the user row is inserted with a hashed password, a
    one-hour token carrying the new user's id is generated, and a
    confirmation mail is sent to the registered address.
    """
    form = RegistrationForm()
    if form.validate_on_submit():
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # Parameterized statements replace %-interpolated SQL (injection risk).
        cur.execute('insert into user(email, username, password_hash) values(%s, %s, %s)',
                    (form.email.data, form.username.data, generate_password_hash(form.password.data)))
        conn.commit()
        cur.execute('select id from user where email = %s', (form.email.data,))
        confirm = cur.fetchone()
        cur.close()
        conn.close()
        s = Serializer(current_app.config['SECRET_KEY'], 3600)  # token valid for 1 hour
        token = s.dumps({'confirm': confirm[0]})
        send_email(form.email.data, 'Confirm Your Account', 'auth/email/confirm', username=form.username.data, token=token)
        flash('A confirmation email has been sent to you by email.')
        return redirect(url_for('auth.login'))
    return render_template('auth/register.html', form=form)
@auth.route('/confirm/<token>')
def confirm(token):
    """Validate an e-mail confirmation token and activate the account.

    Decodes the token, marks the matching user as confirmed and assigns a
    role: Administrator when the e-mail matches FLASKY_ADMIN, otherwise
    the default role.
    """
    s = Serializer(current_app.config['SECRET_KEY'])
    try:
        data = s.loads(token)
    except Exception as e:
        flash('token error! ' + str(e))
        return redirect(url_for('auth.login'))
    conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
    cur = conn.cursor()
    # Parameterized queries replace %-interpolated SQL (injection risk).
    cur.execute('select id, confirmed, email from user where id = %s', (data.get('confirm'),))
    user = cur.fetchone()
    if user[1]:
        # Already confirmed: nothing to do. Close the handles before
        # returning (the original leaked cursor/connection on this path).
        cur.close()
        conn.close()
        return redirect(url_for('main.index'))
    elif user[0] == data.get('confirm'):
        cur.execute('update user set confirmed=true where id=%s', (user[0],))
        if user[2] == current_app.config['FLASKY_ADMIN']:
            cur.execute("update user set role_id = (select id from role where name='Administrator') where id = %s", (user[0],))
        else:
            cur.execute("update user set role_id = (select id from role where default_value = 1) where id = %s", (user[0],))
        conn.commit()
        flash('You have confirmed your account. Thanks!')
    else:
        flash('The confirmation link is invalid or has expired.')
    cur.close()
    conn.close()
    return redirect(url_for('main.index'))
@auth.before_app_request
def before_request():
    """Per-request hook: refresh last_seen and enforce e-mail confirmation.

    Registered with before_app_request so it runs for every request of the
    whole application, not only for this blueprint. If the logged-in user
    has not confirmed the account, any endpoint outside the auth blueprint
    redirects to the "unconfirmed" page (auth endpoints must stay reachable
    for login, registration and re-confirmation).
    """
    if 'id' in session and 'name' in session:  # user is logged in
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # Parameterized queries replace %-interpolated SQL (injection risk).
        cur.execute('select confirmed, username from user where id = %s', (session.get('id'),))
        user = cur.fetchone()
        # Record the most recent access time for this user. NOTE(review):
        # doing this on every request is costly; updating it on login/logout
        # only may be preferable, as the original author also noted.
        cur.execute('update user set last_seen = %s where id = %s',
                    (datetime.now().strftime('%Y-%m-%d %H:%M:%S'), session.get('id')))
        conn.commit()
        if not user[0] and request.blueprint != 'auth':
            cur.close()
            conn.close()
            return redirect(url_for('auth.unconfirmed'))
        cur.close()
        conn.close()
@auth.route('/unconfirmed')
def unconfirmed():
    """Show the "please confirm your account" page.

    Already-confirmed users are sent to the index; anonymous visitors are
    sent to the login page (the original returned None for them, which
    Flask turns into a 500 error).
    """
    if 'id' in session and 'name' in session:
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # Parameterized query replaces %-interpolated SQL (injection risk).
        cur.execute('select confirmed, username from user where id = %s', (session.get('id'),))
        confirm = cur.fetchone()
        cur.close()
        conn.close()
        if confirm[0]:
            return redirect(url_for('main.index'))
        return render_template('auth/unconfirmed.html', username=confirm[1])
    # Not logged in: fall back to the login page instead of returning None.
    return redirect(url_for('auth.login'))
@auth.route('/confirm')
def resend_confirmation():
    """Send a fresh confirmation token to the logged-in user's e-mail."""
    # Require BOTH session keys: the original used `and`, which let a
    # partial session (only one key set) fall through and crash below.
    if 'id' not in session or 'name' not in session:
        return redirect(url_for('auth.login'))
    s = Serializer(current_app.config['SECRET_KEY'], 3600)  # token valid for 1 hour
    token = s.dumps({'confirm': session.get('id')})
    conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
    cur = conn.cursor()
    # Parameterized query replaces %-interpolated SQL (injection risk).
    cur.execute('select username, email from user where id = %s', (session.get('id'),))
    user = cur.fetchone()
    cur.close()
    conn.close()
    send_email(user[1], 'Confirm Your Account', 'auth/email/confirm', username=user[0], token=token)
    flash('A new confirmation email has been sent to you by email.')
    return redirect(url_for('main.index'))
@auth.route('/change-password', methods=['GET', 'POST'])
def change_password():
    """Let a logged-in user change their password.

    Verifies the current password against the stored hash before writing
    the hash of the new password.
    """
    # Require BOTH session keys (original used `and`, letting a partial
    # session through).
    if 'name' not in session or 'id' not in session:
        flash('로그인을 해주세요.')
        return redirect(url_for('auth.login'))
    form = ChangePasswordForm()
    if form.validate_on_submit():
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # Parameterized queries replace %-interpolated SQL (injection risk).
        cur.execute('select password_hash from user where id = %s', (session.get('id'),))
        user = cur.fetchone()
        if user and check_password_hash(user[0], form.old_password.data):
            # Current password matches: store the hash of the new one.
            cur.execute('update user set password_hash = %s where id = %s',
                        (generate_password_hash(form.password.data), session.get('id')))
            conn.commit()
            flash('Your password has been updated')
            cur.close()
            conn.close()
            return redirect(url_for('main.index'))
        else:
            flash('Invalid password')
        cur.close()
        conn.close()
    return render_template('auth/change_password.html', form=form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_required():
    """Request a password-reset e-mail (for users who are logged out).

    On a valid POST, mails the account owner a one-hour token carrying
    their user id.
    """
    if 'name' in session and 'id' in session:
        flash('잘 못 접근하셨습니다.')
        # Bug fix: the original passed the endpoint name straight to
        # redirect(); it must be resolved through url_for() first.
        return redirect(url_for('main.index'))
    form = PasswordResetRequestForm()
    if form.validate_on_submit():
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # Parameterized query replaces %-interpolated SQL (injection risk).
        cur.execute('select id, username from user where email = %s', (form.email.data,))
        token_id = cur.fetchone()
        cur.close()
        conn.close()
        if not token_id:
            flash('해당 이메일이 존재하지 않습니다.')
            return redirect(url_for('auth.password_reset_required'))
        s = Serializer(current_app.config['SECRET_KEY'], 3600)  # token valid for 1 hour
        token = s.dumps({'reset': token_id[0]})
        send_email(form.email.data, 'Reset Your Password', 'auth/email/reset_password', username=token_id[1], token=token)
        return redirect(url_for('auth.login'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def reset_password(token):
    """Set a new password from a reset token (logged-out users only)."""
    if 'name' in session and 'id' in session:
        flash('잘 못 된 접근 입니다.')
        return redirect(url_for('main.index'))
    form = PasswordResetForm()
    if form.validate_on_submit():
        s = Serializer(current_app.config['SECRET_KEY'], 3600)
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        try:
            # Dropped the original `print(data)`: it wrote the decoded token
            # payload (the user id) to stdout.
            data = s.loads(token)
            # Parameterized queries replace %-interpolated SQL (injection risk).
            cur.execute('select id from user where id = %s', (data.get('reset'),))
            user_info = cur.fetchone()
            if not user_info:
                cur.close()
                conn.close()
                flash('잘 못 된 정보입니다.')
                return redirect(url_for('main.index'))
            cur.execute('update user set password_hash = %s where id = %s',
                        (generate_password_hash(form.password.data), user_info[0]))
            conn.commit()
            cur.close()
            conn.close()
            # Typo fix in the user-facing message ("update" -> "updated").
            flash('Your password has been updated.')
            return redirect(url_for('auth.login'))
        except Exception as e:
            cur.close()
            conn.close()
            print('에러 발생! 에러 내용 : ' + str(e))
            flash('에러가 발생했습니다.')
            return redirect(url_for('main.index'))
    return render_template('auth/reset_password.html', form=form)
@auth.route('/change_email', methods=['GET', 'POST'])
def change_email():
    """Start an e-mail change: verify the password, then mail a token to
    the new address. The actual update happens in change_email_token()."""
    # Require BOTH session keys (original used `and`, letting a partial
    # session through).
    if 'id' not in session or 'name' not in session:
        flash('로그인을 해주세요.')
        return redirect(url_for('auth.login'))
    form = ChangeEmailForm()
    if form.validate_on_submit():
        conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
        cur = conn.cursor()
        # execute() returns the number of matched rows: non-zero means the
        # requested address is already registered. Parameterized queries
        # replace the original %-interpolated SQL (injection risk).
        if cur.execute('select email from user where email = %s', (form.email.data,)):
            cur.close()
            conn.close()
            flash('가입된 이메일입니다.')
            return redirect(url_for('auth.change_email'))
        cur.execute('select username, password_hash from user where id = %s', (session.get('id'),))
        token_id = cur.fetchone()
        cur.close()
        conn.close()
        if check_password_hash(token_id[1], form.password.data):
            s = Serializer(current_app.config['SECRET_KEY'], 3600)  # token valid for 1 hour
            token = s.dumps({'id': session.get('id'), 'email': form.email.data})
            send_email(form.email.data, 'Change Your Email', 'auth/email/change_email', username=token_id[0], token=token)
            flash('이메일 인증 절차를 진행합니다. 인증 메일 ' + form.email.data + '에 메일을 전송했습니다. 확인해주세요.')
            return redirect(url_for('main.index'))
        else:
            flash('비밀번호가 옳바르지 않습니다.')
            return redirect(url_for('auth.change_email'))
    return render_template('auth/change_email.html', form=form)
@auth.route('/email_confirm/<token>')
def change_email_token(token):
    """Apply an e-mail change token: update the address, then clear the
    session so the user logs in again with the new address."""
    # Require BOTH session keys (original used `and`); also dropped the
    # stray debug print.
    if 'id' not in session or 'name' not in session:
        flash('로그인을 해주세요.')
        return redirect(url_for('auth.login'))
    s = Serializer(current_app.config['SECRET_KEY'], 3600)
    data = s.loads(token)
    conn = pymysql.connect(host='192.168.111.133', port=3306, user=current_app.config['DB_USER'], passwd=current_app.config['DB_PASSWD'], database='flasky')
    cur = conn.cursor()
    # Parameterized queries replace %-interpolated SQL (injection risk).
    if not cur.execute('select * from user where id = %s', (data.get('id'),)):
        cur.close()
        conn.close()
        flash('잘 못 된 토큰 정보입니다.')
        return redirect(url_for('auth.change_email'))
    cur.execute('update user set email = %s where id = %s', (data.get('email'), data.get('id')))
    conn.commit()
    cur.close()
    conn.close()
    session.clear()
    flash('이메일 수정되었습니다. 다시 로그인을 해주세요.')
    return redirect(url_for('auth.login'))
"sys394880@gmail.com"
] | sys394880@gmail.com |
3f7a7cf1b5d55fbc3cfbdca88c06a9518592cfcf | 868628e2160223e8d76b2863523d35d4e5d089fb | /simple_calculator/test.py | cd97f3d03e9955b402c03ceb5eb43e6773787a2a | [] | no_license | And24reas/pythonGUI | aaf9b2e93100e756960c13782c5f5c43ebc3fb8d | d4d72a6942fd149531950281027807a0b7831c2a | refs/heads/main | 2023-01-30T16:38:52.811998 | 2020-12-14T15:17:29 | 2020-12-14T15:17:29 | 320,246,332 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import numpy as np
import matplotlib.pyplot as plt
a = np.array([0,1,2,3,4,5])
b=a[::-1]
print (b)
plt.plot(b)
plt.show() | [
"meinakkount@gmail.com"
] | meinakkount@gmail.com |
7d4d4b044a16b0f6c2c8a67d3dfe66cc9b53aae6 | ec48aba8de872da53e588c39b0e567f535d55a41 | /aiida_cp2k/tests/test_precision.py | e1de692bd4ec901e82dab8c42af6580b11505852 | [
"MIT"
] | permissive | dev-zero/aiida-cp2k | 4d38dc6e09867d79fbfb21f596ae5aeffc2c49b5 | 84985c184f9fe0516f84d116a0b24a56cb9c11c7 | refs/heads/master | 2022-05-01T11:24:54.205029 | 2019-05-08T08:52:12 | 2019-05-08T08:52:12 | 160,215,898 | 1 | 0 | null | 2018-12-03T15:51:24 | 2018-12-03T15:51:23 | null | UTF-8 | Python | false | false | 2,957 | py | # -*- coding: utf-8 -*-
###############################################################################
# Copyright (c), The AiiDA-CP2K authors. #
# SPDX-License-Identifier: MIT #
# AiiDA-CP2K is hosted on GitHub at https://github.com/aiidateam/aiida-cp2k #
# For further information on the license, see the LICENSE.txt file. #
###############################################################################
"""Test structure roundtrip precision ase->aiida->cp2k->aiida->ase"""
from __future__ import print_function
from __future__ import absolute_import
import pytest
from . import get_computer, get_code
@pytest.mark.process_execution
def test_structure_roundtrip_precision(new_workdir):
    """Testing structure roundtrip precision ase->aiida->cp2k->aiida->ase..."""
    import ase.build
    import numpy as np
    from aiida.engine import run
    from aiida.plugins import CalculationFactory
    from aiida.orm import Dict, StructureData

    computer = get_computer(workdir=new_workdir)
    code = get_code(entry_point="cp2k", computer=computer)

    # structure: an H2 molecule with a tiny epsilon folded into the bond
    # length and one cell vector, so the roundtrip must preserve values to
    # 1e-10 Angstrom to pass.
    epsilon = 1e-10  # expected precision in Angstrom
    dist = 0.74 + epsilon
    positions = [(0, 0, 0), (0, 0, dist)]
    cell = np.diag([4, -4, 4 + epsilon])
    atoms = ase.Atoms("H2", positions=positions, cell=cell)
    structure = StructureData(ase=atoms)

    # parameters: a zero-timestep, single-step MD run so CP2K writes the
    # structure back without actually moving anything
    parameters = Dict(
        dict={
            "GLOBAL": {"RUN_TYPE": "MD"},
            "MOTION": {"MD": {"TIMESTEP": 0.0, "STEPS": 1}},  # do not move atoms
            "FORCE_EVAL": {
                "METHOD": "Quickstep",
                "DFT": {
                    "BASIS_SET_FILE_NAME": "BASIS_MOLOPT",
                    "SCF": {"MAX_SCF": 1},
                    "XC": {"XC_FUNCTIONAL": {"_": "LDA"}},
                },
                "SUBSYS": {
                    "KIND": {
                        "_": "DEFAULT",
                        "BASIS_SET": "DZVP-MOLOPT-SR-GTH",
                        "POTENTIAL": "GTH-LDA",
                    }
                },
            },
        }
    )

    # resources
    options = {
        "resources": {"num_machines": 1, "num_mpiprocs_per_machine": 1},
        "max_wallclock_seconds": 1 * 60 * 60,
    }
    inputs = {
        "structure": structure,
        "parameters": parameters,
        "code": code,
        "metadata": {"options": options},
    }
    result = run(CalculationFactory("cp2k"), **inputs)

    # check structure preservation
    atoms2 = result["output_structure"].get_ase()

    # zeros should be preserved exactly
    assert np.all(atoms2.positions[0] == 0.0)

    # other values should be preserved with epsilon precision
    dist2 = atoms2.get_distance(0, 1)
    assert abs(dist2 - dist) < epsilon

    # check cell preservation
    cell_diff = np.amax(np.abs(atoms2.cell - cell))
    assert cell_diff < epsilon
| [
"tiziano.mueller@chem.uzh.ch"
] | tiziano.mueller@chem.uzh.ch |
9bba1cc1173df185ad4c40de335d9941e45d914d | 31d7e86a9283970b7154cb4e49dae176eea906a9 | /deependeliminator/api_routes.py | d79fc6ced77aa47557c6b9b8c70bb520d5c3cbd7 | [
"MIT"
] | permissive | brockhaywood/deependeliminator | 6bc8a3e2b152aeb766baff9455933990315e60c1 | b12a6a1100676269641513dc1fa5ad7164141bcb | refs/heads/master | 2022-01-19T07:10:13.874156 | 2019-09-09T15:59:26 | 2019-09-09T17:40:14 | 150,173,719 | 1 | 0 | MIT | 2022-01-06T22:26:49 | 2018-09-24T21:59:10 | JavaScript | UTF-8 | Python | false | false | 378 | py | from application import rebar
from deependeliminator.schemas import FantasyTeamListSchema
registry = rebar.create_handler_registry(prefix='/api/v1')
@registry.handles(
rule='/standings',
method='GET',
marshal_schema=FantasyTeamListSchema()
)
def get_standings_json():
from deependeliminator.standings import get_standings_list
return get_standings_list()
| [
"brock.haywood@gmail.com"
] | brock.haywood@gmail.com |
544d4e15b6fc21377b2f0f60e4b433e7379e7de5 | f020464236bc7b311a6678351e3ba426943f3f0f | /src/tests/storage_test.py | f8540e74294fb226fbaaaed34b7b155ef6737278 | [] | no_license | alexeyden/Project-MSF | 8e586392397661155f7444dbc5efcad1ffb738b2 | c0e457a8eecf244bba2f8d8fbb0f67b67f9b8f28 | refs/heads/master | 2021-01-17T15:24:39.065138 | 2016-05-25T14:16:57 | 2016-05-25T14:16:57 | 53,959,802 | 0 | 1 | null | 2016-05-12T07:08:42 | 2016-03-15T16:16:25 | Python | UTF-8 | Python | false | false | 9,522 | py | #!/usr/bin/env python3
import unittest
import shutil
import os
from storage.storage import *
from storage.exceptions import *
from algorithm.algorithm import *
class TestStorage(unittest.TestCase):
    """Integration tests for Storage against a real on-disk directory tree.

    Every test instance rebuilds the fixture tree under STORAGE:
        user1/A, user1/A/B, user1/B,
        user2/A/Alg1 (copied from data/algorithm.json),
        user3 (created empty by Storage itself).
    Async Storage operations are driven through the default event loop.
    """

    STORAGE = 'storage/'  # root directory shared by all tests

    def __init__(self, *args, **kwargs):
        # NOTE(review): the fixture is (re)built in __init__ rather than
        # setUp, so it happens once per test-case instantiation.
        super().__init__(*args, **kwargs)
        self._clean_storage()
        self.storage = Storage(storage_path=self.STORAGE)
        os.mkdir(os.path.join(self.STORAGE, 'user1/A'))
        os.mkdir(os.path.join(self.STORAGE, 'user1/A/B'))
        os.mkdir(os.path.join(self.STORAGE, 'user1/B'))
        os.mkdir(os.path.join(self.STORAGE, 'user2/A'))
        shutil.copy('data/algorithm.json', os.path.join(self.STORAGE, 'user2/A/Alg1'))

    def test_init(self):
        # Storage() creates a home directory for every known user.
        self.assertEqual(os.path.exists(os.path.join(self.STORAGE, 'user1')), True)
        self.assertEqual(os.path.exists(os.path.join(self.STORAGE, 'user2')), True)
        self.assertEqual(os.path.exists(os.path.join(self.STORAGE, 'user3')), True)

    def test_valid(self):
        # Path validation: leading slash required; backslashes and wildcard
        # characters rejected; unicode and punctuation accepted.
        self.assertEqual(self.storage.valid('invalid/'), False)
        self.assertEqual(self.storage.valid('/invalid/\\'), False)
        self.assertEqual(self.storage.valid('/юникод!123(3)[] !'), True)
        self.assertEqual(self.storage.valid('/invalid/????'), False)
        self.assertEqual(self.storage.valid('/'), True)

    def test_owner(self):
        # owner() maps the first path component to a user object; malformed
        # paths raise InvalidPathError, unknown ones NoSuchPathError.
        with self.assertRaises(InvalidPathError):
            self.storage.owner('\/invalid\/')
        with self.assertRaises(NoSuchPathError):
            self.storage.owner('/user1/C')
        with self.assertRaises(NoSuchPathError):
            self.storage.owner('/user4/')
        with self.assertRaises(NoSuchPathError):
            self.storage.owner('/user')
        self.assertEqual(self.storage.owner('/user1').login, 'user1')
        self.assertEqual(self.storage.owner('/user2/').login, 'user2')
        self.assertEqual(self.storage.owner('/user3/').login, 'user3')

    def test_list(self):
        with self.assertRaises(InvalidPathError):
            self.storage.list('%%invalid', context=None)
        with self.assertRaises(NoSuchPathError):
            self.storage.list('/user4/', context=None)
        # Without a context all user homes are visible.
        items = sorted([item.path for item in self.storage.list('/', context=None)])
        self.assertListEqual(items, [
            '/user1',
            '/user2',
            '/user3'
        ])
        # With a user1 context the listing is filtered.
        context = Storage.StorageContext('user1')
        items = sorted([item.path for item in self.storage.list('/', context)])
        self.assertListEqual(items, [
            '/user1',
            '/user2'
        ])
        items = sorted([item.path for item in self.storage.list('/user1/', context)])
        self.assertListEqual(items, [
            '/user1/A',
            '/user1/B'
        ])
        items = sorted([item.path for item in self.storage.list('/user1/A/', context)])
        self.assertListEqual(items, [
            '/user1/A/B'
        ])
        items = sorted([item.path for item in self.storage.list('/user2/A/', context)])
        self.assertListEqual(items, [
            '/user2/A/Alg1'
        ])
        # Recursive listing: flatten the FileInfo tree into nested dicts
        # ({} for directories, None for files) and compare to the fixture.
        items = self.storage.list('/', context, True)
        valid = {'/':
            {
                'user1': {
                    'A': {
                        'B': {}
                    },
                    'B': {}
                },
                'user2': {
                    'A': {
                        'Alg1': None
                    }
                }
            }
        }
        items_dict = {'/': {}}

        def traverse(parent, node):
            # Depth-first walk copying the FileInfo tree into plain dicts.
            if node.children is not None:
                for ch in node.children:
                    parent[ch.name] = {} if ch.children is not None else None
                    traverse(parent[ch.name], ch)

        root = FileInfo('/', '/', 'user1', True, True, False, False)
        root.children = items
        traverse(items_dict['/'], root)
        self.assertDictEqual(valid, items_dict)

    def test_move(self):
        context = Storage.StorageContext('user1')
        loop = asyncio.get_event_loop()
        # Invalid source/destination combinations must raise.
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('% invalid path %', '/user1/C', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/user1/A', '% udsds %', context))
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.move('/user1/C/', '/user1/D', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/user1/A/', '/user1/B', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/user1/A/', '/', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/user1/A/', '/user2/C/', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/user2/A/', '/user1/C/', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.move('/', '/user2/C/', context))
        # A successful move, then the reverse move restores the fixture.
        loop.run_until_complete(self.storage.move('/user1/A/', '/user1/B/C', context))
        self.assertEqual(self.storage.exists('/user1/A', context), False)
        self.assertEqual(self.storage.exists('/user1/B/C/', context), True)
        loop.run_until_complete(self.storage.move('/user1/B/C', '/user1/A', context))
        self.assertEqual(self.storage.exists('/user1/A', context), True)
        self.assertEqual(self.storage.exists('/user1/B/C/', context), False)

    def test_remove(self):
        context = Storage.StorageContext('user1')
        loop = asyncio.get_event_loop()
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.remove('% invalid path %', context))
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.remove('/user1/C', context))
        # user1 may not remove user2's content nor a whole user home.
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.remove('/user2/A', context))
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.remove('/user2', context))
        os.mkdir(os.path.join(self.STORAGE, 'user1/C'))
        loop.run_until_complete(self.storage.remove('/user1/C', context))
        self.assertEqual(os.path.exists(os.path.join(self.STORAGE, 'user1/C')), False)

    def test_file_read(self):
        context = Storage.StorageContext('user1')
        loop = asyncio.get_event_loop()
        with self.assertRaises(InvalidPathError):
            loop.run_until_complete(self.storage.file_read('%%invalid', context))
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.file_read('/user1/С', context))
        # Directories cannot be read as files.
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.file_read('/user2/A/'))
        a = loop.run_until_complete(self.storage.file_read('/user2/A/Alg1'))
        self.assertEqual(a.source, '(some (source (here)))')

    def test_create(self):
        context = Storage.StorageContext('user1')
        loop = asyncio.get_event_loop()
        # create() without content makes a directory.
        loop.run_until_complete(self.storage.create('/user1/A/С', context=context))
        self.assertEqual(self.storage.exists('/user1/A/С', context=context), True)
        loop.run_until_complete(self.storage.remove('/user1/A/С', context=context))
        # create() with an Algorithm content makes a readable file.
        a = Algorithm(input_spec=['a'], output_spec=['x'], source='(foo)')
        loop.run_until_complete(self.storage.create('/user1/A/File', context=context, content=a))
        self.assertEqual(self.storage.exists('/user1/A/File', context=context), True)
        a = loop.run_until_complete(self.storage.file_read('/user1/A/File'))
        self.assertEqual(a.source, '(foo)')
        loop.run_until_complete(self.storage.remove('/user1/A/File', context=context))

    def test_file_write(self):
        context = Storage.StorageContext('user1')
        loop = asyncio.get_event_loop()
        # Reading a directory or a missing path as a file raises.
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.file_read('/user1/A', context))
        with self.assertRaises(NoSuchPathError):
            loop.run_until_complete(self.storage.file_read('/user1/D', context))
        a = Algorithm(input_spec=['x'], output_spec=['y'], source='(foo)')
        loop.run_until_complete(self.storage.create('/user1/A/File', context=context, content=a))
        # Overwrite and read back to confirm the new content was persisted.
        a.source = '(bar)'
        loop.run_until_complete(self.storage.file_write('/user1/A/File', context=context, content=a))
        b = loop.run_until_complete(self.storage.file_read('/user1/A/File'))
        self.assertEqual(b.source, '(bar)')
        loop.run_until_complete(self.storage.remove('/user1/A/File', context=context))

    def test_exists(self):
        # exists() accepts paths with and without a trailing slash.
        self.assertEqual(self.storage.exists('/'), True)
        self.assertEqual(self.storage.exists('/user1'), True)
        self.assertEqual(self.storage.exists('/user1/'), True)
        self.assertEqual(self.storage.exists('/user2'), True)
        self.assertEqual(self.storage.exists('/user3'), True)

    def _clean_storage(self):
        # Remove every user directory under STORAGE from a previous run.
        for p in os.listdir(self.STORAGE):
            if os.path.isdir(os.path.join(self.STORAGE, p)):
                shutil.rmtree(os.path.join(self.STORAGE, p))
if __name__ == '__main__':
unittest.main() | [
"rtgbnm@gmail.com"
] | rtgbnm@gmail.com |
531a8eb4bdd352bd9dc24e66f5bf494387cb456f | 60c576e4492ddc09a0f541415e7a2330c82e42af | /products/migrations/0004_auto_20180928_1636.py | 95fdd5df8f7956a5a6ab9167d43b1bcc0b72a068 | [] | no_license | ninja15/ecommerce | 3a30dfe9dde73d6900f022f569f43f657a40f65d | ebb9030aa16deb6583d34b682d781375180d7f87 | refs/heads/master | 2020-04-15T16:46:47.403452 | 2019-01-09T11:34:15 | 2019-01-09T11:34:15 | 148,352,760 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-09-28 16:36
from __future__ import unicode_literals
from django.db import migrations, models
import products.models
class Migration(migrations.Migration):
    """Auto-generated: switch Product.image's upload_to to the callable
    products.models.upload_image_path (do not edit by hand)."""

    dependencies = [
        ('products', '0003_product_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            field=models.ImageField(blank=True, null=True, upload_to=products.models.upload_image_path),
        ),
    ]
| [
"marius.toader15@gmail.com"
] | marius.toader15@gmail.com |
d083e6836ba652c863350c9034cdedf9540a9227 | d397eaabe93b1a277df573345976858cc1e9cdcf | /code/poisson2d_pulse.py | 4435b86f5f384b521ef6f8720fd0261c103839a4 | [
"BSD-3-Clause"
] | permissive | linusec/SPINN | a3bb38b7109d6890df7ef4212abf0f0c94821c03 | 636ae3b30cb23e2ffcf060635e5e8e3f3460108c | refs/heads/main | 2023-03-12T20:28:31.608038 | 2021-02-26T09:12:39 | 2021-02-26T09:12:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | # Solve Poisson equation in 2D using SPINN
# \nabla^2 u(x, y) = f(x,y) on [0,1]x[0,1]
# (See pde() method below for exact form of f)
# Zero Dirichlet boundary condition on boundary
import numpy as np
import torch
from common import tensor
from spinn2d import Plotter2D, SPINN2D, App2D
from pde2d_base import RegularPDE
PI = np.pi
class Poisson2D(RegularPDE):
    """2D Poisson problem with a Gaussian-pulse manufactured solution.

    The forcing term f is chosen so that the exact solution is
    u(x, y) = x(1-x) y(1-y) exp(-(x - 0.25)^2 / K) with K = 0.02,
    matching the header comment of the file.
    """

    def pde(self, x, y, u, ux, uy, uxx, uyy):
        # PDE residual uxx + uyy + f: fxx/fyy are the contributions to f
        # obtained by differentiating the exact solution twice in x and y.
        K = 0.02
        ex = torch.exp(-(x - 0.25)*(x - 0.25)/K)
        fxx = (
            (1.0 + ((1.0 - 2.0*x)*(x - 0.25) + x*(1.0 - x))/K)
            + ((1.0 - 2.0*x - 2.0*x*(1 - x)*(x - 0.25)/K)*(x - 0.25)/K)
        )*2.0*ex*y*(1 - y)
        fyy = 2.0*x*(1.0 - x)*ex
        return uxx + uyy + fxx + fyy

    def has_exact(self):
        # An analytical solution exists (enables exact-error reporting).
        return True

    def exact(self, x, y):
        """Manufactured exact solution, evaluated with NumPy."""
        K = 0.02
        return x*(1.0 - x)*y*(1.0 - y)*np.exp(-(x - 0.25)*(x - 0.25)/K)

    def boundary_loss(self, nn):
        """Sum of squared mismatches between the network output and the
        exact solution at the boundary points."""
        xb, yb = self.boundary()
        xbn, ybn = (t.detach().cpu().numpy() for t in (xb, yb))
        u = nn(xb, yb)
        ub = tensor(self.exact(xbn, ybn))
        bc = u - ub
        return (bc**2).sum()
if __name__ == '__main__':
app = App2D(
pde_cls=Poisson2D, nn_cls=SPINN2D,
plotter_cls=Plotter2D
)
app.run(nodes=40, samples=120, lr=1e-2) | [
"aparyap@gmail.com"
] | aparyap@gmail.com |
ab4ca5a53dc193a94e6590f3b0c1fcdbbb4c9786 | b2fc9e6bc140a77c730ca0b13a738e139f214f05 | /mbl/net/__init__.py | fa6547be7cdb99ad85c20a97d8c3feb633ead92d | [] | no_license | lachtan/mblib | 31febb60855413aeeb70c15a28f7d4f8204a9c03 | 3378818e74f010c14ffe554cdf79ead3af2dfff7 | refs/heads/master | 2020-05-17T23:17:41.953208 | 2011-01-27T20:06:51 | 2011-01-27T20:06:51 | 753,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 209 | py | from mbl.net._socket import Socket
from mbl.net._socket import SocketInputStream
from mbl.net._socket import SocketOutputStream
from mbl.net._socket import TcpClient
from mbl.net._socket import TcpServer
| [
"lachtan@fnet.cz"
] | lachtan@fnet.cz |
0229b1dbbf5a46f43966e1975c13d1f096d11a2b | a240cd61ce4a7e5ec2d2e19fe4d69c293db61f47 | /WebPageToMD/src/DraftMd.py | 5dcbc2d0ceefa8fd51749021d049f60119b1f08e | [] | no_license | Joiner12/Python_MS | fe537c74b33ef561d209adb620e73c2b4040f262 | c926fca0eb747e1bad5a64fe5c9138d34edbfc07 | refs/heads/master | 2023-04-06T18:27:34.120553 | 2021-04-16T09:01:28 | 2021-04-16T09:01:28 | 263,056,958 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | # -*- coding:utf-8 -*-
import os
def splitRelpath(cur_path):
    """Split a '/'-separated path into its components.

    The original body computed the split but discarded it (the function
    always returned None); return the component list so callers can use it.
    """
    return cur_path.split('/')
def getPageAbsPath(baseurl):
    """Convert a ``file:///`` URL into an OS-native path string.

    Returns the joined path for ``file:///`` URLs and None for any other
    scheme. The original built the path but never returned it, and raised
    NameError for non-file URLs.
    Note: the input may be percent-encoded (see __main__); decoding it is
    presumably desirable — TODO confirm before adding urllib.parse.unquote.
    """
    if not baseurl.startswith(r"file:///"):
        return None
    path_temp = baseurl.replace(r"file:///", "").split("/")
    page_abs_path = str()
    for part in path_temp:
        page_abs_path = os.path.join(page_abs_path, part)
    return page_abs_path
if __name__ == "__main__":
url = r"file:///D:/Python_M/Code/WebPageToMD/pages/%E8%B7%AF%E5%BE%84%E8%A7%84%E5%88%92_%20a%20star%EF%BC%8C%20A%E6%98%9F%E7%AE%97%E6%B3%95%E8%AF%A6%E8%A7%A3_DinnerHowe%E7%9A%84%E5%8D%9A%E5%AE%A2-CSDN%E5%8D%9A%E5%AE%A2.html"
getPageAbsPath(url)
| [
"2114768901@qq.com"
] | 2114768901@qq.com |
f072929c9e3cca901370f4fdc9922a25746a2d37 | b6a36fed0a72da4b846fe4302c3b68bd242c9b71 | /web_project/users/signals.py | 0190bb5d1d403619a94beeb93587732a35969768 | [] | no_license | danialmalik/django_blog_app | a2a2318b7ba03b073e7675bfffe1d47086395192 | 14a3aee07429a75df1f7ac35264ab2d7e6b820da | refs/heads/master | 2022-12-08T08:42:30.294316 | 2020-04-02T06:14:14 | 2020-04-02T06:14:14 | 185,347,227 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from django.db.models.signals import post_save
from django.dispatch import receiver
from rest_framework.authtoken.models import Token
from .models import Profile, User
# This code is triggered whenever a new user has been created and saved to the database.
@receiver(post_save, sender=User)
def create_auth_token(sender, instance=None, created=False, **kwargs):
    """post_save receiver: issue a DRF auth Token for every new User."""
    if created:  # only on first save, not on subsequent updates
        Token.objects.create(user=instance)
# Whenever User is created, create a Profile instance too for that user
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
    """post_save receiver: create a Profile for each new User and re-save
    the related profile whenever an existing User is saved."""
    if created:
        Profile.objects.create(user=instance)
    else:
        # Existing user: persist any pending changes on the related profile.
        instance.profile.save()
| [
"danialmalik321@gmail.com"
] | danialmalik321@gmail.com |
94a2a1fa20e97c51852243a2a81a4149bdffabba | fb54704d4a6f9475f42b85d8c470e3425b37dcae | /medium/ex1381.py | 3b090451fda3eb43df141d4f0235c64721da852a | [] | no_license | ziyuan-shen/leetcode_algorithm_python_solution | b2784071a94b04e687fd536b57e8d5a9ec1a4c05 | 920b65db80031fad45d495431eda8d3fb4ef06e5 | refs/heads/master | 2021-06-27T05:19:47.774044 | 2021-02-04T09:47:30 | 2021-02-04T09:47:30 | 210,991,299 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | class CustomStack:
def __init__(self, maxSize: int):
self.stack = []
self.maxSize = maxSize
def push(self, x: int) -> None:
if len(self.stack) < self.maxSize:
self.stack.append(x)
def pop(self) -> int:
if self.stack:
return self.stack.pop()
else:
return -1
def increment(self, k: int, val: int) -> None:
for i in range(min(k, len(self.stack))):
self.stack[i] += val
# Your CustomStack object will be instantiated and called as such:
# obj = CustomStack(maxSize)
# obj.push(x)
# param_2 = obj.pop()
# obj.increment(k,val) | [
"ziyuan.shen@duke.edu"
] | ziyuan.shen@duke.edu |
8fe85c165c882f31473b97cc238e98b2399d9522 | 24e7e0dfaaeaca8f911b40fcc2937342a0f278fd | /venv/Lib/site-packages/psutil/tests/test_contracts.py | 39a525696000f799dfae0c817aeb45a7e8799281 | [
"MIT",
"BSD-3-Clause"
] | permissive | BimiLevi/Covid19 | 90e234c639192d62bb87364ef96d6a46d8268fa0 | 5f07a9a4609383c02597373d76d6b6485d47936e | refs/heads/master | 2023-08-04T13:13:44.480700 | 2023-08-01T08:36:36 | 2023-08-01T08:36:36 | 288,455,446 | 1 | 0 | MIT | 2021-01-22T19:36:26 | 2020-08-18T12:53:43 | HTML | UTF-8 | Python | false | false | 26,188 | py | #!/usr/bin/env python3
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Contracts tests. These tests mainly check API sanity in terms of
returned types and APIs availability.
Some of these are duplicates of tests test_system.py and test_process.py
"""
import errno
import multiprocessing
import os
import signal
import stat
import sys
import time
import traceback
from psutil import AIX
from psutil import BSD
from psutil import FREEBSD
from psutil import LINUX
from psutil import MACOS
from psutil import NETBSD
from psutil import OPENBSD
from psutil import OSX
from psutil import POSIX
from psutil import SUNOS
from psutil import WINDOWS
from psutil._compat import FileNotFoundError
from psutil._compat import long
from psutil._compat import range
from psutil.tests import create_sockets
from psutil.tests import enum
from psutil.tests import GITHUB_WHEELS
from psutil.tests import HAS_CPU_FREQ
from psutil.tests import HAS_NET_IO_COUNTERS
from psutil.tests import HAS_SENSORS_FANS
from psutil.tests import HAS_SENSORS_TEMPERATURES
from psutil.tests import is_namedtuple
from psutil.tests import process_namespace
from psutil.tests import PsutilTestCase
from psutil.tests import PYPY
from psutil.tests import serialrun
from psutil.tests import SKIP_SYSCONS
from psutil.tests import unittest
from psutil.tests import VALID_PROC_STATUSES
import psutil
# ===================================================================
# --- APIs availability
# ===================================================================
# Make sure code reflects what doc promises in terms of APIs
# availability.
class TestAvailConstantsAPIs(PsutilTestCase):
    """Check that platform-specific module-level constants are exposed
    if and only if the current platform defines them.
    """
    def test_PROCFS_PATH(self):
        # A configurable procfs mount point only makes sense on
        # procfs-based systems.
        self.assertEqual(hasattr(psutil, "PROCFS_PATH"),
                         LINUX or SUNOS or AIX)
    def test_win_priority(self):
        # Windows scheduling priority classes.
        ae = self.assertEqual
        ae(hasattr(psutil, "ABOVE_NORMAL_PRIORITY_CLASS"), WINDOWS)
        ae(hasattr(psutil, "BELOW_NORMAL_PRIORITY_CLASS"), WINDOWS)
        ae(hasattr(psutil, "HIGH_PRIORITY_CLASS"), WINDOWS)
        ae(hasattr(psutil, "IDLE_PRIORITY_CLASS"), WINDOWS)
        ae(hasattr(psutil, "NORMAL_PRIORITY_CLASS"), WINDOWS)
        ae(hasattr(psutil, "REALTIME_PRIORITY_CLASS"), WINDOWS)
    def test_linux_ioprio_linux(self):
        # Linux-only I/O priority classes.
        ae = self.assertEqual
        ae(hasattr(psutil, "IOPRIO_CLASS_NONE"), LINUX)
        ae(hasattr(psutil, "IOPRIO_CLASS_RT"), LINUX)
        ae(hasattr(psutil, "IOPRIO_CLASS_BE"), LINUX)
        ae(hasattr(psutil, "IOPRIO_CLASS_IDLE"), LINUX)
    def test_linux_ioprio_windows(self):
        # Windows-only I/O priority levels.
        ae = self.assertEqual
        ae(hasattr(psutil, "IOPRIO_HIGH"), WINDOWS)
        ae(hasattr(psutil, "IOPRIO_NORMAL"), WINDOWS)
        ae(hasattr(psutil, "IOPRIO_LOW"), WINDOWS)
        ae(hasattr(psutil, "IOPRIO_VERYLOW"), WINDOWS)
    @unittest.skipIf(GITHUB_WHEELS, "not exposed via GITHUB_WHEELS")
    def test_linux_rlimit(self):
        # Resource-limit constants: most exist on both Linux and FreeBSD;
        # the trailing assertions cover the ones exclusive to one platform.
        ae = self.assertEqual
        ae(hasattr(psutil, "RLIM_INFINITY"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_AS"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_CORE"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_CPU"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_DATA"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_FSIZE"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_MEMLOCK"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_NOFILE"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_NPROC"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_RSS"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_STACK"), LINUX or FREEBSD)
        ae(hasattr(psutil, "RLIMIT_LOCKS"), LINUX)
        ae(hasattr(psutil, "RLIMIT_MSGQUEUE"), LINUX)  # requires Linux 2.6.8
        ae(hasattr(psutil, "RLIMIT_NICE"), LINUX)  # requires Linux 2.6.12
        ae(hasattr(psutil, "RLIMIT_RTPRIO"), LINUX)  # requires Linux 2.6.12
        ae(hasattr(psutil, "RLIMIT_RTTIME"), LINUX)  # requires Linux 2.6.25
        ae(hasattr(psutil, "RLIMIT_SIGPENDING"), LINUX)  # requires Linux 2.6.8
        ae(hasattr(psutil, "RLIMIT_SWAP"), FREEBSD)
        ae(hasattr(psutil, "RLIMIT_SBSIZE"), FREEBSD)
        ae(hasattr(psutil, "RLIMIT_NPTS"), FREEBSD)
class TestAvailSystemAPIs(PsutilTestCase):
    """Verify that system-wide APIs exist exactly on the platforms which
    are supposed to provide them.
    """
    def _assert_avail(self, api_name, expected):
        # API `api_name` must be present iff `expected` is true for
        # the current platform.
        self.assertEqual(hasattr(psutil, api_name), expected)
    def test_win_service_iter(self):
        self._assert_avail("win_service_iter", WINDOWS)
    def test_win_service_get(self):
        self._assert_avail("win_service_get", WINDOWS)
    def test_cpu_freq(self):
        self._assert_avail("cpu_freq", LINUX or MACOS or WINDOWS or FREEBSD)
    def test_sensors_temperatures(self):
        self._assert_avail("sensors_temperatures", LINUX or FREEBSD)
    def test_sensors_fans(self):
        self._assert_avail("sensors_fans", LINUX)
    def test_battery(self):
        self._assert_avail("sensors_battery",
                           LINUX or WINDOWS or FREEBSD or MACOS)
class TestAvailProcessAPIs(PsutilTestCase):
    """Check that Process methods are exposed only on the platforms
    which implement them.
    """
    def test_environ(self):
        self.assertEqual(hasattr(psutil.Process, "environ"),
                         LINUX or MACOS or WINDOWS or AIX or SUNOS or
                         FREEBSD or OPENBSD or NETBSD)
    def test_uids(self):
        self.assertEqual(hasattr(psutil.Process, "uids"), POSIX)
    def test_gids(self):
        # BUGFIX: this used to probe "uids" again (copy/paste error),
        # leaving Process.gids() availability entirely untested.
        self.assertEqual(hasattr(psutil.Process, "gids"), POSIX)
    def test_terminal(self):
        self.assertEqual(hasattr(psutil.Process, "terminal"), POSIX)
    def test_ionice(self):
        self.assertEqual(hasattr(psutil.Process, "ionice"), LINUX or WINDOWS)
    @unittest.skipIf(GITHUB_WHEELS, "not exposed via GITHUB_WHEELS")
    def test_rlimit(self):
        # requires Linux 2.6.36
        self.assertEqual(hasattr(psutil.Process, "rlimit"), LINUX or FREEBSD)
    def test_io_counters(self):
        hasit = hasattr(psutil.Process, "io_counters")
        self.assertEqual(hasit, False if MACOS or SUNOS else True)
    def test_num_fds(self):
        self.assertEqual(hasattr(psutil.Process, "num_fds"), POSIX)
    def test_num_handles(self):
        self.assertEqual(hasattr(psutil.Process, "num_handles"), WINDOWS)
    def test_cpu_affinity(self):
        self.assertEqual(hasattr(psutil.Process, "cpu_affinity"),
                         LINUX or WINDOWS or FREEBSD)
    def test_cpu_num(self):
        self.assertEqual(hasattr(psutil.Process, "cpu_num"),
                         LINUX or FREEBSD or SUNOS)
    def test_memory_maps(self):
        hasit = hasattr(psutil.Process, "memory_maps")
        self.assertEqual(
            hasit, False if OPENBSD or NETBSD or AIX or MACOS else True)
# ===================================================================
# --- API types
# ===================================================================
class TestSystemAPITypes(PsutilTestCase):
    """Check the return types of system related APIs.
    Mainly we want to test we never return unicode on Python 2, see:
    https://github.com/giampaolo/psutil/issues/1039
    """
    @classmethod
    def setUpClass(cls):
        # NOTE(review): cls.proc is never referenced by any test in this
        # class; it looks like leftover scaffolding — confirm before removal.
        cls.proc = psutil.Process()
    def assert_ntuple_of_nums(self, nt, type_=float, gezero=True):
        # Helper: every field of namedtuple `nt` must be an instance of
        # `type_` and, unless `gezero` is False, >= 0.
        assert is_namedtuple(nt)
        for n in nt:
            self.assertIsInstance(n, type_)
            if gezero:
                self.assertGreaterEqual(n, 0)
    def test_cpu_times(self):
        self.assert_ntuple_of_nums(psutil.cpu_times())
        for nt in psutil.cpu_times(percpu=True):
            self.assert_ntuple_of_nums(nt)
    def test_cpu_percent(self):
        self.assertIsInstance(psutil.cpu_percent(interval=None), float)
        self.assertIsInstance(psutil.cpu_percent(interval=0.00001), float)
    def test_cpu_times_percent(self):
        self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=None))
        self.assert_ntuple_of_nums(psutil.cpu_times_percent(interval=0.0001))
    def test_cpu_count(self):
        self.assertIsInstance(psutil.cpu_count(), int)
    @unittest.skipIf(not HAS_CPU_FREQ, "not supported")
    def test_cpu_freq(self):
        if psutil.cpu_freq() is None:
            raise self.skipTest("cpu_freq() returns None")
        self.assert_ntuple_of_nums(psutil.cpu_freq(), type_=(float, int, long))
    def test_disk_io_counters(self):
        # Duplicate of test_system.py. Keep it anyway.
        for k, v in psutil.disk_io_counters(perdisk=True).items():
            self.assertIsInstance(k, str)
            self.assert_ntuple_of_nums(v, type_=(int, long))
    def test_disk_partitions(self):
        # Duplicate of test_system.py. Keep it anyway.
        for disk in psutil.disk_partitions():
            self.assertIsInstance(disk.device, str)
            self.assertIsInstance(disk.mountpoint, str)
            self.assertIsInstance(disk.fstype, str)
            self.assertIsInstance(disk.opts, str)
    @unittest.skipIf(SKIP_SYSCONS, "requires root")
    def test_net_connections(self):
        with create_sockets():
            ret = psutil.net_connections('all')
            self.assertEqual(len(ret), len(set(ret)))
            for conn in ret:
                assert is_namedtuple(conn)
    def test_net_if_addrs(self):
        # Duplicate of test_system.py. Keep it anyway.
        for ifname, addrs in psutil.net_if_addrs().items():
            self.assertIsInstance(ifname, str)
            for addr in addrs:
                if enum is not None and not PYPY:
                    self.assertIsInstance(addr.family, enum.IntEnum)
                else:
                    self.assertIsInstance(addr.family, int)
                self.assertIsInstance(addr.address, str)
                self.assertIsInstance(addr.netmask, (str, type(None)))
                self.assertIsInstance(addr.broadcast, (str, type(None)))
    def test_net_if_stats(self):
        # Duplicate of test_system.py. Keep it anyway.
        for ifname, info in psutil.net_if_stats().items():
            self.assertIsInstance(ifname, str)
            self.assertIsInstance(info.isup, bool)
            if enum is not None:
                self.assertIsInstance(info.duplex, enum.IntEnum)
            else:
                self.assertIsInstance(info.duplex, int)
            self.assertIsInstance(info.speed, int)
            self.assertIsInstance(info.mtu, int)
    @unittest.skipIf(not HAS_NET_IO_COUNTERS, 'not supported')
    def test_net_io_counters(self):
        # Duplicate of test_system.py. Keep it anyway.
        for ifname, _ in psutil.net_io_counters(pernic=True).items():
            self.assertIsInstance(ifname, str)
    @unittest.skipIf(not HAS_SENSORS_FANS, "not supported")
    def test_sensors_fans(self):
        # Duplicate of test_system.py. Keep it anyway.
        for name, units in psutil.sensors_fans().items():
            self.assertIsInstance(name, str)
            for unit in units:
                self.assertIsInstance(unit.label, str)
                self.assertIsInstance(unit.current, (float, int, type(None)))
    @unittest.skipIf(not HAS_SENSORS_TEMPERATURES, "not supported")
    def test_sensors_temperatures(self):
        # Duplicate of test_system.py. Keep it anyway.
        for name, units in psutil.sensors_temperatures().items():
            self.assertIsInstance(name, str)
            for unit in units:
                self.assertIsInstance(unit.label, str)
                self.assertIsInstance(unit.current, (float, int, type(None)))
                self.assertIsInstance(unit.high, (float, int, type(None)))
                self.assertIsInstance(unit.critical, (float, int, type(None)))
    def test_boot_time(self):
        # Duplicate of test_system.py. Keep it anyway.
        self.assertIsInstance(psutil.boot_time(), float)
    def test_users(self):
        # Duplicate of test_system.py. Keep it anyway.
        for user in psutil.users():
            self.assertIsInstance(user.name, str)
            self.assertIsInstance(user.terminal, (str, type(None)))
            self.assertIsInstance(user.host, (str, type(None)))
            self.assertIsInstance(user.pid, (int, type(None)))
class TestProcessWaitType(PsutilTestCase):
    """Check the value and type returned by Process.wait()."""
    @unittest.skipIf(not POSIX, "not POSIX")
    def test_negative_signal(self):
        # A process killed by a signal must report the negated signal
        # number as its exit code.
        proc = psutil.Process(self.spawn_testproc().pid)
        proc.terminate()
        exit_code = proc.wait()
        self.assertEqual(exit_code, -signal.SIGTERM)
        expected_type = int if enum is None else enum.IntEnum
        self.assertIsInstance(exit_code, expected_type)
# ===================================================================
# --- Featch all processes test
# ===================================================================
def proc_info(pid):
    """Return a dict mapping getter names to their values for process *pid*.

    Executed inside a worker process by TestFetchAllProcesses.  Getters
    which raise a psutil exception have the exception sanity-checked and
    are skipped; if the process vanishes entirely an empty dict is
    returned.
    """
    tcase = PsutilTestCase()
    def check_exception(exc, proc, name, ppid):
        # Validate the metadata psutil attaches to its exceptions.
        tcase.assertEqual(exc.pid, pid)
        tcase.assertEqual(exc.name, name)
        if isinstance(exc, psutil.ZombieProcess):
            if exc.ppid is not None:
                tcase.assertGreaterEqual(exc.ppid, 0)
                tcase.assertEqual(exc.ppid, ppid)
        elif isinstance(exc, psutil.NoSuchProcess):
            tcase.assertProcessGone(proc)
        str(exc)
        assert exc.msg
    def do_wait():
        # Reap the process if possible; any psutil error raised while
        # waiting is validated like the getter errors above.
        if pid != 0:
            try:
                proc.wait(0)
            except psutil.Error as exc:
                check_exception(exc, proc, name, ppid)
    try:
        proc = psutil.Process(pid)
        d = proc.as_dict(['ppid', 'name'])
    except psutil.NoSuchProcess:
        return {}
    name, ppid = d['name'], d['ppid']
    info = {'pid': proc.pid}
    ns = process_namespace(proc)
    with proc.oneshot():
        for fun, fun_name in ns.iter(ns.getters, clear_cache=False):
            try:
                info[fun_name] = fun()
            except psutil.Error as exc:
                check_exception(exc, proc, name, ppid)
                continue
    do_wait()
    return info
@serialrun
class TestFetchAllProcesses(PsutilTestCase):
    """Test which iterates over all running processes and performs
    some sanity checks against Process API's returned values.
    Uses a process pool to get info about all processes.
    """
    def setUp(self):
        self.pool = multiprocessing.Pool()
    def tearDown(self):
        self.pool.terminate()
        self.pool.join()
    def iter_proc_info(self):
        # Fixes "can't pickle <function proc_info>: it's not the
        # same object as test_contracts.proc_info".
        from psutil.tests.test_contracts import proc_info
        return self.pool.imap_unordered(proc_info, psutil.pids())
    def test_all(self):
        # For every process, run the validator method named after each
        # getter (e.g. self.cmdline for "cmdline") and collect failures
        # so one bad process doesn't hide the others.
        failures = []
        for info in self.iter_proc_info():
            for name, value in info.items():
                meth = getattr(self, name)
                try:
                    meth(value, info)
                except AssertionError:
                    s = '\n' + '=' * 70 + '\n'
                    s += "FAIL: test_%s pid=%s, ret=%s\n" % (
                        name, info['pid'], repr(value))
                    s += '-' * 70
                    s += "\n%s" % traceback.format_exc()
                    s = "\n".join((" " * 4) + i for i in s.splitlines())
                    s += '\n'
                    failures.append(s)
                else:
                    if value not in (0, 0.0, [], None, '', {}):
                        assert value, value
        if failures:
            # NOTE(review): self.fail() already raises; the extra `raise`
            # is redundant but harmless.
            raise self.fail(''.join(failures))
    def cmdline(self, ret, info):
        self.assertIsInstance(ret, list)
        for part in ret:
            self.assertIsInstance(part, str)
    def exe(self, ret, info):
        self.assertIsInstance(ret, (str, type(None)))
        if not ret:
            self.assertEqual(ret, '')
        else:
            if WINDOWS and not ret.endswith('.exe'):
                return # May be "Registry", "MemCompression", ...
            assert os.path.isabs(ret), ret
            # Note: os.stat() may return False even if the file is there
            # hence we skip the test, see:
            # http://stackoverflow.com/questions/3112546/os-path-exists-lies
            if POSIX and os.path.isfile(ret):
                if hasattr(os, 'access') and hasattr(os, "X_OK"):
                    # XXX may fail on MACOS
                    assert os.access(ret, os.X_OK)
    def pid(self, ret, info):
        self.assertIsInstance(ret, int)
        self.assertGreaterEqual(ret, 0)
    def ppid(self, ret, info):
        self.assertIsInstance(ret, (int, long))
        self.assertGreaterEqual(ret, 0)
    def name(self, ret, info):
        self.assertIsInstance(ret, str)
        # on AIX, "<exiting>" processes don't have names
        if not AIX:
            assert ret
    def create_time(self, ret, info):
        self.assertIsInstance(ret, float)
        try:
            self.assertGreaterEqual(ret, 0)
        except AssertionError:
            # XXX
            if OPENBSD and info['status'] == psutil.STATUS_ZOMBIE:
                pass
            else:
                raise
        # this can't be taken for granted on all platforms
        # self.assertGreaterEqual(ret, psutil.boot_time())
        # make sure returned value can be pretty printed
        # with strftime
        time.strftime("%Y %m %d %H:%M:%S", time.localtime(ret))
    def uids(self, ret, info):
        assert is_namedtuple(ret)
        for uid in ret:
            self.assertIsInstance(uid, int)
            self.assertGreaterEqual(uid, 0)
    def gids(self, ret, info):
        assert is_namedtuple(ret)
        # note: testing all gids as above seems not to be reliable for
        # gid == 30 (nodoby); not sure why.
        for gid in ret:
            self.assertIsInstance(gid, int)
            if not MACOS and not NETBSD:
                self.assertGreaterEqual(gid, 0)
    def username(self, ret, info):
        self.assertIsInstance(ret, str)
        assert ret
    def status(self, ret, info):
        self.assertIsInstance(ret, str)
        assert ret
        self.assertNotEqual(ret, '?') # XXX
        self.assertIn(ret, VALID_PROC_STATUSES)
    def io_counters(self, ret, info):
        assert is_namedtuple(ret)
        for field in ret:
            self.assertIsInstance(field, (int, long))
            if field != -1:
                self.assertGreaterEqual(field, 0)
    def ionice(self, ret, info):
        if LINUX:
            self.assertIsInstance(ret.ioclass, int)
            self.assertIsInstance(ret.value, int)
            self.assertGreaterEqual(ret.ioclass, 0)
            self.assertGreaterEqual(ret.value, 0)
        else: # Windows, Cygwin
            choices = [
                psutil.IOPRIO_VERYLOW,
                psutil.IOPRIO_LOW,
                psutil.IOPRIO_NORMAL,
                psutil.IOPRIO_HIGH]
            self.assertIsInstance(ret, int)
            self.assertGreaterEqual(ret, 0)
            self.assertIn(ret, choices)
    def num_threads(self, ret, info):
        self.assertIsInstance(ret, int)
        self.assertGreaterEqual(ret, 1)
    def threads(self, ret, info):
        self.assertIsInstance(ret, list)
        for t in ret:
            assert is_namedtuple(t)
            self.assertGreaterEqual(t.id, 0)
            self.assertGreaterEqual(t.user_time, 0)
            self.assertGreaterEqual(t.system_time, 0)
            for field in t:
                self.assertIsInstance(field, (int, float))
    def cpu_times(self, ret, info):
        assert is_namedtuple(ret)
        for n in ret:
            self.assertIsInstance(n, float)
            self.assertGreaterEqual(n, 0)
        # TODO: check ntuple fields
    def cpu_percent(self, ret, info):
        self.assertIsInstance(ret, float)
        assert 0.0 <= ret <= 100.0, ret
    def cpu_num(self, ret, info):
        self.assertIsInstance(ret, int)
        if FREEBSD and ret == -1:
            return
        self.assertGreaterEqual(ret, 0)
        if psutil.cpu_count() == 1:
            self.assertEqual(ret, 0)
        self.assertIn(ret, list(range(psutil.cpu_count())))
    def memory_info(self, ret, info):
        assert is_namedtuple(ret)
        for value in ret:
            self.assertIsInstance(value, (int, long))
            self.assertGreaterEqual(value, 0)
        if WINDOWS:
            self.assertGreaterEqual(ret.peak_wset, ret.wset)
            self.assertGreaterEqual(ret.peak_paged_pool, ret.paged_pool)
            self.assertGreaterEqual(ret.peak_nonpaged_pool, ret.nonpaged_pool)
            self.assertGreaterEqual(ret.peak_pagefile, ret.pagefile)
    def memory_full_info(self, ret, info):
        assert is_namedtuple(ret)
        total = psutil.virtual_memory().total
        for name in ret._fields:
            value = getattr(ret, name)
            self.assertIsInstance(value, (int, long))
            self.assertGreaterEqual(value, 0, msg=(name, value))
            # NOTE(review): due to operator precedence this parses as
            # `LINUX or (OSX and name in ...)`, so on Linux the <= total
            # check below is skipped for EVERY field, not just vms/data.
            # Possibly intended: `(LINUX or OSX) and name in (...)` — confirm.
            if LINUX or OSX and name in ('vms', 'data'):
                # On Linux there are processes (e.g. 'goa-daemon') whose
                # VMS is incredibly high for some reason.
                continue
            self.assertLessEqual(value, total, msg=(name, value, total))
        if LINUX:
            self.assertGreaterEqual(ret.pss, ret.uss)
    def open_files(self, ret, info):
        self.assertIsInstance(ret, list)
        for f in ret:
            self.assertIsInstance(f.fd, int)
            self.assertIsInstance(f.path, str)
            if WINDOWS:
                self.assertEqual(f.fd, -1)
            elif LINUX:
                self.assertIsInstance(f.position, int)
                self.assertIsInstance(f.mode, str)
                self.assertIsInstance(f.flags, int)
                self.assertGreaterEqual(f.position, 0)
                self.assertIn(f.mode, ('r', 'w', 'a', 'r+', 'a+'))
                self.assertGreater(f.flags, 0)
            elif BSD and not f.path:
                # XXX see: https://github.com/giampaolo/psutil/issues/595
                continue
            assert os.path.isabs(f.path), f
            try:
                st = os.stat(f.path)
            except FileNotFoundError:
                pass
            else:
                assert stat.S_ISREG(st.st_mode), f
    def num_fds(self, ret, info):
        self.assertIsInstance(ret, int)
        self.assertGreaterEqual(ret, 0)
    def connections(self, ret, info):
        # NOTE(review): `ret` was collected earlier in the worker process;
        # the create_sockets() context here does not appear to affect it.
        with create_sockets():
            self.assertEqual(len(ret), len(set(ret)))
            for conn in ret:
                assert is_namedtuple(conn)
    def cwd(self, ret, info):
        if ret: # 'ret' can be None or empty
            self.assertIsInstance(ret, str)
            assert os.path.isabs(ret), ret
            try:
                st = os.stat(ret)
            except OSError as err:
                if WINDOWS and err.errno in \
                        psutil._psplatform.ACCESS_DENIED_SET:
                    pass
                # directory has been removed in the meantime
                elif err.errno != errno.ENOENT:
                    raise
            else:
                assert stat.S_ISDIR(st.st_mode)
    def memory_percent(self, ret, info):
        self.assertIsInstance(ret, float)
        assert 0 <= ret <= 100, ret
    def is_running(self, ret, info):
        self.assertIsInstance(ret, bool)
    def cpu_affinity(self, ret, info):
        self.assertIsInstance(ret, list)
        assert ret != [], ret
        cpus = list(range(psutil.cpu_count()))
        for n in ret:
            self.assertIsInstance(n, int)
            self.assertIn(n, cpus)
    def terminal(self, ret, info):
        self.assertIsInstance(ret, (str, type(None)))
        if ret is not None:
            assert os.path.isabs(ret), ret
            assert os.path.exists(ret), ret
    def memory_maps(self, ret, info):
        for nt in ret:
            self.assertIsInstance(nt.addr, str)
            self.assertIsInstance(nt.perms, str)
            self.assertIsInstance(nt.path, str)
            for fname in nt._fields:
                value = getattr(nt, fname)
                if fname == 'path':
                    if not value.startswith('['):
                        assert os.path.isabs(nt.path), nt.path
                        # commented as on Linux we might get
                        # '/foo/bar (deleted)'
                        # assert os.path.exists(nt.path), nt.path
                elif fname == 'addr':
                    assert value, repr(value)
                elif fname == 'perms':
                    if not WINDOWS:
                        assert value, repr(value)
                else:
                    self.assertIsInstance(value, (int, long))
                    self.assertGreaterEqual(value, 0)
    def num_handles(self, ret, info):
        self.assertIsInstance(ret, int)
        self.assertGreaterEqual(ret, 0)
    def nice(self, ret, info):
        self.assertIsInstance(ret, int)
        if POSIX:
            assert -20 <= ret <= 20, ret
        else:
            priorities = [getattr(psutil, x) for x in dir(psutil)
                          if x.endswith('_PRIORITY_CLASS')]
            self.assertIn(ret, priorities)
            if sys.version_info > (3, 4):
                self.assertIsInstance(ret, enum.IntEnum)
            else:
                self.assertIsInstance(ret, int)
    def num_ctx_switches(self, ret, info):
        assert is_namedtuple(ret)
        for value in ret:
            self.assertIsInstance(value, (int, long))
            self.assertGreaterEqual(value, 0)
    def rlimit(self, ret, info):
        self.assertIsInstance(ret, tuple)
        self.assertEqual(len(ret), 2)
        self.assertGreaterEqual(ret[0], -1)
        self.assertGreaterEqual(ret[1], -1)
    def environ(self, ret, info):
        self.assertIsInstance(ret, dict)
        for k, v in ret.items():
            self.assertIsInstance(k, str)
            self.assertIsInstance(v, str)
if __name__ == '__main__':
    from psutil.tests.runner import run_from_name
    # Run only the tests defined in this module.
    run_from_name(__file__)
| [
"50989568+BimiLevi@users.noreply.github.com"
] | 50989568+BimiLevi@users.noreply.github.com |
c7b3a8d8d3cae2a525214934241dd1b96853f0b1 | e9fcfe65e02655af14c07bbd9af7fb1d81debd44 | /cema/agriculture/county/backfill_county_AgWx.py | 97d510295bb2d0da3bb256d5decac0043a156517 | [] | no_license | jsimkins2/UD_SRS | 7884f3abec97b0eb158862e6add0bfd2204c3921 | bdaf8fe59de3b20b3bc1027288be7e6b0454b8eb | refs/heads/master | 2022-09-09T00:38:01.140012 | 2022-08-31T00:00:12 | 2022-08-31T00:00:12 | 103,303,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,721 | py | # deos geopandas to be run at home
import os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from matplotlib.colors import ListedColormap
import geopandas as gpd
from scipy.interpolate import griddata
import rioxarray
import xarray as xr
import pyproj
from pyproj import Proj
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import cartopy.io.img_tiles as cimgt
import cartopy.io.shapereader as shpreader
import cartopy.feature as cfeature
import geopandas
from shapely.geometry import box, mapping
import matplotlib.colors as clr
from matplotlib.colors import BoundaryNorm
import matplotlib as mpl
import pandas as pd
import matplotlib.patheffects as path_effects
import matplotlib.image as image
import time
from datetime import datetime, timedelta, date
# declare paths
shapePaths = "/home/james/mapLayers/"  # directory holding the shapefiles read below
colorPaths = "/home/james/colorramps/"  # color ramp files (unused in this script)
raster_path = "/home/sat_ops/deos/temp/"  # scratch dir for temporary per-timestep geotiffs
my_dpi = 100  # figure DPI (unused in this script)
## define custom functions
def check_crs(crs):
    """Return a valid pyproj projection built from *crs*, or None.

    Accepts an existing ``pyproj.Proj`` (returned unchanged), or a dict /
    proj4-style string / ``"epsg:XXXX"`` init string which is converted to
    a ``pyproj.Proj``.  Anything unparseable yields ``None``.

    Examples
    --------
    >>> p = check_crs('+units=m +init=epsg:26915')
    >>> p.srs
    '+units=m +init=epsg:26915 '
    >>> p = check_crs('wrong')
    >>> p is None
    True

    Returns
    -------
    A valid crs if possible, otherwise None
    """
    if isinstance(crs, pyproj.Proj):
        return crs
    # BUGFIX: the original tested `isinstance(crs, Grid)` (salem) and
    # `isinstance(crs, string_types)` (six), but neither name is imported
    # in this file, so any non-Proj argument raised NameError.  On
    # Python 3 plain `str` covers the string case.
    if isinstance(crs, (dict, str)):
        try:
            return pyproj.Proj(crs)
        except RuntimeError:
            # Some inputs (e.g. bare "epsg:26915") only parse via init=.
            try:
                return pyproj.Proj(init=crs)
            except RuntimeError:
                return None
    return None
def proj_to_cartopy(proj):
    """Convert a pyproj.Proj to a cartopy.crs.Projection.

    Parses the proj4 string of *proj* and maps its parameters onto the
    keyword arguments of the matching cartopy projection class.

    Parameters
    ----------
    proj: pyproj.Proj
        the projection to convert

    Returns
    -------
    a cartopy.crs.Projection object
    """
    import cartopy.crs as ccrs
    proj = check_crs(proj)
    #if proj.is_latlong():
    #return ccrs.PlateCarree()
    srs = proj.srs
    # proj4 key -> cartopy projection kwarg
    km_proj = {'lon_0': 'central_longitude',
               'lat_0': 'central_latitude',
               'x_0': 'false_easting',
               'y_0': 'false_northing',
               'k': 'scale_factor',
               'zone': 'zone',
               }
    # proj4 key -> cartopy Globe kwarg
    km_globe = {'a': 'semimajor_axis',
                'b': 'semiminor_axis',
                }
    # standard parallels (Lambert conformal)
    km_std = {'lat_1': 'lat_1',
              'lat_2': 'lat_2',
              }
    kw_proj = dict()
    kw_globe = dict()
    kw_std = dict()
    # Walk the "+key=value" tokens of the proj4 string and sort each one
    # into the projection / globe / standard-parallel kwarg buckets.
    for s in srs.split('+'):
        s = s.split('=')
        if len(s) != 2:
            continue
        k = s[0].strip()
        v = s[1].strip()
        try:
            v = float(v)
        except:
            pass
        if k == 'proj':
            # NOTE(review): `cl` is only assigned for tmerc/lcc/merc/utm;
            # any other projection kind makes the `cl.__name__` check and
            # the final return raise UnboundLocalError — confirm callers
            # only pass these four.
            if v == 'tmerc':
                cl = ccrs.TransverseMercator
            if v == 'lcc':
                cl = ccrs.LambertConformal
            if v == 'merc':
                cl = ccrs.Mercator
            if v == 'utm':
                cl = ccrs.UTM
        if k in km_proj:
            kw_proj[km_proj[k]] = v
        if k in km_globe:
            kw_globe[km_globe[k]] = v
        if k in km_std:
            kw_std[km_std[k]] = v
    globe = None
    if kw_globe:
        globe = ccrs.Globe(**kw_globe)
    if kw_std:
        kw_proj['standard_parallels'] = (kw_std['lat_1'], kw_std['lat_2'])
    # mercatoooor
    if cl.__name__ == 'Mercator':
        # cartopy's Mercator does not accept false easting/northing.
        kw_proj.pop('false_easting', None)
        kw_proj.pop('false_northing', None)
    return cl(globe=globe, **kw_proj)
# read in deos special shapefiles
deos_boundarys = gpd.read_file(shapePaths + 'deoscounties.shp')
bigdeos = gpd.read_file(shapePaths + 'TRISTATE_OVERVIEW.shp')
inland_bays = gpd.read_file(shapePaths + 'InlandBays.shp')
state_outline = gpd.read_file(shapePaths + 'tristateMultiaddedPACo.shp')
# create cartopy instance of obscure projection (Delaware state plane style
# transverse Mercator); used below for clipping in that CRS
c = Proj('+proj=tmerc +lat_0=38 +lon_0=-75.41666666666667 +k=0.999995 +x_0=200000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs')
oldproj = proj_to_cartopy(c)
# load in agwx dataset, subset to the DE/tristate bounding box
# bounds = (west, south, east, north) in degrees
bounds=(-76.2,38.3,-74.85, 40.3)
agwx_main = xr.open_dataset("http://basin.ceoe.udel.edu/thredds/dodsC/DEOSAG.nc")
agwx_main = agwx_main.sel(latitude=slice(bounds[3], bounds[1]), longitude=slice(bounds[0],bounds[2]))
# NCEP stage IV precipitation from 2010-01-01 through today
dsPrec = xr.open_dataset("http://thredds.demac.udel.edu/thredds/dodsC/NCEPIVQC.nc")
dsPrec = dsPrec.sel(lat=slice(bounds[1], bounds[3]),
                    lon=slice(bounds[0],bounds[2]),
                    time=slice(datetime.strptime("2010-01-01", "%Y-%m-%d"),
                    date.today()))
# flip latitude to match agwx_main ordering and normalize dim names
dsPrec = dsPrec.reindex(lat=list(reversed(dsPrec.lat)))
dsPrec = dsPrec.rename(name_dict= {'lat' : 'latitude'})
dsPrec = dsPrec.rename(name_dict= {'lon' : 'longitude'})
dsPrec = dsPrec.drop('crs')
# create county data frames: one time-series Dataset per county
chester_ds = xr.Dataset({"time": agwx_main['time'].values})
ncc_ds = xr.Dataset({"time": agwx_main['time'].values})
kent_ds = xr.Dataset({"time": agwx_main['time'].values})
sussex_ds = xr.Dataset({"time": agwx_main['time'].values})
# each geotiff would need to be a lone time slice...going to look into geopandas update
# For every county x variable x timestep: rasterize the slice to a temp
# geotiff, clip it to the county (minus the inland bays), and record the
# county-mean value.
for co in range(0, len(deos_boundarys["NAME"])):
    county_outline = deos_boundarys.loc[[co], 'geometry']
    print(co)
    for var in agwx_main.data_vars:
        var_list = []
        print(var)
        for t in range(0,len(agwx_main.time.values)):
            da = xr.DataArray(agwx_main[var][t].values,dims=['latitude', 'longitude'],coords={'longitude': agwx_main.longitude.values, 'latitude' :agwx_main.latitude.values})
            da.rio.set_crs("epsg:4326")
            # NOTE(review): units/standard_name are hard-coded for every
            # variable, including non-temperature ones — likely copy/paste;
            # the attrs are overwritten later from agwx_main anyway.
            da.attrs['units'] = 'Fahrenheit'
            da.attrs['standard_name'] = 'Temperature'
            da.rio.set_spatial_dims('longitude', 'latitude')
            da.rio.to_raster(raster_path + var + str(co) + str(t) + '.tif', overwrite=True)
            xds = rioxarray.open_rasterio(raster_path + var + str(co) + str(t) + '.tif')
            # clip the interpolated data based on the shapefiles
            clipped = xds.rio.clip(county_outline.geometry.apply(mapping), xds.rio.crs, drop=True)
            cl = clipped.rio.clip(inland_bays.geometry.apply(mapping), oldproj.proj4_init, drop=False, invert=True)
            ds_county = cl.mean()
            var_list.append(round(ds_county.values.tolist(),2))
            # if we don't remove it, it won't overwrite properly
            os.system("/bin/rm " + raster_path + var + str(co) + str(t) + '.tif')
        # county index -> dataset (order comes from the shapefile rows)
        if co == 0:
            chester_ds[var] = (['time'], var_list)
        if co == 1:
            ncc_ds[var] = (['time'], var_list)
        if co == 2:
            kent_ds[var] = (['time'], var_list)
        if co == 3:
            sussex_ds[var] = (['time'], var_list)
# Same county-mean extraction, but for NCEP stage IV precipitation sampled
# at (nearest) agwx timesteps.
for co in range(0, len(deos_boundarys["NAME"])):
    county_outline = deos_boundarys.loc[[co], 'geometry']
    var = 'Precipitation_Flux'
    var_list = []
    for t in range(0,len(agwx_main.time.values)):
        da = xr.DataArray(dsPrec.Precipitation_Flux.sel(time = agwx_main.time.values[t], method = 'nearest').values,dims=['latitude', 'longitude'],coords={'longitude': agwx_main.longitude.values, 'latitude' :agwx_main.latitude.values})
        da.rio.set_crs("epsg:4326")
        # NOTE(review): placeholder attrs copied from the temperature loop;
        # the real units/long_name are attached from dsPrec below.
        da.attrs['units'] = 'Fahrenheit'
        da.attrs['standard_name'] = 'Temperature'
        da.rio.set_spatial_dims('longitude', 'latitude')
        da.rio.to_raster(raster_path + var + str(co) + str(t) + '.tif', overwrite=True)
        xds = rioxarray.open_rasterio(raster_path + var + str(co) + str(t) + '.tif')
        # clip the interpolated data based on the shapefiles
        clipped = xds.rio.clip(county_outline.geometry.apply(mapping), xds.rio.crs, drop=True)
        cl = clipped.rio.clip(inland_bays.geometry.apply(mapping), oldproj.proj4_init, drop=False, invert=True)
        ds_county = cl.mean()
        var_list.append(round(ds_county.values.tolist(),2))
        # if we don't remove it, it won't overwrite properly
        os.system("/bin/rm " + raster_path + var + str(co) + str(t) + '.tif')
    if co == 0:
        chester_ds['NCEPstageIVPrecip'] = (['time'], var_list)
        chester_ds['NCEPstageIVPrecip'].attrs['units'] = dsPrec[var].attrs['units']
        chester_ds['NCEPstageIVPrecip'].attrs['long_name'] = dsPrec[var].attrs['long_name']
    if co == 1:
        ncc_ds['NCEPstageIVPrecip'] = (['time'], var_list)
        ncc_ds['NCEPstageIVPrecip'].attrs['units'] = dsPrec[var].attrs['units']
        ncc_ds['NCEPstageIVPrecip'].attrs['long_name'] = dsPrec[var].attrs['long_name']
    if co == 2:
        kent_ds['NCEPstageIVPrecip'] = (['time'], var_list)
        kent_ds['NCEPstageIVPrecip'].attrs['units'] = dsPrec[var].attrs['units']
        kent_ds['NCEPstageIVPrecip'].attrs['long_name'] = dsPrec[var].attrs['long_name']
    if co == 3:
        sussex_ds['NCEPstageIVPrecip'] = (['time'], var_list)
        sussex_ds['NCEPstageIVPrecip'].attrs['units'] = dsPrec[var].attrs['units']
        sussex_ds['NCEPstageIVPrecip'].attrs['long_name'] = dsPrec[var].attrs['long_name']
# Copy units/long_name metadata from the source dataset onto every output
# variable (precip already got its attrs from dsPrec above).
for var in chester_ds.variables:
    if var != 'time':
        if var != 'NCEPstageIVPrecip' :
            chester_ds[var].attrs['units'] = agwx_main[var].attrs['units']
            ncc_ds[var].attrs['units'] = agwx_main[var].attrs['units']
            kent_ds[var].attrs['units'] = agwx_main[var].attrs['units']
            sussex_ds[var].attrs['units'] = agwx_main[var].attrs['units']
            chester_ds[var].attrs['long_name'] = agwx_main[var].attrs['long_name']
            ncc_ds[var].attrs['long_name'] = agwx_main[var].attrs['long_name']
            kent_ds[var].attrs['long_name'] = agwx_main[var].attrs['long_name']
            sussex_ds[var].attrs['long_name'] = agwx_main[var].attrs['long_name']
# Write each per-county time series to a staging file, then swap it into
# place so readers never see a partially-written netCDF.
chester_ds.to_netcdf("/data/DEOS/chester/chester_agwx_updated.nc", mode='w')
print('finished chester county')
ncc_ds.to_netcdf("/data/DEOS/ncc/ncc_agwx_updated.nc", mode='w')
print('finished new castle county')
kent_ds.to_netcdf("/data/DEOS/kent/kent_agwx_updated.nc", mode='w')
print('finished kent county')
sussex_ds.to_netcdf("/data/DEOS/sussex/sussex_agwx_updated.nc", mode='w')
print('finished sussex county')
# BUGFIX: the original called chester_agwx.close() (and ncc_agwx, kent_agwx,
# sussex_agwx) — names that are never defined anywhere in this script — so
# the run died with NameError before the mv commands below executed.
# Close the datasets that were actually created.
chester_ds.close()
ncc_ds.close()
kent_ds.close()
sussex_ds.close()
os.system("/bin/mv /data/DEOS/chester/chester_agwx_updated.nc /data/DEOS/chester/chester_agwx.nc")
os.system("/bin/mv /data/DEOS/ncc/ncc_agwx_updated.nc /data/DEOS/ncc/ncc_agwx.nc")
os.system("/bin/mv /data/DEOS/kent/kent_agwx_updated.nc /data/DEOS/kent/kent_agwx.nc")
os.system("/bin/mv /data/DEOS/sussex/sussex_agwx_updated.nc /data/DEOS/sussex/sussex_agwx.nc")
| [
"jamessimkins5@gmail.com"
] | jamessimkins5@gmail.com |
c90c4ba206ae8f738a6b4e434082b9e5301d1d1e | b76ca51657175ad8a3589cc7b10c46f87d490077 | /PyRamen.py | 13bc579363ff6e9daef50296103b3219f88e15f4 | [] | no_license | KPH3802/Python_Ramen | a903ccb81ba47796d629a990ec387e3d73e1de10 | 0fddc5e0a881cb353c0cef330f8035d6895eafd1 | refs/heads/master | 2022-12-01T08:05:49.819506 | 2020-08-06T20:27:47 | 2020-08-06T20:27:47 | 284,310,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | import csv
def build_report(menu, sales):
    """Aggregate per-item sales metrics.

    Parameters
    ----------
    menu : list of rows [item, category, description, price, cost]
    sales : list of rows [id, date, card, quantity, menu_item]

    Returns
    -------
    dict mapping menu item name -> {"01-count", "02-revenue", "03-cogs",
    "04-profit"} accumulated over all sales of that item.
    """
    report = {}
    for sale in sales:
        quantity = int(sale[3])
        sales_item = sale[4]
        if sales_item not in report:
            # BUGFIX: the original assigned the SAME nested dict object to
            # every item (report[sales_item] = nested_dic), so all items
            # aliased one shared set of totals.  Build a fresh dict per item.
            report[sales_item] = {
                "01-count": 0,
                "02-revenue": 0,
                "03-cogs": 0,
                "04-profit": 0,
            }
        for record in menu:
            # record: [item, category, description, price, cost]
            if sales_item == record[0]:
                price = float(record[3])
                cost = float(record[4])
                report[sales_item]["01-count"] += quantity
                report[sales_item]["02-revenue"] += price * quantity
                report[sales_item]["03-cogs"] += cost * quantity
                report[sales_item]["04-profit"] += (price - cost) * quantity
    return report


def main():
    """Read the menu and sales CSVs, build the report, print and save it."""
    # Read menu rows (skipping the header) into a list
    menu = []
    with open("Resources/menu_data.csv") as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        next(reader)  # skip header
        for row in reader:
            menu.append(row)
    # Read sales rows (skipping the header) into a list
    sales = []
    with open("Resources/sales_data.csv") as csv_sales_file:
        reader2 = csv.reader(csv_sales_file, delimiter=',')
        next(reader2)  # skip header
        for row in reader2:
            sales.append(row)
    report = build_report(menu, sales)
    print(report)
    with open("report.txt", "w") as txt_file:
        for key, value in report.items():
            txt_file.write(f"{key} {value}\n")


if __name__ == "__main__":
    main()
| [
"kph3802@gmail.com"
] | kph3802@gmail.com |
fa6918134f144b570d2dbcdc36fa48e0fdacfa80 | 2e2b2ad583074e30d898bc4af20a515366b6f915 | /api/api/migrations/0001_initial.py | d929ec3345aab18c868ada603c2d8b28d81aa2fc | [
"MIT"
] | permissive | oneillkyle/farm-backend | ce43ddc7681c564f8a068c593525f079913c5b5c | 1d335d9c5803259fbcb1253de8f1d414180465ba | refs/heads/master | 2021-10-08T11:29:47.959312 | 2018-05-10T18:26:58 | 2018-05-10T18:26:58 | 94,968,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,829 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-30 23:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the farm-management ``api`` app.

    NOTE(review): this file was auto-generated by Django (see header
    comment); applied migrations must never be hand-edited, or deployed
    databases will diverge from the migration history.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # --- model creation (one CreateModel per table) ---
        migrations.CreateModel(
            name='Budget',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('amount', models.DecimalField(decimal_places=2, max_digits=10)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField()),
            ],
            options={
                'verbose_name': 'Budget',
                'db_table': 'budget',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Crop',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
            options={
                'verbose_name': 'Crop',
                'db_table': 'crop',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='CropHistory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('start_date', models.DateField()),
                ('end_date', models.DateField(blank=True, null=True)),
                ('plot_width_coordinate', models.IntegerField(blank=True, null=True)),
                ('plot_length_coordinate', models.IntegerField(blank=True, null=True)),
                ('amount', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('weight', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('number', models.PositiveIntegerField(blank=True, null=True)),
                ('price', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('serial_number', models.SlugField(blank=True)),
                ('description', models.TextField(blank=True)),
                ('crop', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Crop')),
            ],
            options={
                'verbose_name': 'Crop History',
                'db_table': 'crop_history',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='CropStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000, unique=True)),
                ('description', models.TextField(max_length=2000)),
            ],
            options={
                'verbose_name': 'Crop Status',
                'db_table': 'crop_status',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='CropType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=1024, unique=True)),
                ('details', models.TextField(blank=True)),
                ('average_days_to_mature', models.IntegerField(blank=True, null=True)),
                ('min_row_width', models.IntegerField(blank=True, null=True)),
                ('min_row_length', models.IntegerField(blank=True, null=True)),
                ('feeding_requirements', models.TextField(blank=True)),
                ('min_temp', models.IntegerField(blank=True, null=True)),
                ('max_temp', models.IntegerField(blank=True, null=True)),
            ],
            options={
                'verbose_name': 'Crop Type',
                'db_table': 'crop_type',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='CropTypeLinks',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('link', models.URLField()),
                ('crop_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='links', to='api.CropType')),
            ],
            options={
                'verbose_name': 'Crop Type Link',
                'db_table': 'crop_type_link',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Expense',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10)),
                ('time_spent', models.DecimalField(blank=True, decimal_places=2, max_digits=5, null=True)),
                ('start_date', models.DateField()),
                ('end_date', models.DateField(blank=True, null=True)),
                ('weight', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('amount', models.PositiveIntegerField(blank=True, null=True)),
                ('description', models.TextField(blank=True)),
                ('budget', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.Budget')),
                ('crop_type', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='expenses', to='api.CropType')),
            ],
            options={
                'verbose_name': 'Expense',
                'db_table': 'expense',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Farm',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000, unique=True)),
            ],
            options={
                'verbose_name': 'Farm',
                'db_table': 'farm',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Inventory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('farm', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='api.Farm')),
            ],
            options={
                'verbose_name': 'Inventory',
                'db_table': 'inventory',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='InventoryItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('weight', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True)),
                ('amount', models.PositiveIntegerField(blank=True, null=True)),
                ('crop_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='inventory_items', to='api.CropType')),
                ('inventory', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='api.Inventory')),
            ],
            options={
                'verbose_name': 'Inventory Item',
                'db_table': 'inventory_item',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Plot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, unique=True)),
                ('plot_width', models.PositiveIntegerField()),
                ('plot_length', models.PositiveIntegerField()),
                ('plot_location', models.IntegerField()),
                ('farm', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plots', to='api.Farm')),
            ],
            options={
                'verbose_name': 'Plot',
                'db_table': 'plot',
                'ordering': ['plot_location'],
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='SeedStatus',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000, unique=True)),
                ('description', models.TextField(max_length=2000)),
            ],
            options={
                'verbose_name': 'Seed Status',
                'db_table': 'seed_status',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Supplier',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000, unique=True)),
                ('address', models.CharField(blank=True, max_length=2000)),
                ('phone', models.IntegerField(blank=True, null=True)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('notes', models.TextField(blank=True)),
            ],
            options={
                'verbose_name': 'Supplier',
                'db_table': 'supplier',
                'managed': True,
            },
        ),
        migrations.CreateModel(
            name='Threat',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=2000, unique=True)),
                ('description', models.TextField(blank=True)),
            ],
            options={
                'verbose_name': 'Threat',
                'db_table': 'threat',
                'managed': True,
            },
        ),
        # --- cross-model FKs added after all tables exist ---
        migrations.AddField(
            model_name='croptype',
            name='susceptible_to',
            field=models.ManyToManyField(to='api.Threat'),
        ),
        migrations.AddField(
            model_name='crophistory',
            name='plot',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='crop_histories', to='api.Plot'),
        ),
        migrations.AddField(
            model_name='crophistory',
            name='seed_status',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='crop_histories', to='api.SeedStatus'),
        ),
        migrations.AddField(
            model_name='crophistory',
            name='status',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='crop_histories', to='api.CropStatus'),
        ),
        migrations.AddField(
            model_name='crophistory',
            name='supplier',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='crop_histories', to='api.Supplier'),
        ),
        migrations.AddField(
            model_name='crop',
            name='crop_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='crops', to='api.CropType'),
        ),
        migrations.AddField(
            model_name='budget',
            name='farm',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='budgets', to='api.Farm'),
        ),
    ]
| [
"oneillkyle@gmail.com"
] | oneillkyle@gmail.com |
7a4a2074330ac19f9dfb8accf586953accc5e5fa | 539b469c80ba0f4498588813fcd6f123022263fe | /mxonline/apps/courses/migrations/0007_auto_20170408_2142.py | 8b603cf110f155e61c20e7cc3e3b9db2774518bc | [] | no_license | cucy/mxonline | 0052e8abef529c256563e1883ecc2b589d6c1235 | 3e6a204fd98b88da1896bef48f9b31d160290fdf | refs/heads/master | 2021-01-18T17:28:02.636671 | 2017-09-29T05:24:21 | 2017-09-29T05:24:21 | 86,804,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 698 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-04-08 21:42
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds ``Video.url`` and alters ``Video.name`` (auto-generated).

    NOTE(review): generated migration file -- do not hand-edit once applied.
    """

    dependencies = [
        ('courses', '0006_auto_20170408_2141'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='url',
            # Escaped verbose_name strings are Chinese UI labels; kept as-is.
            field=models.CharField(default='www.baidu.com', max_length=100, verbose_name='\u8bbf\u95ee\u5730\u5740'),
        ),
        migrations.AlterField(
            model_name='video',
            name='name',
            field=models.CharField(max_length=100, verbose_name='\u89c6\u9891\u540d'),
        ),
    ]
| [
"zrd@zrd-2.local"
] | zrd@zrd-2.local |
57a5f45f9df83dea5f7c475e73100169c6e3f2ae | 7355144a5e406457b375f85077d19c3fb80fb700 | /master/main/views.py | fce5056782e946fed2e4237d5693b4075fb50878 | [] | no_license | Jonah1309/jonzz | dac41a9596f8655db33177c270b74f9f6bce8452 | 3dfa0030a2f70a3f2905c0c44179f2a3cf623cf1 | refs/heads/master | 2022-11-27T18:10:42.147570 | 2020-08-11T14:51:36 | 2020-08-11T14:51:36 | 284,207,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,776 | py | from django.shortcuts import render
import os
import sys
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.test import Client
from django.http import HttpResponse
from django.db import connection
from fpdf import FPDF
from .models import Cat
from .models import Sub
from .models import Act
from .models import Users
from django.views.decorators.csrf import csrf_exempt
from django.core.exceptions import ObjectDoesNotExist
# Create your views here.
def homepage(request):
    """Dump every table to stdout (debug aid) and render the demo home page."""
    # Same debug output as before: label immediately followed by the
    # queryset repr, one table per line.
    for label, model in (("cat", Cat), ("sub", Sub), ("act", Act), ("us", Users)):
        print(label + str(model.objects.all()))
    return render(request=request, template_name="main/home_demo.html")
@csrf_exempt
def homepage_choose(request):
    """Render main/home.html, adding a ``users`` context entry when the
    POSTed id matches a stored ``Users`` row.

    POST params:
        id -- numeric user id.  As before, a missing or non-numeric value
              raises (TypeError/ValueError) rather than returning an error page.

    Cleanup vs. original: removed commented-out dead code and debug prints;
    the broad exception handler is kept (it degrades any unexpected lookup
    failure to the anonymous page) but is now documented and still logged.
    """
    userid = int(request.POST.get('id'))
    us_choice = ''
    try:
        us_choice = Users.objects.get(us_id=userid).us_des
    except ObjectDoesNotExist:
        # No stored designation for this user: fall through to the
        # anonymous rendering.
        us_choice = ''
    except Exception as e:
        # Deliberately broad: any other lookup failure (e.g. DB outage)
        # degrades to the anonymous page instead of a 500.
        print("exception : " + str(e))
    if us_choice != '':
        return render(request, "main/home.html", {"users": [userid, us_choice]})
    return render(request, "main/home.html")
def check(request):
    """Render the main home page with all category/subcategory/activity rows."""
    context = {
        "category": Cat.objects.all(),
        "subcategory": Sub.objects.all(),
        "activity": Act.objects.all(),
    }
    return render(request=request, template_name="main/home.html", context=context)
def generate_output(request):
    """Build ``Report.pdf`` from the Category/Sub_Category/Activity GET params.

    Each param is a crude JSON-ish list string (e.g. ``["a","b"]``); it is
    stripped of quote/bracket characters and split on commas.  The PDF layout
    (fonts, spacing, line breaks every 4 items) is unchanged from the original.

    Refactor: the three copy-pasted section loops are collapsed into the
    local helpers below; the unused ``line`` variable was dropped.

    NOTE(review): as before, a missing GET param would make ``raw`` None and
    raise TypeError -- callers appear to always supply all three.
    """
    def parse_list(raw):
        # Strip the JSON-ish wrapper characters, then split on commas.
        waste = ['"', '[', ']']
        cleaned = ''.join(ch for ch in raw if ch not in waste)
        return cleaned.split(',')

    def write_section(pdf, title, items):
        # Bold 16pt header, then 12pt items: 4 per visual row, last item
        # followed by a blank gap -- identical write sequence to the original.
        pdf.set_font("Arial", 'B', size=16)
        pdf.write(5, title + "\n\n")
        pdf.set_font("Arial", size=12)
        for i, item in enumerate(items):
            if i == len(items) - 1:
                pdf.write(3, item + "\n\n\n")
            elif i % 4 == 0 and i != 0:
                pdf.write(3, item + "\n\n\n")
            else:
                pdf.write(3, item + " ")

    cat = parse_list(request.GET.get('Category'))
    sub = parse_list(request.GET.get('Sub_Category'))
    act = parse_list(request.GET.get('Activity'))

    pdf = FPDF()
    pdf.add_page()
    write_section(pdf, "Category", cat)
    write_section(pdf, "Sub Category", sub)
    write_section(pdf, "Activity", act)
    pdf.output("Report.pdf")
def generate_new_output(request):
    """Build ``Updated_Report.pdf``: like generate_output, plus a free-text
    "Reason" section taken from the ``Reason`` GET param.

    Refactor: the three copy-pasted section loops are collapsed into local
    helpers, and the leftover debug prints (the raw Reason value and its
    type) were removed.  PDF content and ordering are unchanged.
    """
    def strip_waste(raw):
        # Remove the JSON-ish wrapper characters; Reason stays one string.
        waste = ['"', '[', ']']
        return ''.join(ch for ch in raw if ch not in waste)

    def parse_list(raw):
        return strip_waste(raw).split(',')

    def write_section(pdf, title, items):
        # Bold 16pt header, then 12pt items, 4 per visual row -- identical
        # write sequence to the original loops.
        pdf.set_font("Arial", 'B', size=16)
        pdf.write(5, title + "\n\n")
        pdf.set_font("Arial", size=12)
        for i, item in enumerate(items):
            if i == len(items) - 1:
                pdf.write(3, item + "\n\n\n")
            elif i % 4 == 0 and i != 0:
                pdf.write(3, item + "\n\n\n")
            else:
                pdf.write(3, item + " ")

    cat = parse_list(request.GET.get('Category'))
    sub = parse_list(request.GET.get('Sub_Category'))
    act = parse_list(request.GET.get('Activity'))
    reason = strip_waste(request.GET.get('Reason'))

    pdf = FPDF()
    pdf.add_page()
    write_section(pdf, "Category", cat)
    write_section(pdf, "Sub Category", sub)
    write_section(pdf, "Activity", act)

    # Free-text reason section at the bottom of the report.
    pdf.set_font("Arial", 'B', size=16)
    pdf.write(5, "\n\n\nReason\n\n")
    pdf.set_font("Arial", size=12)
    pdf.write(3, reason)
    pdf.output("Updated_Report.pdf")
| [
"noreply@github.com"
] | noreply@github.com |
95323488f1a2f39dd31806aae172ae8687c22cab | 39fe41a33c00ea6dc8e04c61842c3764fdd07ff1 | /py3standardlib/algorithms/contextlib/contextlib_exitstack_pop_all.py | 68ab294ea41ed5242c5100523e6e1a684725e4f4 | [] | no_license | playbar/pylearn | f9639ffa1848a9db2aba52977de6c7167828b317 | 8bcd1b5a043cb19cde1631947eb128d9c05c259d | refs/heads/master | 2021-06-12T01:51:33.480049 | 2021-03-31T12:16:14 | 2021-03-31T12:16:14 | 147,980,595 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | # contextlib_exitstack_pop_all.py
import contextlib
from contextlib_context_managers import *
def variable_stack(contexts):
    """Enter every context manager in *contexts* on a single ExitStack.

    Returns a zero-argument clean-up callable on success, or None when an
    error raised while entering was *suppressed* by an already-entered
    manager (cleanup has then already run).  An unsuppressed error
    propagates to the caller.
    """
    with contextlib.ExitStack() as stack:
        for c in contexts:
            stack.enter_context(c)
        # Return the close() method of a new stack as a clean-up
        # function.  pop_all() transfers the entered managers to a fresh
        # stack so leaving this ``with`` block does NOT close them.
        return stack.pop_all().close
    # Reached only when enter_context() raised and a previously entered
    # manager's __exit__ suppressed the error: control falls out of the
    # ``with`` block instead of returning above.  Explicitly return None,
    # indicating the stack could not be initialized cleanly but that
    # cleanup has already occurred.
    return None
# Demo driver: three scenarios for variable_stack().  HandleError,
# ErrorOnEnter and PassError come from the wildcard import above;
# judging by their names (TODO confirm in contextlib_context_managers):
# HandleError suppresses errors, PassError does not, ErrorOnEnter fails
# in __enter__.

# Scenario 1: every manager enters cleanly -> we get a cleaner and call it.
print('No errors:')
cleaner = variable_stack([
    HandleError(1),
    HandleError(2),
])
cleaner()

# Scenario 2: entering the second manager fails but the error is
# presumably suppressed during unwinding, so variable_stack returns None.
print('\nHandled error building context manager stack:')
try:
    cleaner = variable_stack([
        HandleError(1),
        ErrorOnEnter(2),
    ])
except RuntimeError as err:
    print('caught error {}'.format(err))
else:
    if cleaner is not None:
        cleaner()
    else:
        print('no cleaner returned')

# Scenario 3: nothing suppresses the failure, so the RuntimeError escapes
# variable_stack and is caught here.
print('\nUnhandled error building context manager stack:')
try:
    cleaner = variable_stack([
        PassError(1),
        ErrorOnEnter(2),
    ])
except RuntimeError as err:
    print('caught error {}'.format(err))
else:
    if cleaner is not None:
        cleaner()
    else:
        print('no cleaner returned')
| [
"hgl868@126.com"
] | hgl868@126.com |
1faece1e7017086da3f95d59deab56c91ab020c5 | 444a70b81c4846e6921bb4e242d3f11cd485b03b | /etl.py | d17aa293c6fd96fae11b712168272280f08f6538 | [] | no_license | NunoVazAfonso/dataeng-spark-emr | 3dfcb74a2b5e6eb1376afcb26ce9bb0330ba9cb4 | c073098ff2de7e6dc99155f05eaa51fa2d1edaa0 | refs/heads/master | 2023-03-20T17:37:30.601984 | 2021-03-08T12:51:28 | 2021-03-08T12:51:28 | 343,572,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,216 | py | import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import udf, col
from pyspark.sql.functions import year, month, dayofmonth, hour, weekofyear, date_format, to_timestamp, to_date
# Read AWS credentials from dl.cfg and export them as environment
# variables so the hadoop-aws connector can pick them up.
config = configparser.ConfigParser()
config.read('dl.cfg')

os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
def create_spark_session():
    """Create (or fetch the already-running) Spark session.

    Returns:
        SparkSession configured with the hadoop-aws package for S3 access.
    """
    builder = SparkSession.builder.config(
        "spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0"
    )
    return builder.getOrCreate()
def process_song_data(spark, input_data, output_data):
    """
    - Processes JSON song files stored in the input location
    - Transforms dimension tables: songs, artists
    - Saves output to parquet files
    Arguments:
        spark -- instantiated SparkSession
        input_data (str) -- path to folder containing song files to be processed
        output_data (str) -- output path for final parquet files
    """
    print("Song processing : Started")

    # get filepath to song data file (nested artist/album folders)
    song_data = input_data + "song_data/*/*/*/*.json"

    # read song data file
    df = spark.read.json( song_data )

    # extract columns to create songs table
    songs_table = df.select( "song_id", "title", "artist_id", "year", "duration" ).distinct()

    # write songs table to parquet files partitioned by year and artist
    songs_table.write.partitionBy("year", "artist_id").parquet(output_data + "songs.parquet", mode="overwrite")

    # extract columns to create artists table
    # NOTE(review): "lattitude" is misspelled; kept because the parquet
    # schema (and any downstream readers) already use this column name.
    artists_table = df.selectExpr( "artist_id", "artist_name as name", "artist_location as location", "artist_latitude as lattitude", "artist_longitude as longitude" )\
        .distinct()

    # write artists table to parquet files
    artists_table.write.parquet(output_data + "artists.parquet", mode="overwrite")
    print("Song processing : Ended")
def process_log_data(spark, input_data, output_data):
    """
    - Processes JSON log files stored in the input location
    - Transforms dimension tables: users, time
    - Transforms fact table: songplays (joined against the song/artist
      parquet written earlier by process_song_data)
    - Saves output to parquet files
    Arguments:
        spark -- instantiated SparkSession
        input_data (str) -- path to folder containing log files to be processed
        output_data (str) -- output path for final parquet files
    """
    print("Log processing : Started")

    # get filepath to log data file
    log_data = input_data + 'log_data/'

    # read log data file (recursive lookup walks the nested date folders)
    df = spark.read.option("recursiveFileLookup","true").json( log_data )

    # filter by actions for song plays
    df = df.filter( col("page") == "NextSong" )

    # extract columns for users table
    users_table = df.selectExpr("userId as user_id", "firstName as first_name", "lastName as last_name", "gender", "level").distinct()

    # write users table to parquet files
    users_table.write.parquet(output_data + "users.parquet", mode="overwrite")

    # create timestamp column from the original epoch-milliseconds 'ts' column
    get_timestamp = udf( lambda x : datetime.fromtimestamp( x / 1000 ).strftime( "%Y-%m-%d %H:%M:%S" ) )
    df = df.withColumn( "timestamp", to_timestamp( get_timestamp( "ts" ) ) )

    # create datetime column from the original timestamp column
    get_datetime = udf( lambda x : datetime.fromtimestamp( x / 1000 ).strftime( "%Y-%m-%d" ) )
    df = df.withColumn( "date", to_date(get_datetime( "ts" )) )

    # extract columns to create time table (via a temporary SQL view)
    df.createOrReplaceTempView("timetable")
    time_table = spark.sql("""
        SELECT DISTINCT
            timestamp AS start_time,
            HOUR(timestamp) AS hour,
            DAY(timestamp) AS day,
            WEEKOFYEAR(timestamp) AS week,
            MONTH(timestamp) AS month,
            YEAR(timestamp) AS year,
            DAYOFWEEK(timestamp) AS weekday
        FROM timetable
    """)

    # write time table to parquet files partitioned by year and month
    time_table.write.partitionBy("year", "month").parquet(output_data + "time.parquet", mode="overwrite")

    # read in song data (written by process_song_data) for the songplays table
    song_df = spark.read.parquet( output_data + "songs.parquet" )
    artist_df = spark.read.parquet( output_data + "artists.parquet" ).selectExpr("artist_id as ref_artist" , "name")
    song_df = song_df.join(artist_df, song_df.artist_id == artist_df.ref_artist )

    # skip the fact table entirely when no songs were loaded
    if song_df.count() > 0 :
        # extract columns from joined song and log datasets to create songplays table;
        # left join keeps plays even when the song is not in the catalog
        songplays_table = df.join(song_df , (df.artist == song_df.name) & (df.song == song_df.title) , how='left')\
            .selectExpr("concat_ws('_', userId, ts) as songplay_id", "timestamp as start_time", "userId as user_id", \
                "level", "song_id", "artist_id", "sessionId as session_id", "location", "userAgent as user_agent" )

        # write songplays table to parquet files partitioned by year and month
        songplays_table.withColumn("year", year("start_time")).withColumn("month", month("start_time"))\
            .write.partitionBy("year", "month")\
            .parquet(output_data + "songplays.parquet", mode="overwrite")

    print("Log processing : Ended")
def main():
    """
    - Main ETL function run when the script is executed
    - Sets input and output paths from dl.cfg
    - Runs the song and log processing functions (song first: log
      processing reads the songs/artists parquet it produces)
    """
    spark = create_spark_session()

    input_data = config['AWS']['INPUT_DATA']
    output_data = config['AWS']['OUTPUT_DATA']

    sc = spark.sparkContext
    # Committer algorithm v2 avoids the slow output-rename step on S3.
    sc._jsc.hadoopConfiguration().set("mapreduce.fileoutputcommitter.algorithm.version", "2")

    print("\n ETL Starting\n")
    process_song_data(spark, input_data, output_data)
    process_log_data(spark, input_data, output_data)
    spark.stop()
    print("\n ETL Complete\n")
# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"nunovazafonso@gmail.com"
] | nunovazafonso@gmail.com |
55efe697f1d6349c253177d0b7af16e65adad171 | aa4fc83fdbd9c59372979dd2d94cc8c687d3b51b | /match_statics/views.py | a87b30cc365a43d9afb8ff226eae109620462e4d | [] | no_license | AbdelhamedAbdin/match | 5d597e9f6330a351f67695144f26b4857f8c247a | 1ddfb1e50fe11adf0de3d382340d2b02cccae71c | refs/heads/master | 2022-11-26T08:58:17.308843 | 2020-07-29T23:53:49 | 2020-07-29T23:53:49 | 283,626,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 990 | py | from django.shortcuts import render, redirect
from match.models import Main
from .models import Statistics
from .forms import StaticForm
from django.db.models import F
def player_statics(request, pk):
    """Show the statistics page for one Main record.

    A POST containing the 'short_pos' key increments that record's
    short_pass counter by one before re-rendering.

    NOTE(review): the POST key is 'short_pos' while the field bumped is
    ``short_pass`` -- looks like an intentional pairing but confirm
    against the template.
    """
    form = StaticForm()
    statics = Statistics.objects.filter(main_id=pk)
    main_pk = Main.objects.get(pk=pk)

    if request.method == 'POST' and 'short_pos' in request.POST:
        form = StaticForm(request.POST, instance=main_pk.statistics)
        if form.is_valid():
            stat = form.save(commit=False)
            stat.short_pass += 1
            stat.save()

    context = {'form': form, 'statics': statics, 'main_pk': main_pk}
    return render(request, 'match_statics/player_statics.html', context)
| [
"medoabdin@gmail.com"
] | medoabdin@gmail.com |
37b68d062d53af404f07faf274e26c5996467da9 | 38762f9381a2a3ba958199e5600bdb77750e5918 | /posenet-tf/test_code.py | bf90610c9ee05e2d9b3857dccb2f1890af0d588b | [
"Apache-2.0"
] | permissive | Gymnos-AI/Gymnos-ResearchPaper | d22d39c73a0d0f3db7b6ef31d5e66352cd98cbde | 9d20fa2973d9359a52203f245562eea6b4f2bd4b | refs/heads/master | 2022-11-26T20:28:14.483491 | 2020-04-21T21:31:56 | 2020-04-21T21:31:56 | 285,942,933 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | import cv2
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
# Load the test image and equalize its local contrast with CLAHE.
img = cv2.imread("./images/james.png")
if img is None:
    # BUG FIX: cv2.imread returns None (no exception) for a missing or
    # unreadable file; fail loudly instead of crashing inside cvtColor
    # with a cryptic assertion.
    raise FileNotFoundError("Could not read ./images/james.png")
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# CLAHE: contrast-limited adaptive histogram equalization on 8x8 tiles.
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)

cv2.imshow("James", cl1)
while True:
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyAllWindows()  # release the HighGUI window on exit
| [
"jamesperalta35@gmail.com"
] | jamesperalta35@gmail.com |
1895e0ad222f761f3640d7dc1d37d0017a9be646 | 42828075228410756eb580712004ed4dc1d10cce | /NLP for Data Extraction/MGH_hnp_output_symptom.py | 55377694b9db3c2e933febe89bdc9f46821b1f8c | [
"Apache-2.0"
] | permissive | sotudian/COVID-19-Predictive-Models | 342269308e27889ee24b5433353888fd51407ee3 | ce50500cb48cf06dab8541c1b39f4836f9e4c68c | refs/heads/main | 2023-09-01T05:57:54.524080 | 2021-10-21T03:14:29 | 2021-10-21T03:14:29 | 419,561,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,664 | py | from simple_sentence_segment import sentence_segment
from allennlp.predictors.predictor import Predictor
import scispacy
import spacy
import numpy as np
from unidecode import unidecode
import csv
import re
import os
csv.field_size_limit(1000000000)  # clinical notes can exceed the default field cap

# spaCy NER model trained on BC5CDR (disease/chemical entities).
nlp = spacy.load("en_ner_bc5cdr_md-0.2.4")

# Output CSV opened in append mode so the script can be re-run/resumed.
out = open('hnp_out_sym.csv', 'a', newline='', encoding='utf-8')
csv_write = csv.writer(out, dialect='excel')

files = sorted(os.listdir('hnp/'))
# print(files)
n = 0

# Section headings that terminate the narrative passage we scan
# (see assert_stop below).  Mixed case is deliberate: 'Exam' and 'ROS'
# are matched case-sensitively.
stop_list_disease = [
    'medications prior to admission',
    'prior to admission medication',
    'home medications',
    'allergies',
    'family history',
    'social history',
    'no past medical history on file',
    'medications',
    'surgical history',
    'ed course',
    'past medical history',
    'patient active problem',
    'course',
    'cardiology history',
    'PHYSICAL EXAMINATION'.lower(),
    'Exam',
    'ROS']
started = 0

# RoBERTa-large MNLI model used for textual entailment ("has <symptom>").
predictor = Predictor.from_path(
    "mnli-roberta-large-2020.02.27.tar.gz",
    predictor_name="textual-entailment")

# Patient_list.csv maps patient id (column 0) to gender (column 1).
gender_table = [
    item for item in csv.reader(
        open(
            'Patient_list.csv',
            "r",
            encoding='utf-8'))]
gender_dic = {}
for nn in gender_table:
    gender_dic[nn[0]] = nn[1]
def combine_sentences(sentences):
    """Join a list of sentence strings into one space-separated string.

    ``str.join`` does in a single pass exactly what the original manual
    accumulation loop did: one space between sentences, none trailing,
    and '' for an empty list.
    """
    return ' '.join(sentences)
def assert_stop(line, stop_list):
    """Return 1 if ``line[0]`` contains any stop-section keyword, else 0.

    Matching mirrors the original exactly:
    - each keyword is checked against the lower-cased text AND the raw
      text (so capitalised entries like 'Exam'/'ROS' only match verbatim);
    - a line containing the literal phrase 'past medical history' is
      never treated as a stop, regardless of which keyword matched.

    The ``in`` operator replaces the original ``find(...) != -1`` pairs.
    """
    text = line[0]
    lowered = text.lower()
    for keyword in stop_list:
        if keyword in lowered or keyword in text:
            if 'past medical history' not in text:
                return 1
    return 0
count = 0
for filename in files:
    nes_added = []  # symptoms confirmed by the entailment model
    nes_neg = []    # negative findings (collection currently disabled below)
    count += 1
    D = []
    print(filename)
    started = 0
    lines = [
        item for item in csv.reader(
            open(
                'hnp/' +
                filename,
                "r",
                encoding='utf-8'))]
    para = ''
    # First cell of the first row is a pipe-delimited header:
    # field 0 = patient id, field 5 = note timestamp.
    head = lines[0][0]
    # print(head.split('|'))
    PID = head.split('|')[0]
    Gender = gender_dic[PID]
    # print(PID)
    Time = head.split('|')[5]
    # print(Time)
    # Collect the narrative: start at the chief-complaint / HPI heading
    # and stop at the first stop-section heading (see assert_stop).
    for (i, line) in enumerate(lines):
        if started == 0:
            if line[0].lower().find('chief complaint') != -1 or line[0].lower().find('reason for admission') != -1 or line[0].lower().find('HISTORY OF THE PRESENT ILLNESS'.lower()) != -1 or line[0].lower().find('History of Present Illness'.lower()) != -1:
                # Skip the all-lowercase variants of the heading itself;
                # only start on a capitalised/section heading line.
                if line[0].find('chief complaint') == -1 and line[0].find('reason for admission') == -1:
                    # if line[0].lower().find('suthor')!=-1 or
                    # line[0].lower().find('reason for admission')!=-1:
                    started = 1
                    n += 1
        if started == 1:
            '''if line[0][0]=='?':
            disease=line[0].split(' ')[0][1:].strip()
            print(disease)
            D.append(disease)'''
            if assert_stop(line, stop_list_disease) == 1:
                break
            para += line[0]
            para += ' '
    # if lines[i+1][0].lower().find('diagnosis')!=-1 or lines[i+2][0].lower().find('diagnosis')!=-1:
    # n+=1
    # flag=1
    # break
    # else:
    dic = {}
    # para=para.replace('_s','no')
    print(para)
    # Candidate terms: diseases found by the NER model in the narrative ...
    doc = nlp(para)
    nes_nlp = [i for i in list(doc.ents)]
    for ne in nes_nlp:
        if ne.label_ == 'DISEASE':
            # print(ne)
            dic[str(ne)] = 0
    # ... plus a fixed COVID-19 symptom vocabulary.
    nes = [
        'fever',
        'cough',
        'dyspnea',
        'shortness of breath',
        'SOB',
        'fatigue',
        'diarrhea',
        'loose stool',
        'nausea',
        'vomiting',
        'emesis',
        'abdominal pain',
        'abd pain',
        'loss of smell',
        'anosmia',
        'loss of taste',
        'chest pain',
        'headache',
        'sore throat',
        'hemoptysis',
        'bloody sputum',
        'myalgia',
        'muscle aches',
        'muscle pains']
    #nes=['SOB','abd pain']
    for ne in nes:
        # if ne.label_=='DISEASE':
        # print(ne)
        dic[str(ne)] = 0
    print(dic.keys())
    if Gender == 'Male':
        call = 'he'
    else:
        call = 'she'
    # NOTE(review): ``call`` is only used by the commented-out gendered
    # hypothesis below; the active hypothesis is gender-neutral.
    for k in dic:
        # Crude negation filter: keep a term only if no negating phrase
        # ("no X", "denies X", ...) occurs anywhere in the note.
        # ('not ' is checked twice in the original; kept verbatim.)
        if para.lower().find('no ' + k.lower()) == -1 and para.lower().find('not ' + k.lower()) == -1 and para.lower().find('deny ' + k.lower()) == -1 and para.lower().find('denies ' + k.lower()) == -1 and para.lower().find('denied ' + k.lower()) == -1 and para.lower().find('not ' + k.lower()) == -1 and para.lower().find('without ' + k.lower()) == -1 and para.lower().find('non ' + k.lower()) == -1:
            dic[k] = 1
    NES = [k for k in dic if dic[k] == 1]
    # print(NES)
    # if nes_nlp==[]:
    if para == '':
        # No narrative found: emit an empty row so every file is accounted for.
        csv_write.writerow([filename, Time])
        continue
    # -------------------------------TE task
    sentences = []
    for s, t in sentence_segment(para):
        sentence = para[s:t].strip().replace('\n', ' ')
        sentences.append(sentence)
    # Sliding window over sentences (window == 1 -> one sentence per segment).
    segments = []
    window = 1
    if len(sentences) <= window:
        segments.append(combine_sentences(sentences))
    else:
        for i in range(int(len(sentences) - window + 1)):
            segments.append(combine_sentences(sentences[i:int(i + window)]))
    # Keep a term only if some segment mentions it AND the MNLI model says
    # the segment entails "has <term>".
    for s in segments:
        for ne in NES:
            if ne in nes_added:
                continue
            if s.lower().find(ne.lower()) == -1:
                continue
            #p=predictor.predict(hypothesis=call+' has '+ne,premise=s)
            p = predictor.predict(hypothesis='has ' + ne, premise=s)
            if p['label'] == 'entailment':
                nes_added.append(ne)
            # if p['label']=='contradiction':
            # nes_neg.append(ne)
    nes_added = [i for i in nes_added if i not in nes_neg]
    nes_added = list(set(nes_added))
    print(nes_added)
    # ------------- into separate file
    '''out = open('profile/'+PID+'.csv', 'a', newline='',encoding='utf-8')
    csv_write = csv.writer(out, dialect='excel')
    csv_write.writerow([filename,'Disease in hnp',Time,D])'''
    # ------------- into separate file
    out_line = [filename, Time]
    out_line.extend(nes_added)
    csv_write.writerow(out_line)
    # Periodically close/reopen the output so progress is flushed to disk.
    if count % 10 == 0:
        out.close()
        out = open('hnp_out_sym.csv', 'a', newline='', encoding='utf-8')
        csv_write = csv.writer(out, dialect='excel')
    # if started==0:
    # print(filename)
print(n)
# csv_write.writerow([PID,Time])
| [
"noreply@github.com"
] | noreply@github.com |
7f299b8afc5bda2e0208d3c37d8bbe260005312d | 878e65a34f6b10ac81759ab87147022360bfd622 | /functional_tests/multi_output_group_input_test/make.py | 88d20e50a8472006020cd47152b3e8cd4c8ee46a | [] | no_license | SleepingCatGames/csbuild2 | 5f265aec1a490e32503857bff5d1e139992a1652 | c7389961bee3d8e5088c8c3c8c4bb7e273e4ec50 | refs/heads/develop | 2023-08-31T07:17:55.905512 | 2023-08-20T21:45:05 | 2023-08-20T21:45:05 | 100,565,406 | 1 | 1 | null | 2023-08-20T21:45:06 | 2017-08-17T05:36:13 | Python | UTF-8 | Python | false | false | 3,077 | py | # Copyright (C) 2016 Jaedyn K. Draper
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
.. module:: make
:synopsis: Makefile for this test
.. moduleauthor:: Jaedyn K. Draper
"""
from __future__ import unicode_literals, division, print_function
import csbuild
from csbuild.toolchain import Tool
import os
csbuild.SetIntermediateDirectory("intermediate")
csbuild.SetOutputDirectory("out")
class Doubler(Tool):
    """
    Simple tool that opens a file, doubles its contents numerically, and writes a new file.

    Consumes one ``.first`` file containing an integer and emits two
    ``.second`` files: one with the value doubled, one with it quadrupled.
    Both outputs are flushed and fsync'd before the paths are returned.
    """
    inputFiles = {".first"}
    outputFiles = {".second"}
    supportedArchitectures = None

    def Run(self, inputProject, inputFile):
        """Process one .first input and return the two generated output paths."""
        with open(inputFile.filename, "r") as src:
            number = int(src.read())

        stem = os.path.splitext(os.path.basename(inputFile.filename))[0]

        number *= 2
        firstOut = os.path.join(inputProject.intermediateDir, stem + ".second")
        self._WriteValue(firstOut, number)

        number *= 2
        secondOut = os.path.join(inputProject.intermediateDir, stem + "2.second")
        self._WriteValue(secondOut, number)

        return firstOut, secondOut

    @staticmethod
    def _WriteValue(path, number):
        """Write *number* to *path* and force the bytes to disk (flush + fsync)."""
        with open(path, "w") as dest:
            dest.write(str(number))
            dest.flush()
            os.fsync(dest.fileno())
class Adder(Tool):
    """
    Simple tool that opens multiple doubled files and adds their contents together numerically, outputting a final file.

    Sums every ``.second`` input in the group and writes the total to a single
    ``.third`` file next to the project output.
    """
    inputGroups = {".second"}
    outputFiles = {".third"}
    supportedArchitectures = None

    def RunGroup(self, inputProject, inputFiles):
        """Sum all .second inputs and return the path of the written .third file."""
        assert len(inputFiles) == 20, "{} != 20".format(len(inputFiles))

        def readValue(entry):
            with open(entry.filename, "r") as src:
                return int(src.read())

        total = sum(readValue(entry) for entry in inputFiles)

        resultPath = os.path.join(inputProject.outputDir, inputProject.outputName + ".third")
        with open(resultPath, "w") as dest:
            dest.write(str(total))
            dest.flush()
            os.fsync(dest.fileno())
        return resultPath
csbuild.RegisterToolchain("AddDoubles", "", Doubler, Adder)
csbuild.SetDefaultToolchain("AddDoubles")
with csbuild.Project("TestProject", "."):
csbuild.SetOutput("Foo", csbuild.ProjectType.Application)
| [
"jaedyn.git@jaedyn.co"
] | jaedyn.git@jaedyn.co |
a6c363f8de0cbe2e52989488be2bcd766f958499 | 306e86ca21d447961a5020d1433e0fb792e5a02f | /structural_patterns/Singleton.py | 34e50f49aebedf6819fbb4a0a2738ed475458de1 | [] | no_license | herodionem/design_pattern_notes | ed2ab435dc92b64d89a27ead1d2b0be59c123123 | 60d03bd305f5ec1039a058bbf46dac8f96196c8f | refs/heads/master | 2021-02-14T20:02:51.227111 | 2020-03-06T05:38:57 | 2020-03-06T05:38:57 | 244,830,378 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,050 | py | class Borg:
_shared_state = {}
def __init__(self):
self.__dict__ = self._shared_state
class Singleton(Borg):
    """Borg variant: every instance shares one state dict, extended via kwargs."""

    def __init__(self, **kwargs):
        """Adopt the shared state, then merge the constructor kwargs into it."""
        super().__init__()
        self._shared_state.update(kwargs)

    def __str__(self):
        """Render the shared state; all instances therefore print identically."""
        shared = self._shared_state
        return str(shared)
x = Singleton(HTTP = "Hyper Text Transfer Protocol")
print(f"printing x: {x}")
print(f"printing x.__dict__: {x.__dict__}")
y = Singleton(FTP = "File Transfer Protocol")
print(f"printing y: {y}")
print(f"printing y.__dict__: {y.__dict__}")
x.__dict__ = {'breaking': 'singleton'}
print(f"printing x: {x}")
print(f"printing x.__dict__: {x.__dict__}")
print(f"printing y: {y}")
print(f"printing y.__dict__: {y.__dict__}")
z = Singleton(KYBO = "Keep Your Bowels Open")
print(f"printing x: {x}")
print(f"x id = {id(x)}")
print(f"x.__dict__ id = {id(x.__dict__)}")
print(f"printing y: {y}")
print(f"y id = {id(y)}")
print(f"y.__dict__ id = {id(y.__dict__)}")
print(f"printing z: {z}")
print(f"z id = {id(z)}")
print(f"z.__dict__ id = {id(z.__dict__)}")
| [
"jonathan@oneclickretail.com"
] | jonathan@oneclickretail.com |
009544a9459c4e1fc01041038795557f4004e62c | 66e92b3427327aec7f719ad095235de4ade194ac | /app/predictionmodels/prophet_predictor.py | 0e6c4bc9db4ee05e919cb2fb8c9b376230ff7788 | [] | no_license | filippajak/FoglightPredictions | 0c78b537e9c07822cd0b315d816dfae2d747521b | 55a1227c6eb730f3405b755aa922d281d058ed54 | refs/heads/master | 2020-04-30T01:40:52.314433 | 2019-03-19T15:00:02 | 2019-03-19T15:00:02 | 176,535,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,032 | py | import pandas as pd
from fbprophet import Prophet
class ProphetPredictor:
    """Forecast per-item market shares with Facebook Prophet and merge them."""

    def predict(self, paths):
        """Forecast every datapath in *paths* and merge the results by month.

        Each element of *paths* exposes ``historical_marketshares`` (a
        {month-string: value} mapping) and an ``item`` label — both are read
        below; any further structure is assumed — TODO confirm with callers.

        Returns a month-indexed DataFrame with one non-negative forecast
        column per item.
        """
        ms_column_name = 'ds'
        # Start from an empty frame keyed on Prophet's date column 'ds'.
        dataframe = pd.DataFrame({ms_column_name: []})
        for datapath in paths:
            df = pd.DataFrame(list(datapath.historical_marketshares.items()), columns=['month', datapath.item]).sort_values(by=['month'])
            # NOTE(review): `df.Timestamp = ...` sets an *attribute*, not a
            # column — likely meant df['Timestamp']; verify intent.
            df.Timestamp = pd.to_datetime(df['month'], format='%Y-%m')
            df.index = df.Timestamp
            # NOTE(review): this resampled df is never used afterwards —
            # dead code? The forecast below re-reads the raw history.
            df = df.resample('M').mean()
            # df1 = Sarimax().predict(df)
            df1 = self.predict_single_item(datapath)[[ms_column_name, 'yhat']].copy()
            # Rename Prophet's prediction column to the item's own name.
            df1 = df1.rename(columns={'yhat': datapath.item})
            # how='right' keeps every forecasted month even on the first merge
            # against the initially-empty frame.
            dataframe = dataframe.merge(df1, on=ms_column_name, how='right')
        dataframe.Timestamp = pd.to_datetime(dataframe['ds'], format='%Y-%m')
        dataframe.index = dataframe[ms_column_name]
        dataframe = dataframe.resample('M').mean()
        # Market shares cannot be negative; clamp the forecasts at zero.
        dataframe = dataframe.clip(lower=0.0)
        dataframe.index = dataframe.index.map(str)
        # dataframe = dataframe.loc[dataframe.index > '2018-12-31 00:00:00']
        print('dataframe', dataframe)
        return dataframe

    def predict_single_item(self, datapath):
        """Fit Prophet on one datapath's history and return a 12-month forecast frame."""
        df = pd.DataFrame(list(datapath.historical_marketshares.items()),
                          columns=['month', datapath.item]).sort_values(by=['month'])
        # prepare the input dataframe to be used by Prophet
        # (Prophet requires exactly the columns 'ds' and 'y')
        df = df.rename(columns={df.columns.values.tolist()[1]: "y"})
        # df.ds = pd.to_datetime(df['month'], format='%Y-%m')
        # df.index = df.ds
        # df = df.resample('M').mean()
        df = df.rename(columns={df.columns.values.tolist()[0]: "ds"})
        m = Prophet()
        m.fit(df)
        future = m.make_future_dataframe(periods=12, freq='M')
        forecast = m.predict(future)
        # forecast = forecast[forecast['ds'].dt.day == 1]
        # fig1 = m.plot(forecast)
        # plt.show(block=True)
        return forecast
"filip.pajak@qualitytaskforce.com"
] | filip.pajak@qualitytaskforce.com |
e613e0c339f93313e6b0a2aea9de0b2455f7d7b3 | ef3e77a515cf102d60a353a6ea7124506ed553e0 | /data.py | f17bc38f9b862b3aecf6b21324497930b7ee845a | [] | no_license | arpitj07/Flask | df9ec313ecbc13fafe8e005f9f6960d33c7a0361 | 6ef7009a744bb198d4536d1559f913887de4d223 | refs/heads/master | 2020-04-17T22:10:11.074964 | 2019-02-05T06:50:40 | 2019-02-05T06:50:40 | 166,983,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,779 | py | def Articles():
articles = [{
'id': 1,
'title': 'Article One',
'body' : 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod \
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo \
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse \
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est laborum',
'Author': 'Arpit Jain',
'created_date':'21-01-2019'
},
{
'id': 2,
'title': 'Article Two',
'body' : 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod \
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo \
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse \
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est laborum',
'Author': 'Manish sharma',
'created_date':'15-1-29019'
},
{
'id': 3,
'title': 'Article Three',
'body' : 'Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod \
tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam,\
quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo \
consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse \
cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non \
proident, sunt in culpa qui officia deserunt mollit anim id est laborum',
'Author':'Rahul verma',
'created_date':'01-01-2019'
}
]
return articles | [
"aj070295@gmail.com"
] | aj070295@gmail.com |
cc88ab1445aabd5a7a51b5d3387509e3916cbff2 | 89f23e1020cc35b9bd5b0a5b61a080075c48d675 | /my_app/views.py | e76ca0d728493388b260dcb2f1901c91ed06090d | [] | no_license | Akpozi/first_django_project | 682a51fd3bab52eb8ba990a177723203fefedf5a | b56f5f731de21767e546c364c633d83a3afb1c41 | refs/heads/master | 2022-12-15T03:40:29.903382 | 2020-03-23T03:52:07 | 2020-03-23T03:52:07 | 249,292,825 | 0 | 0 | null | 2022-12-08T03:51:54 | 2020-03-22T23:34:39 | Python | UTF-8 | Python | false | false | 2,102 | py | from django.shortcuts import render
from bs4 import BeautifulSoup
import requests
from requests.compat import quote_plus
from . import models
BASE_CRAIGSLIST_URL = 'https://helsinki.craigslist.org/search/sss?query={}'
BASE_IMAGE_URL = 'https://images.craigslist.org/{}_300x300.jpg'
# Create your views here.
def home(request):
    """Render the static landing page."""
    template_name = 'base.html'
    return render(request, template_name)
def new_search(request):
    """Scrape craigslist for the posted search term and render the results.

    Persists the raw term as a Search record, fetches the craigslist listing
    page, and renders one (title, url, price, image_url) tuple per result row.
    """
    search = request.POST.get('search')
    models.Search.objects.create(search=search)
    final_url = BASE_CRAIGSLIST_URL.format(quote_plus(search))

    # A request without a timeout can hang the worker forever on a stalled
    # upstream; fail after 10 seconds instead.
    response = requests.get(final_url, timeout=10)
    soup = BeautifulSoup(response.text, features='html.parser')

    final_postings = []
    for post in soup.find_all('li', {'class': 'result-row'}):
        post_title = post.find(class_='result-title').text
        post_url = post.find('a').get('href')

        # Not every row carries a price.
        price_tag = post.find(class_='result-price')
        post_price = price_tag.text if price_tag else 'Not Available'

        # Guard both a missing result-image element and an element without a
        # data-ids attribute; the original crashed (AttributeError) on rows
        # that had no result-image tag at all.
        image_tag = post.find(class_='result-image')
        data_ids = image_tag.get('data-ids') if image_tag else None
        if data_ids:
            post_image_id = data_ids.split(',')[0].split(':')[1]
            post_image_url = BASE_IMAGE_URL.format(post_image_id)
        else:
            post_image_url = 'https://images.theconversation.com/files/317421/original/file-20200226-24651-1or3uxq.jpg'

        final_postings.append((post_title, post_url, post_price, post_image_url))

    # Stuff for front end
    design_frontend = {
        'search': search,
        'final_postings': final_postings,
    }
    return render(request, 'my_app/new_search.html', design_frontend)
"akpo.siemuri@gmail.com"
] | akpo.siemuri@gmail.com |
cbe4b5b0a93c62e34a4f64d8d65fcb3619111147 | 1b5802806cdf2c3b6f57a7b826c3e064aac51d98 | /tensorrt-basic-1.10-3rd-plugin/TensorRT-main/tools/Polygraphy/examples/cli/run/05_comparing_with_custom_input_data/data_loader.py | 4284ddc1e5d6dbe661a164e636b3c38257bcee12 | [
"Apache-2.0",
"MIT",
"BSD-3-Clause",
"ISC",
"BSD-2-Clause"
] | permissive | jinmin527/learning-cuda-trt | def70b3b1b23b421ab7844237ce39ca1f176b297 | 81438d602344c977ef3cab71bd04995c1834e51c | refs/heads/main | 2023-05-23T08:56:09.205628 | 2022-07-24T02:48:24 | 2022-07-24T02:48:24 | 517,213,903 | 36 | 18 | null | 2022-07-24T03:05:05 | 2022-07-24T03:05:05 | null | UTF-8 | Python | false | false | 1,709 | py | #!/usr/bin/env python3
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Demonstrates two methods of loading custom input data in Polygraphy:
Option 1: Defines a `load_data` function that returns a generator yielding
feed_dicts so that this script can be used as the argument for
the --data-loader-script command-line parameter.
Option 2: Writes input data to a JSON file that can be used as the argument for
the --load-inputs command-line parameter.
"""
import numpy as np
from polygraphy.json import save_json
INPUT_SHAPE = (1, 2, 28, 28)
# Option 1: Define a function that will yield feed_dicts (i.e. Dict[str, np.ndarray])
def load_data():
    """Yield five feed_dicts mapping input "x" to an all-ones array."""
    count = 0
    while count < 5:
        # A fresh array per yield, so consumers may mutate freely.
        yield {"x": np.ones(shape=INPUT_SHAPE, dtype=np.float32)}
        count += 1
# Option 2: Create a JSON file containing the input data using the `save_json()` helper.
# The input to `save_json()` should have type: List[Dict[str, np.ndarray]].
# For convenience, we'll reuse our `load_data()` implementation to generate the list.
input_data = list(load_data())
save_json(input_data, "custom_inputs.json", description="custom input data")
| [
"dujw@deepblueai.com"
] | dujw@deepblueai.com |
14cb6b8144983db1ce9f97bb70363ff979dc3a02 | 318569bca9bb5ef104efa60faf23a665460526bd | /Email/__init__.py | 4a8af3351ef0454089b049fb8c4b4e8098863c62 | [] | no_license | gaoziqi/algorithm | 0f48c5e4a4ed4991d940824d0d73c90fada1d842 | 0b832edc6f204985fbb30a5acfc6f3e0714ced6d | refs/heads/master | 2021-05-16T17:46:15.488395 | 2017-09-19T07:26:11 | 2017-09-19T07:26:11 | 103,072,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 31 | py | from Email._email import Email
| [
"571051761@qq.com"
] | 571051761@qq.com |
39e77ccbf51258189e72ccc6a097d942c6d1c9d7 | 1791461e6740f81c2dd6704ae6a899a6707ee6b1 | /Other/SCCPC-B.py | 133b5b79af2e2401618bd09fe29dd8418f5c71a5 | [
"MIT"
] | permissive | HeRaNO/OI-ICPC-Codes | b12569caa94828c4bedda99d88303eb6344f5d6e | 4f542bb921914abd4e2ee7e17d8d93c1c91495e4 | refs/heads/master | 2023-08-06T10:46:32.714133 | 2023-07-26T08:10:44 | 2023-07-26T08:10:44 | 163,658,110 | 22 | 6 | null | null | null | null | UTF-8 | Python | false | false | 2,597 | py | n = 1
while True:
def find_message(s):
    """Return the index of the last ':' in *s*, or None when absent.

    Replaces the manual reverse scan with str.rfind, which performs the
    same right-to-left search in C.
    """
    idx = s.rfind(':')
    return idx if idx != -1 else None
try:
line = input()
print(n)
n += 1
# Find and delete message
message_start_pos = find_message(line)
message, line = line[message_start_pos + 1:], line[: message_start_pos]
line.strip()
def reach_to_ch(l, ch):
r = l
while r < len(line):
if line[r] == ch:
break
r += 1
return r
l = 0
while l < len(line):
if line[l] == ' ':
l += 1
continue
if line[l].isdigit():
# possible: asctime, level_no, file_name
if l + 4 < len(line) and line[l + 4] == '-':
# must be asctime
print(f'<%(asctime)s,{line[l: l + 23]}>')
l += 23
elif line[l: l + 2] in {'10', '20', '30', '40', '50'}:
# must be level_no
print(f'<%(levelno)s,{line[l: l + 2]}>')
l += 2
else:
# must be file_name
r = reach_to_ch(l, '.')
r = reach_to_ch(r, ' ')
print(f'<%(filename)s,{line[l: r]}>')
l = r
elif line[l].isupper():
# possible: path_name, file_name, level name
if line[l + 1] == ':':
# must be path_name
r = reach_to_ch(l, '.')
r = reach_to_ch(r, ' ')
print(f'<%(pathname)s,{line[l: r]}>')
l = r
elif line[l: l + 5] == 'DEBUG' or \
line[l: l + 4] == 'INFO' or \
line[l: l + 7] == 'WARNING' or \
line[l: l + 5] == 'ERROR' or \
line[l: l + 8] == 'CRITICAL':
r = reach_to_ch(l, ' ')
print(f'<%(levelname)s,{line[l: r]}>')
l = r
else:
# must be file name
r = reach_to_ch(l, '.')
r = reach_to_ch(r, ' ')
print(f'<%(filename)s,{line[l: r]}>')
l = r
elif line[l].islower():
# must be logger name, line no, thread id, thread name, process id, file name
if ':' in line[l: l + 8]:
# must be logger name, line no, thread id, thread name, process id
r = reach_to_ch(l, ' ')
t = line[l: r].split(':')
if t[0] == 'logger':
print(f'<%(name)s,{line[l: r]}>')
elif t[0] == 'line':
print(f'<%(lineno)d,{line[l: r]}>')
elif t[0] == 'thread':
if t[1].isdigit():
print(f'<%(thread)d,{line[l: r]}>')
else:
print(f'<%(threadName)s,{line[l: r]}>')
elif t[0] == 'process':
print(f'<%(process)s,{line[l: r]}>')
else:
raise RuntimeError
l = r
else:
# must be file name
r = reach_to_ch(l, '.')
r = reach_to_ch(r, ' ')
print(f'<%(filename)s,{line[l: r]}>')
l = r
else:
raise RuntimeError
print(f'<%(message)s,{message}>')
except EOFError:
break
| [
"heran55@126.com"
] | heran55@126.com |
fb81b3687432d04bbd58b02c1555e76963320ed2 | cbcacc6395582426948df7159cd9fc3f1dcd02a2 | /test/stack_test_case.py | 82bf75424643294568e0aacbd790ab05a807244c | [] | no_license | alimills/stacks | a8c9cf9b1634cf4c43efcc3ff224cb4b5f100b8f | f8d8b3424dac2cea2fc73fe2ccba7031a8572a98 | refs/heads/master | 2021-01-19T06:46:05.153156 | 2011-06-02T01:45:01 | 2011-06-02T01:45:01 | 1,829,953 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | import unittest
from google.appengine.ext import db
from google.appengine.ext import testbed
class StackTestCase(unittest.TestCase):
    """Base test case providing an in-memory App Engine datastore stub."""

    def setUp(self):
        # Activate the GAE testbed with a datastore stub so db operations
        # during the test never touch a real datastore.
        self.testbed = testbed.Testbed()
        self.testbed.setup_env(app_id = "12345")
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()

    def tearDown(self):
        # Restore the original (non-stubbed) service environment.
        self.testbed.deactivate()
| [
"lbayes@google.com"
] | lbayes@google.com |
1f8829b07125e1234db7be25020b22d9ba7231fb | 86758a2c0767c6571cfab3f678c9c23c7491ae45 | /ejercicio_libro/login/pag70_ej_1.py | f80538c927fb93fb42289d1f7d15f66cb20d985b | [] | no_license | tucuwar/python | d51383fbdde78074d7c9673653109f1acb4b7661 | b60435b21419bd31285d703a565d5294a46ea01b | refs/heads/master | 2021-07-12T02:48:45.978793 | 2019-02-15T13:02:23 | 2019-02-15T13:02:23 | 129,744,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,213 | py | # -*- coding: utf-8 -*-
#Curso Python para Principiantes - Eugenia Bahit
# pagina 70
# ejercicio 1
# Crear un módulo para validación de nombres de usuarios.
# Dicho módulo, deberá cumplir con los siguientes criterios de aceptación:
# El nombre de usuario debe contener un mínimo de 6 caracteres y un máximo de 12
# El nombre de usuario debe ser alfanumérico
# Nombre de usuario con menos de 6 caracteres, retorna el mensaje “El nombre de
# usuario debe contener al menos 6 caracteres”
# Nombre de usuario con más de 12 caracteres, retorna el mensaje “El nombre de
# usuario no puede contener más de 12 caracteres”
# Nombre de usuario con caracteres distintos a los alfanuméricos, retorna el mensaje
# “El nombre de usuario puede contener solo letras y números”
# Nombre de usuario válido, retorna True
def validaUsr(usuario):
    """Validate a username.

    Returns True when *usuario* is between 6 and 12 characters long and
    consists only of letters and digits; False otherwise.

    Replaces the original check1/check11/check2 flag bookkeeping (and its
    commented-out debug prints) with the equivalent direct expression.
    """
    # str.isalnum() is already False for the empty string, so no extra
    # special case is needed.
    return 6 <= len(usuario) <= 12 and usuario.isalnum()
#raw_input("Pulsa una tecla para continuar...sale")
#print ("Ingrese usuario y contraseña")
#print (""" Recuerde:
# El nombre de usuario debe contener un mínimo de 6 caracteres y un máximo de 12
# El nombre de usuario puede ser alfanumérico
# """)
#usuario = raw_input("Usuario: ")
#password = raw_input("Contraseña: ")
#validaUsr(usuario)
| [
"tucuwar@gmail.com"
] | tucuwar@gmail.com |
a9eefa9e0f5919b3abeaf90ec4d77ceb8d553d30 | e9a2aebaf84baf158beb156bc90c405a15f5fc9b | /uploader.py | 9a36a15c55d44a4998296e3fcaafdee0c7f852e1 | [
"Unlicense"
] | permissive | salvadorAnt/esp8266web | 3fc93c3bbe83096b44b8471b46517bd20dfa75f6 | ecb33f3f4dfba0878baaf60dc898261d78f19e40 | refs/heads/master | 2023-03-15T15:16:58.971230 | 2016-07-08T14:51:57 | 2016-07-08T14:51:57 | 569,703,261 | 1 | 0 | Unlicense | 2022-11-23T12:31:04 | 2022-11-23T12:31:03 | null | UTF-8 | Python | false | false | 6,087 | py | #-*- coding: utf-8 -*-
#
# test webfs_upload.py
# PV` Created on 26/09/2015.
#
# Bases:
#---------------------------------------------------
# 2006/02 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 2007/07/26 Slightly modified by Brian Schneider
#
# in order to support unicode files ( multipart_encode function )
# From http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
#
# 2013/07 Ken Olum <kdo@cosmos.phy.tufts.edu>
#
# Removed one of \r\n and send Content-Length
#
# 2014/05 Applied Fedora rpm patch
#
# https://bugzilla.redhat.com/show_bug.cgi?id=920778
# http://pkgs.fedoraproject.org/cgit/python-MultipartPostHandler2.git/diff/python-MultipartPostHandler2-cut-out-main.patch?id=c1638bb3e45596232b4d02f1e69901db0c28cfdb
#
# 2014/05/09 Sergio Basto <sergio@serjux.com>
#
# Better deal with None values, don't throw an exception and just send an empty string.
#
#---------------------------------------------------
import sys
import urllib
import urllib2
import mimetools #, mimetypes
import os
import stat
from base64 import standard_b64encode
from cStringIO import StringIO
class Callable:
    """Wrap a plain function as an instance whose __call__ attribute is it.

    Legacy Python 2 idiom: lets multipart_encode be defined without `self`
    inside a class body yet remain invokable via the instance.
    """

    def __init__(self, anycallable):
        """Expose *anycallable* as this instance's __call__ attribute."""
        self.__call__ = anycallable
class MultipartPostHandler(urllib2.BaseHandler):
    """urllib2 handler (Python 2) that encodes dict POST data.

    Plain values are urlencoded; when any value is an open file object the
    whole payload is re-encoded as multipart/form-data.
    """
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first

    def http_request(self, request):
        # Only rewrite structured (non-string) request bodies.
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                # Split the mapping into file uploads vs. plain form fields.
                for(key, value) in data.items():
                    if type(value) == file:
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                # data had no .items() — not a mapping; re-raise with context
                # (Python 2 three-argument raise keeps the traceback).
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback
            if len(v_files) == 0:
                # No files: ordinary urlencoded body (doseq=1).
                data = urllib.urlencode(v_vars, 1)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)
                contenttype = 'multipart/form-data; boundary=%s' % boundary
                # Warn when overriding a caller-supplied non-multipart type.
                if(request.has_header('Content-Type')
                and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)
                # authstr = 'Basic ' + standard_b64encode('ESP8266' + ':' + '0123456789')
                # if(request.has_header('Authorization')):
                #     print "Replacing %s with %s" % (request.get_header('Authorization'), authstr)
                #     request.add_unredirected_header('Authorization', authstr)
            request.add_data(data)
        return request

    def multipart_encode(vars, files, boundary = None, buffer = None):
        # Note: defined without `self`; wrapped by Callable below so it is
        # usable both through the instance and directly.
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buffer is None:
            buffer = StringIO()
        # One part per plain form field.
        for(key, value) in vars:
            buffer.write('--%s\r\n' % boundary)
            buffer.write('Content-Disposition: form-data; name="%s"' % key)
            if value is None:
                value = ""
            # if type(value) is not str, we need str(value) to not error with cannot concatenate 'str'
            # and 'dict' or 'tuple' or somethingelse objects
            buffer.write('\r\n\r\n' + str(value) + '\r\n')
        # One part per file upload, sent as an octet stream.
        for(key, fd) in files:
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split('/')[-1]
            # contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            contenttype = 'application/octet-stream'
            buffer.write('--%s\r\n' % boundary)
            buffer.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buffer.write('Content-Type: %s\r\n' % contenttype)
            buffer.write('Content-Length: %s\r\n' % file_size)
            fd.seek(0)
            buffer.write('\r\n' + fd.read() + '\r\n')
        # Closing boundary terminates the multipart body.
        buffer.write('--' + boundary + '--\r\n')
        buffer = buffer.getvalue()
        return boundary, buffer
    multipart_encode = Callable(multipart_encode)

    https_request = http_request
if __name__ == '__main__':
if len(sys.argv) == 2:
if sys.argv[1] == '-h':
print 'Usage: option filename espurl username password'
sys.exit(0)
fileoption = 'file'
filename = './webbin/WEBFiles.bin'
espurl = 'http://sesp8266/fsupload'
username = 'ESP8266'
password = '0123456789'
if len(sys.argv) > 1:
if sys.argv[1]:
fileoption = sys.argv[1]
if len(sys.argv) > 2:
if sys.argv[2]:
filename = sys.argv[2]
if len(sys.argv) > 3:
if sys.argv[3]:
espurl = sys.argv[3]
if len(sys.argv) > 4:
if sys.argv[4]:
username = sys.argv[4]
if len(sys.argv) > 5:
if sys.argv[5]:
password = sys.argv[5]
print('Start send %s to %s' % (filename, espurl))
opener = urllib2.build_opener(MultipartPostHandler)
authstr = 'Basic ' + standard_b64encode(username + ':' + password)
opener.addheaders.append(['Authorization', authstr])
params = { fileoption : open(filename, 'rb') }
try:
resp = opener.open(espurl, params)
print('End, response code: %s\n' % resp.code)
sys.exit(0)
except Exception as e:
print('Failed open (%s) %s\n' % (str(e).decode('cp1251'), espurl))
sys.exit(1)
| [
"pvvx@mail.ru"
] | pvvx@mail.ru |
a9105c35de4005f9d6ed60f426c89dea109e2f22 | e16b5c0b57867f6d60183549b026c7a607578606 | /accounts/views.py | b3af1dfb290dfb482be2b1f39fa0498df019fdaf | [] | no_license | Minbyeong/Django_P02 | e48a795d707363dfcf7ca16d1502645a909f56af | 68fe4e4adad1058fa0bc89538013bf3035627cb6 | refs/heads/master | 2023-01-30T06:52:12.972791 | 2020-12-06T11:00:39 | 2020-12-06T11:00:39 | 319,011,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 719 | py | from django.shortcuts import render
from .forms import RegisterForm
# Create your views here.
# CRUD Create, Update
def register(request):
    """Sign-up view: show the form on GET, create the user on a valid POST."""
    if request.method != "POST":
        # First visit: present an empty registration form.
        form = RegisterForm()
        return render(request, 'registration/register.html', {'form': form})

    # Registration data has been submitted.
    form = RegisterForm(request.POST)
    if not form.is_valid():
        # Re-display the bound form so field errors are shown.
        return render(request, 'registration/register.html', {'form': form})

    new_user = form.save(commit=False)
    # Hash the raw password instead of storing it verbatim.
    new_user.set_password(form.cleaned_data['password'])
    new_user.save()
    return render(request, 'registration/register_done.html', {'new_user': new_user})
"mandu0955@gmail.com"
] | mandu0955@gmail.com |
11916a289073fbbde26be97dd094185aef82f248 | 62fc02609073c172fe36936cffde67d937f93c1c | /.github/functional-testing/ftlib/executable_test.py | 70a445fb9711b6b2e1d37a210d1fd288cb828860 | [] | no_license | DudaevAR/engnr-nsu-cpp-2022 | 44f4dd81b387e558f2775a9c98e6208f1798f798 | 7ebddaf69c49b5894f4ff6189b31ebec6df17400 | refs/heads/main | 2023-08-25T14:49:01.277588 | 2021-10-29T04:22:56 | 2021-10-29T06:16:20 | 425,638,561 | 0 | 0 | null | 2021-11-07T23:13:35 | 2021-11-07T23:13:34 | null | UTF-8 | Python | false | false | 4,731 | py | import os
import subprocess
from typing import Callable, List, Optional
class ExecutableTestFailedError(RuntimeError):
    """Raised when the executable under test violates an expectation."""
class ExecutableTest:
    """Declarative test around a single executable invocation.

    Runs ``executable_path`` with ``executable_args`` (optionally feeding
    ``stdin`` to it) inside ``working_dir`` and forwards the observed results
    to user-supplied tester callbacks:

    * ``exe_expected_status_codes_tester(self, status_code)``
    * ``exe_ostreams_expected_content_tester(self, stdout, stderr)``
    * ``fs_expected_state_tester(self)``

    ``setup_routine(self)`` runs before the executable; ``teardown_routine(self)``
    always runs afterwards, even when the run or a tester failed.
    """

    def __init__(
        self,
        name: str,
        # Fix: the original used a mutable default (`executable_args: List[str] = []`)
        # that was silently shared between all instances.
        executable_args: 'Optional[List[str]]' = None,
        executable_path: str = None,
        working_dir: 'Optional[str]' = None,
        stdin: 'Optional[str]' = None,
        # (self, status_code) -> None
        exe_expected_status_codes_tester: 'Callable[[ExecutableTest, int], None]' = None,
        # (self, stdout, stderr) -> None
        exe_ostreams_expected_content_tester: 'Callable[[ExecutableTest, str, str], None]' = None,
        # (self) -> None
        fs_expected_state_tester: 'Callable[[ExecutableTest], None]' = None,
        # (self) -> None
        setup_routine: 'Callable[[ExecutableTest], None]' = None,
        # (self) -> None
        teardown_routine: 'Callable[[ExecutableTest], None]' = None
    ):
        self.__name: str = str() if name is None else name
        self.__setup_routine: 'Callable[[ExecutableTest], None]' = setup_routine
        self.__teardown_routine: 'Callable[[ExecutableTest], None]' = teardown_routine
        self.__executable_path: str = executable_path
        # Copy into a fresh list so later caller-side mutation cannot change
        # the command, and instances never share argument lists.
        self.__executable_args: 'List[str]' = [] if executable_args is None else list(executable_args)
        self.__working_dir: 'Optional[str]' = working_dir
        self.__stdin: 'Optional[str]' = stdin
        self.__exe_expected_status_codes_tester: 'Callable[[ExecutableTest, int], None]' = exe_expected_status_codes_tester
        self.__exe_ostreams_expected_content_tester: 'Callable[[ExecutableTest, str, str], None]' = exe_ostreams_expected_content_tester
        self.__fs_expected_state_tester: 'Callable[[ExecutableTest], None]' = fs_expected_state_tester

    # Returns exception occurred during execution of the teardown routine.
    # Exceptions occurred in any other stage of testing will be re-thrown.
    def test_and_throw_errors(self) -> 'Optional[Exception]':
        """Run setup, the executable with its testers, then teardown.

        Raises ExecutableTestFailedError (chained to the original failure)
        when the executable stage failed — but only after teardown has run.
        Returns the exception raised by the teardown routine, or None.
        """
        if self.__setup_routine is not None:
            self.__setup_routine(self)
        run_exe_exception = self.__test_executable_safe()
        teardown_exception = self.__teardown_safe()
        if run_exe_exception is not None:
            raise ExecutableTestFailedError from run_exe_exception
        return teardown_exception

    def get_name(self) -> str:
        return self.__name
    name = property(fget=get_name)

    def set_executable_path(self, new_path: str):
        self.__executable_path = new_path
    def get_executable_path(self) -> str:
        return self.__executable_path
    executable_path = property(fget=get_executable_path, fset=set_executable_path)

    def set_working_dir(self, wd: str):
        self.__working_dir = wd
    def get_working_dir(self) -> str:
        # Fall back to the current directory when unset or empty.
        return self.__working_dir if self.__working_dir else os.getcwd()
    working_dir = property(fget=get_working_dir, fset=set_working_dir)

    def get_exe_full_command(self) -> str:
        """Return the quoted command line, for diagnostics."""
        args_str = ' '.join(f'"{w}"' for w in self.__executable_args)
        return f'"{self.__executable_path}" {args_str}'

    def __teardown_safe(self) -> 'Optional[Exception]':
        """Run the teardown routine, capturing (not raising) its exception."""
        try:
            if self.__teardown_routine is not None:
                self.__teardown_routine(self)
        except Exception as err:
            return err
        return None

    def __test_executable_safe(self) -> 'Optional[Exception]':
        """Run the executable stage, capturing (not raising) its exception."""
        try:
            self.__test_executable_unsafe()
        except Exception as err:
            return err
        return None  # explicit for symmetry with __teardown_safe

    def __test_executable_unsafe(self):
        """Run the executable and invoke every configured tester.

        The nested try/finally ladder guarantees all three testers run even
        when an earlier one fails; the exception from the latest failing
        stage is the one that propagates.
        """
        completed_process = self.__run_executable_unsafe()
        try:
            try:
                if self.__exe_expected_status_codes_tester is not None:
                    self.__exe_expected_status_codes_tester(self, completed_process.returncode)
            finally:
                if self.__exe_ostreams_expected_content_tester is not None:
                    self.__exe_ostreams_expected_content_tester(self, completed_process.stdout, completed_process.stderr)
        finally:
            if self.__fs_expected_state_tester is not None:
                self.__fs_expected_state_tester(self)

    def __run_executable_unsafe(self) -> subprocess.CompletedProcess:
        args = [self.__executable_path] + self.__executable_args
        # Pipe stdin text when given; otherwise attach DEVNULL so the child
        # can never block waiting for input.
        if self.__stdin is None:
            child_stdin, input_text = subprocess.DEVNULL, None
        else:
            child_stdin, input_text = None, self.__stdin
        # Capture the streams only when someone will inspect them.
        capture_output = self.__exe_ostreams_expected_content_tester is not None
        return subprocess.run(
            args=args,
            stdin=child_stdin,
            input=input_text,
            capture_output=capture_output,
            text=True,
            cwd=self.__working_dir
        )
| [
"nikita.provotorov.work@gmail.com"
] | nikita.provotorov.work@gmail.com |
89b1a60305e5bb3cd02add3b33ad77a51bae4d59 | b7ab3435d4ec4802db9859d0b8a26678c0315f34 | /naive_bayes.py | 6a1eb86976119bc17f2dc1d3c68138b0bec2de0f | [] | no_license | SunShuoyue/Machine-Learning-numpy | 757e161a9b2b906b4b0f0f1c7d9fa5ffb8ee6053 | 08162f343ea0b0189120bb308555f363bc074794 | refs/heads/master | 2020-08-15T01:34:30.620799 | 2020-03-05T03:22:29 | 2020-03-05T03:22:29 | 215,261,126 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,341 | py | import numpy as np
import pandas as pd
import jieba
# prepare data
df = pd.read_csv('data/data.csv')
data = np.array(df)
np.random.shuffle(data)
test_ratio = 0.2
rows, variables = data[:, :-1].shape
X_train = data[:int(rows * (1 - test_ratio)), :variables]
y_train = data[:int(rows * (1 - test_ratio)), variables]
X_test = data[int(rows * (1 - test_ratio)):, :variables]
y_test = data[int(rows * (1 - test_ratio)):, variables]
# train
data = []
words = []
for sen in X_train[:, 0]:
word = jieba.lcut(sen)
data.append(word)
words += word
dictionary = dict(zip(set(words), range(len(set(words)))))
classes = np.unique(df.label)
y_train = np.array(df.label)
bayes = np.ones([len(classes), len(dictionary)]) * 10e-12
for i in range(len(data)):
length = len(data[i])
for word in data[i]:
bayes[y_train[i]][dictionary[word]] += 1 / length
bayes = bayes * (np.bincount(np.array(y_train, dtype=int)) / len(y_train)).reshape(-1, 1)
# test
y_pred = []
for sen in X_test[:, 0]:
words = jieba.lcut(sen)
pred = [np.nan] * len(classes)
for i in classes:
lik = 0
for word in words:
if word in dictionary:
lik += np.log(bayes[i][dictionary[word]])
pred[i] = lik
y_pred.append(np.argsort(pred)[-1])
acc = np.sum(y_test == y_pred) / len(y_test)
print(acc)
| [
"shuoyue.sun13@alumni.xjtlu.edu.cn"
] | shuoyue.sun13@alumni.xjtlu.edu.cn |
cc7bd602015e300fbfa15f8be4f4761509447333 | 1e6339be3e602e967ce407d1900d54a99da995b2 | /models/item.py | cb3aacdfd853d4cc648f4650164ca62a1d9295fa | [] | no_license | Bolinooo/stores-rest-api | 8e2ecd4588016576e332b8917ded243db2676765 | faeb7afc2d48f194d1561e87a2cefc29e720d29f | refs/heads/master | 2021-06-28T23:24:31.032662 | 2017-09-16T11:23:08 | 2017-09-16T11:23:08 | 103,740,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | from db import db
class ItemModel(db.Model):
    """SQLAlchemy model for a store item (table ``items``)."""
    __tablename__ = 'items'
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # Item name; also the lookup key used by find_by_name().
    name = db.Column(db.String(80))
    # Price stored with two decimal places of precision.
    price = db.Column(db.Float(precision=2))
    # Owning store (many items -> one store).
    store_id = db.Column(db.Integer, db.ForeignKey('stores.id'))
    store = db.relationship('StoreModel')
    def __init__(self, name, price, store_id):
        self.name = name
        self.price = price
        self.store_id = store_id
    def json(self):
        """Return a JSON-serialisable dict view of the item."""
        return {'name': self.name, 'price': self.price}
    @classmethod
    def find_by_name(cls, name):
        """Return the first item with the given name, or None if absent."""
        return cls.query.filter_by(name=name).first()
    def save_to_db(self):
        """Insert or update this row and commit the session."""
        db.session.add(self)
        db.session.commit()
    def delete_from_db(self):
        """Delete this row and commit the session."""
        db.session.delete(self)
        db.session.commit()
| [
"0864947@hr.nl"
] | 0864947@hr.nl |
b917b540fd2bee540dcc58d0a70966a61aea5234 | 263188a7a2a751a5cac51197a6c141159e7c1a37 | /todolist_app/migrations/0002_tasklist_owner.py | ee60c7cf625736952d99ae0225e8d7c62c71328f | [] | no_license | jconner256/taskmate | 4093f438873df12fe16dbf268bb85a9df271bb8d | 38c8bd07536d0ae2d137dce08421a57886f0a3c4 | refs/heads/master | 2023-08-21T14:48:42.487848 | 2020-06-28T21:22:19 | 2020-06-28T21:22:19 | 275,656,524 | 0 | 0 | null | 2021-09-22T19:21:09 | 2020-06-28T19:44:10 | Python | UTF-8 | Python | false | false | 586 | py | # Generated by Django 3.0.7 on 2020-06-28 12:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add TaskList.owner, a FK to the configured auth user model."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('todolist_app', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='tasklist',
            name='owner',
            # NOTE(review): default=None on a non-nullable FK fails on existing
            # rows — presumably the table was empty when this migration ran.
            field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"jeremyconner256@gmail.com"
] | jeremyconner256@gmail.com |
7c99f6a0279f12f467c7142b67eb53b1227d1a73 | fe3adbd584b713a387fb8c9c238991f3d865990f | /users/signals.py | 85b09417c55ae7fd2405ffa99db60a1aec306cc8 | [] | no_license | radhadman/django_app | ae70e23953f2b3695c56c54cfe1314c233ec4911 | 52c8188c1cf42e04cef3b6a9b51e11f1947dda63 | refs/heads/master | 2023-08-14T17:06:43.813643 | 2021-09-14T21:44:22 | 2021-09-14T21:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | py | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
@receiver(post_save, sender=User)
def save_profile(sender, instance, **kwargs):
    """Persist the related Profile whenever its User instance is saved."""
    # Assumes a Profile already exists for this user (e.g. created by
    # another post_save handler) — otherwise this raises. TODO confirm.
    instance.profile.save()
| [
"haddon.sawatzky@student.ufv.ca"
] | haddon.sawatzky@student.ufv.ca |
f4b2c710e8a80b823cb7abf0cd54994e1b1abba4 | ad8e012d13be8f2e21e9341df5d97b2606c1d1e1 | /searchEngine/search_engine.py | 793a505b0affaa18f4655cf56189c2bb070e9923 | [] | no_license | sky102/WebSearchEngine | a47eef05a90f2eb824da18327011daeaf947cb0c | a86f803b954055371fe47dd1322814e9ffa2b061 | refs/heads/main | 2023-01-28T04:22:59.837852 | 2020-11-30T22:21:13 | 2020-11-30T22:21:13 | 317,337,324 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,713 | py | import tkinter as tk
#from tkinter import ttk
import webbrowser
import pymongo
from pymongo import MongoClient
from bs4 import BeautifulSoup
from nltk.stem import PorterStemmer
from nltk import word_tokenize
import re
from corpus import Corpus
import json
from collections import defaultdict
import math
from sklearn.metrics.pairwise import cosine_similarity
WINDOW_WIDTH = 600
WINDOW_HEIGHT = 550
class FilePage:
    """A tkinter window that displays the visible text of a local HTML file.

    The window title defaults to the file path unless an explicit ``link``
    (typically the page's original URL) is given.
    """
    def __init__(self, file, link=None):
        self.root = tk.Tk()
        self.file = file
        # Fixed: identity comparison (`is None`) instead of `== None`.
        if link is None:
            link = str(file)
        self.root.title(link)
        self.root.minsize(width = WINDOW_WIDTH, height = WINDOW_HEIGHT)
        self.root.rowconfigure(0, weight = 1)
        self.root.columnconfigure(0, weight = 1)
        self.create_widgets()
    def create_widgets(self):
        """Build the text area with vertical and horizontal scrollbars."""
        self.display = tk.Text(self.root, wrap=tk.NONE)
        self.display.grid(row=0, column=0, sticky='nesw')
        self.Yscrollbar = tk.Scrollbar(self.display, orient=tk.VERTICAL, cursor='')
        self.Yscrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.display.config(yscrollcommand=self.Yscrollbar.set)
        self.Yscrollbar['command'] = self.display.yview
        self.Xscrollbar = tk.Scrollbar(self.display, orient=tk.HORIZONTAL)
        self.Xscrollbar.pack(side=tk.BOTTOM, fill=tk.X)
        self.display.config(xscrollcommand=self.Xscrollbar.set)
        self.Xscrollbar['command'] = self.display.xview
    def open(self):
        """Read self.file, strip scripts/styles, show the text, and block in mainloop."""
        # Context manager guarantees the handle is closed even if read() fails
        # (the original used manual open()/close()).
        with open(self.file, 'r', encoding='utf-8') as file:
            data = file.read()
        soup = BeautifulSoup(data, 'html.parser')
        for script in soup(["script", "style"]):
            script.extract()
        text = soup.get_text()
        self.display.insert(tk.END, text)
        # Make the widget read-only once the content is in place.
        self.display['state'] = tk.DISABLED
        self.root.mainloop()
class SearchEngine:
    """
    This search engine class searchs and ranks documents in the database
    based on the given user query, and returns a list of urls.
    """
    def __init__(self):
        # NOTE(review): database credentials are embedded in the URI —
        # consider loading them from configuration/environment instead.
        self.client = MongoClient("mongodb+srv://cs121se:cs121@cs121se-rlhbw.azure.mongodb.net/test?retryWrites=true")
        self.database = self.client["SearchEngine"]
        #self.collection = self.database["InvertedIndex"]
        self.collection = self.database["Index"]
        # Porter stemmer applied to both query terms and index lookups.
        self.PS = PorterStemmer()
    def search(self, query):
        '''Returns a list of doc ids based on given query'''
        # Earlier boolean-AND implementation, kept commented for reference:
        # try:
        #     results = set()
        #     for q in query.split():
        #         database_results = self.collection.find_one({"term": self.PS.stem(q)})
        #         if (results == set()):
        #             results = set([d['doc'] for d in database_results["post"]])
        #         else:
        #             results = results & set([d['doc'] for d in database_results["post"]])
        # except:
        #     results = []
        # return list(results)
        # Current implementation: tf-idf scoring + cosine similarity ranking.
        query = [self.PS.stem(q) for q in query.split()]
        results = defaultdict(dict)
        query_tfidf = dict()
        for q in set(query):
            postings = self._query_database(q)
            if postings != None:
                # 37497 appears to be the total document count of the
                # indexed corpus — TODO confirm against the crawler output.
                query_tfidf[q] = (1 + math.log10(query.count(q))) * math.log10(37497/len(postings["post"]))
                for id,tf in postings["post"].items():
                    df = len(postings["post"])
                    tfidf = (1 + math.log10(tf)) * math.log10(37497/df)
                    results[id][q] = tfidf
            else:
                # Term absent from the index: zero weight in the query vector.
                query_tfidf[q] = 0
        score = dict()
        for doc,tfidf in results.items():
            score[doc] = self._cosine_similarity(query_tfidf, tfidf)[0][0]
        # Return at most the 20 highest-scoring document ids.
        top_twenty_results = []
        k = 0
        #for doc,_ in sorted(results.items(), key=lambda x: -x[1]):
        for doc,_ in sorted(score.items(), key=lambda x: -x[1]):
            #print(_,doc)
            top_twenty_results.append(doc)
            k += 1
            if k == 20:
                return top_twenty_results
        return top_twenty_results
    def _cosine_similarity(self, query, doc):
        """Cosine similarity between the query tf-idf vector and one document's.

        Terms missing from the document vector contribute 0.
        """
        q_array = []
        d_array = []
        for q,score in query.items():
            q_array.append(score)
            try:
                d_array.append(doc[q])
            except:
                d_array.append(0)
        return cosine_similarity([q_array],[d_array])
    def _query_database(self, query):
        """Fetch the postings record for a stemmed term; None if not indexed."""
        return self.collection.find_one({"_id": query})
class SearchEngineGUI:
    """
    This class creates a tkinter GUI for the search engine.
    """
    def __init__(self, search_engine):
        """Build the main window and all widgets around the given SearchEngine."""
        self.corpus = Corpus()
        self._search_engine = search_engine
        self.root = tk.Tk()
        self.root.title("INF141/CS121: Information Retrieval - Project3: Search Engine")
        self.root.minsize(width = WINDOW_WIDTH, height = WINDOW_HEIGHT)
        #self.root.maxsize(width = WINDOW_WIDTH, height = WINDOW_HEIGHT)
        self.configure_grid()
        self.create_widgets()
        self.user_query = ''
        self.search_results = []
    def mainloop(self):
        """Enter the tkinter event loop (blocks until the window closes)."""
        self.root.mainloop()
    def configure_grid(self):
        '''Configures the tkinter root frame'''
        for i in range(1,9):
            self.root.rowconfigure(i, weight = 1)
        self.root.columnconfigure(0, weight = 10)
        #self.root.columnconfigure(1, weight = 1)
        self.root.columnconfigure(2, weight = 1)
    def create_widgets(self):
        '''Creates GUI tkinter widgets'''
        # Query entry + SEARCH button on the top row.
        self.query_entry = tk.Entry(self.root)
        self.query_entry.grid(row=0,column=0, columnspan=2, sticky='ew')
        self.query_entry.bind('<Return>', self._search)
        self.query_entry.focus()
        self.search_button = tk.Button(self.root, text="SEARCH", command=self._search, bg="cornflower blue")
        self.search_button.grid(row=0,column=2, sticky='ew')
        # Scrollable listbox holding the ranked results.
        self.resultsBox = tk.Listbox(self.root)
        self.resultsBox.grid(column=0, columnspan=3, row=1, rowspan=5, sticky='nesw')
        self.resultsBox.bind('<Return>', self._open)
        self.resultsBox.bind('<<ListboxSelect>>', self._get_descr)
        self.resultsBox.bind('<Double-Button-1>', self._open)
        self.query_entry.bind('<Down>', lambda event: self.resultsBox.focus())
        self.scrollbar = tk.Scrollbar(self.resultsBox, orient=tk.VERTICAL)
        self.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
        self.resultsBox.config(yscrollcommand=self.scrollbar.set)
        self.scrollbar['command'] = self.resultsBox.yview
        # Read-only preview pane under the result list.
        self.descriptionFrame = tk.Frame(self.root)
        self.descriptionFrame.grid(row=6,rowspan=3,column=0,columnspan=3,sticky='nesw')
        self.descriptionFrame.rowconfigure(0, weight = 1)
        self.descriptionFrame.columnconfigure(0, weight = 1)
        self.descriptionFrame.grid_propagate(False)
        self.description = tk.Text(self.descriptionFrame,height=9,state=tk.DISABLED)
        self.description.grid(row=0,column=0)
        self.description.tag_config("underline", underline=1)
        self.description.tag_config("query", background="yellow")
        # Bottom row: open in browser toggle, OPEN and CANCEL buttons.
        self.in_browser = tk.IntVar()
        self.in_browser_check = tk.Checkbutton(self.root, text='in browser', variable=self.in_browser)
        self.in_browser_check.grid(row=9,column=1)
        self.open_button = tk.Button(self.root, text="OPEN", command=self._open, bg="pale goldenrod")
        self.open_button.grid(row=9,column=0, sticky='ew')
        self.cancel_button = tk.Button(self.root, text="CANCEL", command=self._cancel, bg="light coral")
        self.cancel_button.grid(row=9,column=2, sticky='ew')
    def _search(self, event=None):
        '''Gives user query to the search engine and displays results'''
        self.user_query = self.query_entry.get().strip().lower()
        if self.user_query == '':
            return
        self._clear_search()
        #self.search_results = [self.corpus.file_url_map[d['_id']] for d in self._search_engine.search(self.user_query)]
        self.search_results = [self.corpus.file_url_map[id] for id in self._search_engine.search(self.user_query)]
        for s in self.search_results:
            # Prefer "<page title> -- <url>"; fall back to the bare url when
            # the corpus has no title for this document.
            try:
                self.resultsBox.insert(tk.END, self.corpus.id_title_map[self.corpus.url_file_map[s]]+" -- "+s)
            except:
                self.resultsBox.insert(tk.END, s)
        #print("# of results:", len(self.search_results))
        #for s in self.search_results[0:20]:
        #    print(s)
    def _get_descr(self, event=None):
        '''Inserts selected page/url title and preview into description box'''
        selected_index = self.resultsBox.curselection()
        if selected_index == tuple():
            return
        url = self.search_results[selected_index[0]]
        file = open(self.corpus.get_file_name(url), 'r', encoding="utf-8")
        data = file.read()
        file.close()
        soup = BeautifulSoup(data, 'lxml')
        self.description['state'] = tk.NORMAL
        self.description.delete('1.0',tk.END)
        # Title line (underlined); some pages have no <title>.
        try:
            title = soup.title.string + '\n\n'
            self.description.insert(tk.END, title, "underline")
        except:
            pass
        query_list = self.user_query.split()
        for script in soup(["script", "style"]):
            script.extract()
        text = soup.get_text().split()
        text_stem = [self._search_engine.PS.stem(s.lower()) for s in text]
        # For each query term, locate one occurrence in the page text and
        # show a ~40-word window around it with the match highlighted.
        for q in query_list:
            found = True
            try:
                index = text_stem.index(self._search_engine.PS.stem(q))
            except:
                # Fallback: substring/stem-prefix match, rejecting cases where
                # the term only appears embedded inside a longer word.
                try:
                    for i in range(len(text)):
                        p = "([A-Za-z]+"+q+")|("+q+"[A-Za-z]+)"
                        s = self._search_engine.PS.stem(q)
                        word = text[i].lower()
                        if (q in word and not re.search(p,word)):
                            index = i;
                            break;
                        else:
                            if not re.search("[A-Za-z]+"+s,word):
                                match = re.search(s+"[A-Za-z]*",word)
                                if match:
                                    if self._search_engine.PS.stem(word[match.span()[0]:match.span()[1]]) == s:
                                        index = i;
                                        break;
                except:
                    found = False
            finally:
                if found:
                    try:
                        desc = '...'
                        desc += " ".join(text[max(index-20,0):index])
                        self.description.insert(tk.END, desc+" ")
                        self.description.insert(tk.END, text[index], "query")
                        desc = " ".join(text[index+1:min(index+20,len(text))])
                        desc += '...\n'
                        self.description.insert(tk.END, " "+desc)
                    except:
                        pass
        self.description['state'] = tk.DISABLED
    def _open(self, event=None):
        '''Opens the selected page/url'''
        selected_index = self.resultsBox.curselection()
        if selected_index == tuple():
            return
        url = self.search_results[selected_index[0]]
        # Either hand off to the system browser or render the cached file
        # in a FilePage window, depending on the checkbox.
        if (self.in_browser.get()):
            webbrowser.open(url, new=2)
        else:
            page = FilePage(self.corpus.get_file_name(url), link=url)
            page.open()
    def _cancel(self):
        '''Cancels and resets the search engine and GUI'''
        self.user_query = ''
        self.query_entry.delete(0, tk.END)
        self.query_entry.focus()
        self._clear_search()
    def _clear_search(self):
        '''Clears the searchbox'''
        self.resultsBox.delete(0, tk.END)
        self.search_results = []
        self.description['state'] = tk.NORMAL
        self.description.delete('1.0',tk.END)
        self.description['state'] = tk.DISABLED
if __name__ == "__main__":
    try:
        search_engine = SearchEngine()
        search_engine_gui = SearchEngineGUI(search_engine)
        search_engine_gui.mainloop()
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any other failure is reported with a traceback.
        import traceback
        traceback.print_exc()
"skyn@uci.edu"
] | skyn@uci.edu |
014a53af66b673ff30e7f16c4b530d0334d76c90 | 72157f13029fb40f535e3710ce9fcb64f68b32d3 | /blog/forms.py | 443e76353a84ce2fa9dcc92ba64b79bca22021fc | [] | no_license | adityagoyal222/django-cloneblog | 3c006a0ef0c0d39db00b561a305e520f5b8af0f0 | 3b7c7d7931b96e92d427dd840f97fe30f9aaf39e | refs/heads/master | 2022-12-20T09:42:27.985580 | 2020-10-06T15:52:26 | 2020-10-06T15:52:26 | 301,113,022 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | from django import forms
from blog.models import Post, Comment
class PostForm(forms.ModelForm):
    """ModelForm for creating/editing a blog Post (author, title, body)."""
    class Meta():
        model = Post
        fields = ('author', 'title', 'text')
        # CSS hooks: 'textInputClass' styles the title input; the
        # media-editor classes enable the rich-text editor on the body.
        widgets = {
            'title': forms.TextInput(attrs={'class':'textInputClass'}),
            'text': forms.Textarea(attrs={'class':'editable media-editor-textarea postcontent'})
        }
class CommentForm(forms.ModelForm):
    """ModelForm for posting a Comment (author name and text only)."""
    class Meta():
        model = Comment
        fields = ('author', 'text')
        # Same CSS/editor hooks as PostForm, minus the post-specific class.
        widgets = {
            'author': forms.TextInput(attrs={'class': 'textInputClass'}),
            'text': forms.Textarea(attrs={'class':'editable media-editor-textarea'})
        }
| [
"43295337+adityagoyal222@users.noreply.github.com"
] | 43295337+adityagoyal222@users.noreply.github.com |
874acc4cd4bb1868affb3ce85b9d933a6ec0eaca | d2f5a055709645321e7b50b8530e3671356bf033 | /busboy/experiments/types.py | e58eb01a894b2b26216b3a462a14dd1e62bf377b | [] | no_license | rashmi2k6/busboy | 99f627cfea68277d03ee2e550feca2ded19fc5de | 46b489b3c04502d6ffa861d05b64e39198988e00 | refs/heads/master | 2022-04-11T18:23:51.939817 | 2019-04-01T20:13:16 | 2019-04-01T20:13:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,982 | py | from __future__ import annotations
import concurrent.futures as cf
import dataclasses
import datetime as dt
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Callable, Dict, Generic, Optional, Set, TypeVar
import busboy.apis as api
import busboy.constants as c
import busboy.database as db
import busboy.model as m
from busboy.util import Either, Maybe
T = TypeVar("T")
U = TypeVar("U")
@dataclass(frozen=True)
class PollResult(Generic[T]):
    """Immutable snapshot of per-stop results gathered in one polling pass."""
    # Wall-clock time at which the poll was taken.
    time: dt.datetime
    # One result of type T for every polled stop.
    results: Dict[m.StopId, T]
    def filter(self, f: Callable[[T], bool]) -> "PollResult[T]":
        """Return a copy keeping only stops whose result satisfies f."""
        return dataclasses.replace(
            self, results={s: spr for s, spr in self.results.items() if f(spr)}
        )
    def map(self, f: Callable[[T], U]) -> "PollResult[U]":
        """Return a copy with f applied to every stop's result."""
        return PollResult(
            time=self.time, results={s: f(spr) for s, spr in self.results.items()}
        )
    @staticmethod
    def to_json(
        pr: PollResult[Either[Exception, m.StopPassageResponse]]
    ) -> Dict[str, Any]:
        """Serialise to a JSON-ready dict; errors are passed through unmapped."""
        return {
            "time": pr.time.isoformat(),
            "results": {
                s.raw: e.map(lambda spr: spr.to_json()).value
                for s, e in pr.results.items()
            },
        }
    @staticmethod
    def from_json(j: Dict[str, Any]) -> "PollResult[m.StopPassageResponse]":
        """Inverse of to_json, assuming every entry is a successful response."""
        t = dt.datetime.fromisoformat(j["time"])
        rs = {
            m.StopId(s): m.StopPassageResponse.from_my_json(spr)
            for s, spr in j["results"].items()
        }
        return PollResult(t, rs)
    @staticmethod
    def trips(
        pr: PollResult[m.StopPassageResponse]
    ) -> PollResult[Set[Maybe[m.TripId]]]:
        """Per-stop sets of trip ids observed in each response."""
        return pr.map(lambda spr: {p.trip for p in spr.passages})
    @staticmethod
    def all_trips(pr: PollResult[m.StopPassageResponse]) -> Set[Maybe[m.TripId]]:
        """Union of trip ids across every stop in the poll."""
        return {t for ts in PollResult.trips(pr).results.values() for t in ts}
    @staticmethod
    def all_passages(pr: PollResult[m.StopPassageResponse]) -> Set[m.Passage]:
        """All passages across every stop in the poll."""
        return {p for _, spr in pr.results.items() for p in spr.passages}
# For each observed trip, whether it was present at each stop per poll.
PresenceData = Dict["PassageTrip", PollResult[bool]]
@dataclass(frozen=True)
class PassageTrip(object):
    """All trip-specific information contained in a Passage."""
    id: Optional[m.TripId]
    route: Optional[m.RouteId]
    vehicle: Optional[m.VehicleId]
    latitude: Optional[float]
    longitude: Optional[float]
    bearing: Optional[int]
    @staticmethod
    def from_passage(p: m.Passage) -> "PassageTrip":
        """Project the trip-related fields out of a full Passage."""
        return PassageTrip(
            p.trip, p.route, p.vehicle, p.latitude, p.longitude, p.bearing
        )
@dataclass(frozen=True)
class StopCounts(object):
    """How many trips are covered by each stop."""
    # Per-stop trip counts.
    counts: Dict[m.StopId, int]
    # Total number of distinct trips across all stops.
    total: int
@dataclass(frozen=True)
class StopTrips(object):
    """The trips covered by each stop."""
    # Per-stop sets of trip ids (None for passages with no trip id).
    trips: Dict[m.StopId, Set[Optional[m.TripId]]]
    # Union of trip ids across all stops.
    all_trips: Set[Optional[m.TripId]]
| [
"undergroundquizscene@gmail.com"
] | undergroundquizscene@gmail.com |
e4ca15c7d6e9970ed011775900e5cda0f08e74ef | 1b990e66e1c739e5e56c677298225232d752269a | /utils/preprocessing.py | 9e4c66d6636e7219f41abc12d737ed9850b84fef | [
"MIT"
] | permissive | Lleyton-Ariton/super-resolution | 468e5479629aa63711ffa2dce51d05d08817ece5 | 2772cd91e6d369233ee53428c6b0fc6061a3c3b4 | refs/heads/main | 2023-02-07T18:08:44.534423 | 2020-12-31T22:40:09 | 2020-12-31T22:40:09 | 325,628,859 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 948 | py | import os
import cv2
import torch
from torch.utils.data import Dataset
from typing import *
class ImageDataSet(Dataset):
    """Dataset of (low-res, high-res) image tensor pairs for super-resolution.

    Every image found in *train_dir* is loaded with OpenCV and paired as
    (100x100 bicubic downscale, 400x400 target).
    """

    # Recognised image file extensions.
    # BUG FIX: the original list was missing a comma after '.png', so the
    # adjacent string literals concatenated into '.png.jpg' and the list
    # silently contained only two (wrong) entries.
    IMAGE_FILES = [
        '.png',
        '.jpg',
        '.jpeg',
    ]

    @classmethod
    def image_files(cls) -> List[str]:
        """Return the list of supported image file extensions."""
        return cls.IMAGE_FILES

    def __init__(self, train_dir: str):
        super().__init__()
        self.train_dir = train_dir

        image_names = os.listdir(self.train_dir)

        self.data = []
        for image_name in image_names:
            img = cv2.imread(f'{self.train_dir}/{image_name}')
            # Low-res input via bicubic downscale; full-size target.
            lr, hr = cv2.resize(img, (100, 100), cv2.INTER_CUBIC), cv2.resize(img, (400, 400))
            self.data.append((
                torch.from_numpy(lr).float(), torch.from_numpy(hr).float()
            ))

    def __getitem__(self, item: int) -> (torch.Tensor, torch.Tensor):
        return self.data[item]

    def __len__(self) -> int:
        return len(self.data)
| [
"lleytonariton@MacBookLleyton.local"
] | lleytonariton@MacBookLleyton.local |
4e746f96da01b3c31cde8ca5fd5703e427fb4f2d | e2a7f0ac4e5369e7e924029c1650a986716e78fc | /provisioning/ubuntu-trusty/config.py | 698b06100dd24a2e25e9602b22025650824fecf2 | [
"Unlicense"
] | permissive | reductus/reductus | f89566de60cda387fc20b1aba4210528c3bd535b | 07e865a08396b42fa7ae035de97628bc995506bc | refs/heads/main | 2023-05-22T15:08:10.730577 | 2023-05-12T16:08:49 | 2023-05-12T16:08:49 | 1,320,973 | 7 | 12 | Unlicense | 2022-09-30T03:23:50 | 2011-02-02T16:46:54 | Python | UTF-8 | Python | false | false | 863 | py | #############################################################
# rename or copy this file to config.py if you make changes #
#############################################################
# change this to your fully-qualified domain name to run a
# remote server. The default value of localhost will
# only allow connections from the same computer.
#jsonrpc_servername = "h3.umd.edu"
jsonrpc_servername = "localhost"
jsonrpc_port = 8001
http_port = 8000
serve_staticfiles = False
#use_redis = True
use_diskcache = True
diskcache_params = {"size_limit": int(4*2**30), "shards": 5}
use_msgpack = True
data_sources = [
{
"name": "ncnr",
"url": "https://www.ncnr.nist.gov/pub/",
"start_path": "ncnrdata"
},
]
file_helper_url = {
"ncnr": "https://www.ncnr.nist.gov/ipeek/listftpfiles.php"
}
instruments = ["refl", "ospec", "sans"]
| [
"brian.maranville@nist.gov"
] | brian.maranville@nist.gov |
2dcca01ee427cde3db10bb33f229ac89295893b3 | ba4e82ac08901ffa3b8ab341eb7f93b1b4212956 | /GoF/estruturais/bridge/devices/tv.py | 97e8aca95c25c4959b6c90a2187185f57886d08b | [] | no_license | danieladriano/padroes_projeto | a86c744ecdbad5f1234e583036f9d01f4205e4a8 | 437e67a93c21e91131a514e26778914c5c62c222 | refs/heads/main | 2023-04-28T13:42:04.651785 | 2021-05-24T23:37:18 | 2021-05-24T23:37:18 | 345,450,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 553 | py | from devices.device_interface import DeviceInterface
class Tv(DeviceInterface):
    """Concrete Bridge 'implementation': a TV controlled through a remote."""
    _enable = False
    _volume = 0
    # BUG FIX: _channel was never initialised, so get_channel() raised
    # AttributeError when called before set_channel().
    _channel = 0
    def is_enable(self) -> bool:
        """Return True while the TV is switched on."""
        return self._enable
    def enable(self):
        """Switch the TV on."""
        self._enable = True
    def disable(self):
        """Switch the TV off."""
        self._enable = False
    def get_volume(self) -> int:
        """Return the current volume level."""
        return self._volume
    def set_volume(self, volume: int):
        """Set the volume level."""
        self._volume = volume
    def get_channel(self) -> int:
        """Return the current channel (0 until one is set)."""
        return self._channel
    def set_channel(self, channel: int):
        """Tune to the given channel."""
        self._channel = channel
"daniel.dadriano@gmail.com"
] | daniel.dadriano@gmail.com |
bb3be63122e471f50c9f7a6a33ffc59c1d679c8c | 815a86b4f5f191ef620d36149255d4e55a2033a6 | /bintopcd.py | 6c9ff4a396f014c0c9ce5c8e2bcbc767c86ae456 | [] | no_license | estelleaaa/bin_to_pcd | 9a7085e984f68cc4135c16eec46815f603dc83e5 | 386e6dd06a9d86a43ad1b737afd4911d714b625d | refs/heads/main | 2023-01-24T18:27:12.268131 | 2020-11-24T06:19:50 | 2020-11-24T06:19:50 | 315,270,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,419 | py | # 2 methods for converting bin to pcd files
import os
import os.path
import open3d as o3d
import struct
import numpy as np
import sys
'''
# method 1
def bin_to_pcd(path):
# read files from the path
for parents,dirs,files in os.walk(path):
for file in files:
filepath = os.path.join(parents,file)
# define float size is 4 byte
size_float = 4
list_pcd = []
# read each file contents
with open(filepath,'rb') as f:
# read bytes from file
byte = f.read(size_float*4)
print(sys.getsizeof(byte), ' this is size of byte--------------')
while byte:
# struct.unpack() is to format string
x, y, z, intensity = struct.unpack("ffff", byte)
list_pcd.append([x,y,z])
byte = f.read(size_float*4)
# create array
np_pcd = np.asarray(list_pcd)
# o3d.geometry.PointCloud() A point cloud consists of point coordinates, optionally point color and point normals
pcd = o3d.geometry.PointCloud()
# o3d.utility.Vector3dVector: convert float64 numpy array of shape(n,3) to open 3D format
v3d = o3d.utility.Vector3dVector
pcd.points = v3d(np_pcd)
# create filename
filename = './pcd/' + file.split('.')[0]+'.pcd'
# write the pcd file
# open3d.io.write_point_cloud(filename, pointcloud, write_ascii=False, compressed=False, print_progress=False
o3d.io.write_point_cloud(filename, pcd)
path = './bin01/'
bin_to_pcd(path)
'''
# method2
def bin_convert_pcd(path):
    """Convert every KITTI-style .bin point-cloud file under *path* to .pcd.

    Output files are written to ./pcd/ using the same base file name.
    """
    for parents, dirs, files in os.walk(path):
        for file in files:
            filepath=os.path.join(parents,file)
            # load the binary point cloud:
            # np.fromfile(file, dtype=float, count=1, sep='', offset=0) is an
            # efficient way of reading binary data with a known data-type
            bin_pcd = np.fromfile(filepath, dtype=np.float32)
            # each point is (x, y, z, intensity); keep only the xyz columns
            points = bin_pcd.reshape((-1, 4))[:,0:3]
            o3d_pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))
            filename = './pcd/' + file.split('.')[0]+'.pcd'
            o3d.io.write_point_cloud(filename, o3d_pcd)
# Convert everything under ./bin01/.
path = './bin01/'
bin_convert_pcd(path)
| [
"estelle_day@163.com"
] | estelle_day@163.com |
8ccff6248936f865286605b47d7abde532b87485 | e42443bc2cad5e8a6eabb7c22a41d54df37e25a6 | /PYTHON TKINTER -FREECODE CAMP/08_openfilesbox.py | 4e34910dd75a0e96eb2d84f8a516614c79225733 | [] | no_license | cedvillalbaag/tkinter | 18e6aadd9cbaab2ac955c32233f38749600a2b86 | cf3162ba2c982c1247c7825cdd3a86f09ba51d65 | refs/heads/main | 2023-04-13T00:41:21.939767 | 2023-04-06T16:24:43 | 2023-04-06T16:24:43 | 361,856,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,169 | py | #OPEN FILES DIALOG BOX
# Plantilla Basica de Tkinter
#Importar librería
from tkinter import *
from PIL import ImageTk, Image
from tkinter import messagebox
from tkinter import filedialog
root = Tk()
root.title("Code")
root.iconbitmap('a.ico')
#PNG Files "*.png"
#all files "*.*"
#root.filename = filedialog.askopenfilename(initialdir= "/Users/ASUS/Desktop", title= "Select a File", filetypes=(("JPG files","*.jpg"),("Word Files","*.docx")))
#Visualizar que retorna la variable - se apreciara la ruta del archivo
#my_label = Label(root, text=root.filename).pack()
#my_image = ImageTk.PhotoImage(Image.open(root.filename))
#my_image_label = Label(image=my_image).pack()
def open():
global my_image
root.filename = filedialog.askopenfilename(initialdir= "/Users/ASUS/Desktop", title= "Select a File", filetypes=(("JPG files","*.jpg"),("Word Files","*.docx")))
my_label = Label(root, text=root.filename).pack()
my_image = ImageTk.PhotoImage(Image.open(root.filename))
my_image_label = Label(image=my_image).pack()
my_btn = Button(root, text="OpenFile", command=open).pack()
#Bucle infinito
root.mainloop() | [
"noreply@github.com"
] | noreply@github.com |
ded4a6f8270f5167ee4a1f901cc3a54f82a254b7 | 0fec7986cb02142a29858c54c0c83bbb25c85774 | /orttraining/orttraining/test/python/orttraining_test_ortmodule_autograd.py | 8b5ed034bbd092ca1cde63e1288387c97082fb93 | [
"MIT"
] | permissive | RyanUnderhill/onnxruntime | 98f6d182e0833ee156cbf4b6984b927f13171a26 | 6df4e293ffbb47d739d2dedfbb87fa6234b8c37c | refs/heads/master | 2023-08-17T21:25:40.390364 | 2021-08-30T18:10:00 | 2021-08-30T22:37:40 | 401,530,394 | 0 | 0 | MIT | 2021-08-31T02:30:32 | 2021-08-31T01:02:05 | null | UTF-8 | Python | false | false | 34,591 | py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# Import external libraries.
import onnxruntime
import pytest
import torch
from torch.nn.parameter import Parameter
from distutils.version import LooseVersion
# Import ORT modules.
from _test_helpers import *
from onnxruntime.training.ortmodule import ORTModule
torch.manual_seed(1)
onnxruntime.set_seed(1)
def torch_version_lower_than(v):
    """Return True when the installed torch release predates version string *v*."""
    installed = LooseVersion(torch.__version__)
    required = LooseVersion(v)
    return installed < required
def test_GeLU():
    """ORTModule must reproduce PyTorch training for a custom GeLU autograd.Function
    whose forward/backward bodies are TorchScript-compiled."""
    @torch.jit.script
    def bias_gelu(bias, y):
        x = bias + y
        return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))

    @torch.jit.script
    def bias_gelu_backward(g, bias, y):
        # Analytic gradient of the tanh-approximated GeLU.
        x = bias + y
        tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
        ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 +
                        0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
        return ff*g

    class GeLUFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, bias):
            ctx.save_for_backward(input, bias)
            return bias_gelu(bias, input)

        @staticmethod
        def backward(ctx, grad_output):
            input, bias = ctx.saved_tensors
            tmp = bias_gelu_backward(grad_output, bias, input)
            # Same gradient flows to both the input and the bias.
            return tmp, tmp

    class GeLUModel(torch.nn.Module):
        def __init__(self, output_size):
            super(GeLUModel, self).__init__()
            self.relu = GeLUFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))

            with torch.no_grad():
                self.bias.uniform_()

        def forward(self, model_input):
            out = self.relu(model_input, self.bias)
            return out

    output_size = 1024

    def model_builder():
        return GeLUModel(output_size)

    def input_generator():
        return torch.randn(output_size, dtype=torch.float)

    # generate a label that have same shape as forward output.
    label_input = torch.ones([output_size])

    run_training_test_and_compare(model_builder, input_generator, label_input)
def test_MegatronF():
    """ORTModule must match PyTorch for an identity autograd.Function
    (single-GPU stand-in for Megatron's F function)."""
    # MegatronGFunction is tested in distributed test files.
    class MegatronFFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input_):
            return input_

        @staticmethod
        def backward(ctx, grad_output):
            # Bypass the reduce as if we are using only 1 GPU.
            return grad_output

    class MegatronFModel(torch.nn.Module):
        def __init__(self, output_size):
            super(MegatronFModel, self).__init__()
            self.copy_ = MegatronFFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))

            with torch.no_grad():
                self.bias.uniform_()

        def forward(self, model_input):
            model_input = model_input + self.bias
            out = self.copy_(model_input)
            return out

    output_size = 1024

    def model_builder():
        return MegatronFModel(output_size)

    def input_generator():
        return torch.randn(output_size, dtype=torch.float)

    # generate a label that have same shape as forward output.
    label_input = torch.ones([output_size])

    run_training_test_and_compare(model_builder, input_generator, label_input)
def test_ScalarAndTuple():
    """Custom autograd.Function taking scalar and tuple constants alongside the
    tensor input must train identically under ORTModule."""
    class ScalarAndTupleFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input, alpha, beta, gamma):
            ctx.save_for_backward(input)
            # Non-tensor constants are stashed directly on the context.
            ctx.alpha = alpha
            ctx.beta = beta
            ctx.gamma = gamma
            return alpha * beta[0] * beta[1] * gamma * input.clamp(min=0)

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            alpha = ctx.alpha
            beta = ctx.beta
            gamma = ctx.gamma
            grad_input = grad_output.clone()
            grad_input[input < 0] = 0
            # Only the tensor argument receives a gradient.
            return alpha * beta[0] * beta[1] * gamma * grad_input, None, None, None

    class ScalarAndTupleModel(torch.nn.Module):
        def __init__(self, output_size):
            super(ScalarAndTupleModel, self).__init__()
            self.activation = ScalarAndTupleFunction.apply
            self.linear_a = torch.nn.Linear(output_size, output_size)
            self.linear_b = torch.nn.Linear(output_size, output_size)

        def forward(self, x):
            h = self.linear_a(x)
            h = self.activation(h, 5.0, (-1.0, 2.0), -1.0)
            h = self.linear_b(h)
            return h

    output_size = 2

    def model_builder():
        return ScalarAndTupleModel(output_size)

    def input_generator():
        return torch.randn(output_size, dtype=torch.float)

    # generate a label that have same shape as forward output.
    label_input = torch.ones([output_size])

    run_training_test_and_compare(model_builder, input_generator, label_input)
def test_ScalarAndTupleReordered():
    """Same as test_ScalarAndTuple but with the tensor argument placed after the
    scalar/tuple constants, exercising argument-position handling."""
    class ScalarAndTupleReorderedFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, alpha, beta, input, gamma):
            ctx.save_for_backward(input)
            ctx.alpha = alpha
            ctx.beta = beta
            ctx.gamma = gamma
            return alpha * beta[0] * beta[1] * gamma * input.clamp(min=0)

        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            alpha = ctx.alpha
            beta = ctx.beta
            gamma = ctx.gamma
            grad_input = grad_output.clone()
            grad_input[input < 0] = 0
            # Gradient is returned in the (reordered) argument position of the input.
            return None, None, alpha * beta[0] * beta[1] * gamma * grad_input, None

    class ScalarAndTupleReorderedModel(torch.nn.Module):
        def __init__(self, output_size):
            super(ScalarAndTupleReorderedModel, self).__init__()
            self.activation = ScalarAndTupleReorderedFunction.apply
            self.linear_a = torch.nn.Linear(output_size, output_size)
            self.linear_b = torch.nn.Linear(output_size, output_size)

        def forward(self, x):
            h = self.linear_a(x)
            h = self.activation(5.0, (-1.0, 2.0), h, -1.0)
            h = self.linear_b(h)
            return h

    output_size = 2

    def model_builder():
        return ScalarAndTupleReorderedModel(output_size)

    def input_generator():
        return torch.randn(output_size, dtype=torch.float)

    # generate a label that have same shape as forward output.
    label_input = torch.ones([output_size])

    run_training_test_and_compare(model_builder, input_generator, label_input)
@pytest.mark.skip(reason="This test is not correct. All tensors modified by in-place operattions should be mark_dirty(...).")
def test_InplaceUpdateInputAsOutputNotRequireGrad():
    """In-place update of a non-grad input returned as output; skipped because the
    Function does not call ctx.mark_dirty() on the mutated tensor."""
    class InplaceUpdateInputAsOutputNotRequireGradFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            # without mark_ditry, the inner computation graph is extracted into
            # another subgraph, which is a duplicated computation with the PythonOp.
            # so for the weights that are used twice BUT SHOULD only used once,
            # the gradients are almost 2x than PyTorch's grad, this is the reason we
            # ignore the gradient compare here.
            ctx.save_for_backward(inplace_update_input, bias)
            return inplace_update_input.add_(3 * bias)

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None

    class InplaceUpdateInputAsOutputNotRequireGradModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputAsOutputNotRequireGradModel, self).__init__()
            self.inplace_op = InplaceUpdateInputAsOutputNotRequireGradFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))

            with torch.no_grad():
                self.bias.uniform_()

        def forward(self, model_input):
            x = model_input.mul(2)
            y1 = self.inplace_op(self.bias, x)  # x did not require grad
            y2 = x.add(self.bias)
            out = y1 + y2
            return out

    output_size = 1024

    def model_builder():
        return InplaceUpdateInputAsOutputNotRequireGradModel(output_size)

    def input_generator():
        return torch.randn(output_size, dtype=torch.float)

    # generate a label that have same shape as forward output.
    label_input = torch.ones([output_size])

    # Test when input is in-place updated, but does not require gradient.
    run_training_test_and_compare(
        model_builder, input_generator, label_input, ignore_grad_compare=True)
@pytest.mark.skip(reason="This test is not correct. All tensors modified by in-place operattions should be mark_dirty(...).")
def test_InplaceUpdateInputNotAsOutputNotRequireGrad():
    """Custom autograd.Function that in-place updates an input (not requiring
    grad) but returns a DERIVED tensor instead of the mutated input, and does
    not call ctx.mark_dirty(). Gradient comparison is skipped below."""
    class InplaceUpdateInputNotAsOutputNotRequireGradFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            ctx.save_for_backward(inplace_update_input, bias)
            inplace_update_input.add_(3 * bias)
            # Output is a new tensor, not the in-place-updated input itself.
            return inplace_update_input * 5
        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None
    class InplaceUpdateInputNotAsOutputNotRequireGradModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputNotAsOutputNotRequireGradModel, self).__init__()
            self.inplace_op = InplaceUpdateInputNotAsOutputNotRequireGradFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            x = model_input.mul(2)
            y1 = self.inplace_op(self.bias, x)
            y2 = x.add(self.bias)
            out = y1 + y2
            return out
    output_size = 1024
    def model_builder():
        return InplaceUpdateInputNotAsOutputNotRequireGradModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Without mark_dirty, the inner computation graph is extracted into another subgraph, which is a duplicated computation with the PythonOp.
    # So for the weights that are used twice BUT SHOULD only used once, the gradients are almost 2x than PyTorch's grad, this is the reason we
    # ignore the gradient compare here.
    run_training_test_and_compare(
        model_builder, input_generator, label_input, ignore_grad_compare=True)
def test_InplaceUpdateInputAsOutputNotRequireGradWithMarkDirty():
    """Correct usage of in-place update on a non-grad input: the mutated input
    is both marked dirty via ctx.mark_dirty() and returned as the output, so
    gradients can be fully compared against PyTorch."""
    class InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            ctx.save_for_backward(inplace_update_input, bias)
            ctx.mark_dirty(inplace_update_input)
            # Be noted: if we make the input dirty, we must also put the input in outputs, otherwise, we will get such an error:
            # "RuntimeError: Some elements marked as dirty during the forward method were not returned as output.
            # The inputs that are modified inplace must all be outputs of the Function.""
            return inplace_update_input.add_(3 * bias)
        @staticmethod
        def backward(ctx, grad_output):
            # Bypass the reduce if we are using only 1 GPU.
            return grad_output, None
    class InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel,
                  self).__init__()
            self.inplace_op = InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            x = model_input.mul(2)
            y1 = self.inplace_op(self.bias, x)
            y2 = x.add(self.bias)
            out = y1 + y2
            return out
    output_size = 1024
    def model_builder():
        return InplaceUpdateInputAsOutputNotRequireGradWithMarkDirtyModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    run_training_test_and_compare(model_builder, input_generator, label_input)
@pytest.mark.skip(reason="This test is not correct. All tensors modified by in-place operattions should be mark_dirty(...).")
def test_InplaceUpdateInputAsOutputRequireGrad():
    """In-place update of an input that DOES require grad, returned as output
    but without ctx.mark_dirty(). Gradient comparison is skipped (see the
    trailing comment for why the gradients diverge from PyTorch)."""
    class InplaceUpdateInputAsOutputRequireGradFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            ctx.save_for_backward(inplace_update_input, bias)
            # Be noted: if we make the input dirty, we must also put the input in outputs, otherwise, we will get such an error:
            # "RuntimeError: Some elements marked as dirty during the forward method were not returned as output. The inputs that are modified inplace must all be outputs of the Function.""
            return inplace_update_input.add_(3 * bias)
        @staticmethod
        def backward(ctx, grad_output):
            # Both inputs require grad here, so both receive a gradient.
            return grad_output, grad_output
    class InplaceUpdateInputAsOutputRequireGradModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputAsOutputRequireGradModel, self).__init__()
            self.inplace_op = InplaceUpdateInputAsOutputRequireGradFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            x = model_input + self.bias
            y1 = self.inplace_op(self.bias, x)
            y2 = x.add(self.bias)
            out = y1 + y2
            return out
    output_size = 1024
    def model_builder():
        return InplaceUpdateInputAsOutputRequireGradModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test when input is in-place updated, but does require gradient.
    #
    # without mark_dirty, the inner computation graph is extracted into another subgraph, which is a
    # duplicated computation with the PythonOp. Thus, for the weights that are used twice BUT SHOULD
    # only used once, the gradients are almost 2x than PyTorch's grad, this is the reason we
    # ignore the gradient compare here.
    run_training_test_and_compare(
        model_builder, input_generator, label_input, ignore_grad_compare=True)
@pytest.mark.skip(reason="This test is not correct. All tensors modified by in-place operattions should be mark_dirty(...).")
def test_InplaceUpdateInputNotAsOutputRequireGrad():
    """In-place update of a grad-requiring input where the Function returns a
    DERIVED tensor rather than the mutated input, without ctx.mark_dirty().
    Gradient comparison is skipped (see trailing comment)."""
    class InplaceUpdateInputNotAsOutputRequireGradFunction(torch.autograd.Function):
        # without mark_dirty, the inner computation graph is extracted into another subgraph, which is a duplicated computation with the PythonOp.
        # so for the weights that are used twice BUT SHOULD only used once, the gradients are almost 2x than PyTorch's grad, this is the reason we
        # ignore the gradient compare here.
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            ctx.save_for_backward(inplace_update_input, bias)
            inplace_update_input.add_(3 * bias)
            return inplace_update_input * 5
        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output
    class InplaceUpdateInputNotAsOutputRequireGradModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputNotAsOutputRequireGradModel, self).__init__()
            self.inplace_op = InplaceUpdateInputNotAsOutputRequireGradFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            x = model_input + self.bias
            y1 = self.inplace_op(self.bias, x)
            y2 = x.add(self.bias)
            out = y1 + y2
            return out
    output_size = 1024
    def model_builder():
        return InplaceUpdateInputNotAsOutputRequireGradModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # This case is known to have an warning message: "The output torch tensor @140214094625024, 140212816617984
    # should reuse the input torch tensor @140214095996104, 140212816617984 but actually not." It seems
    # if we don't have mark_dirty() in auto grad forward, the result is not using the input_,
    # (maybe a view of it, because data address is same)
    run_training_test_and_compare(
        model_builder, input_generator, label_input, ignore_grad_compare=True)
##########################################################################################
def test_InplaceUpdateInputAsOutputRequireGradWithMarkDirty():
    """Correct in-place update of a grad-requiring input: the mutated input is
    marked dirty with ctx.mark_dirty() AND returned as the output, so the full
    gradient comparison against PyTorch is performed."""
    class InplaceUpdateInputAsOutputRequireGradWithMarkDirtyFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, bias, inplace_update_input):
            ctx.save_for_backward(inplace_update_input, bias)
            ctx.mark_dirty(inplace_update_input)
            # Be noted: if we make the input dirty, we must also put the input in outputs,
            # otherwise, we will get such an error:
            # "RuntimeError: Some elements marked as dirty during the forward method were not returned as output.
            # The inputs that are modified inplace must all be outputs of the Function.""
            return inplace_update_input.add_(3 * bias)
        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output
    class InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel(torch.nn.Module):
        def __init__(self, output_size):
            super(InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel,
                  self).__init__()
            self.inplace_op = InplaceUpdateInputAsOutputRequireGradWithMarkDirtyFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            x = model_input + self.bias
            y1 = self.inplace_op(self.bias, x)
            y2 = x.add(self.bias)
            out = y1 + y2
            return out
    output_size = 1024
    def model_builder():
        return InplaceUpdateInputAsOutputRequireGradWithMarkDirtyModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    run_training_test_and_compare(model_builder, input_generator, label_input)
def test_EvalTest():
    """Pure-inference scenario: the model input does not require gradients, so
    only the forward path of the custom autograd.Function is exercised.

    The Function computes the tanh-approximated GELU activation; backward is
    never invoked by run_evaluate_test_and_compare.
    """
    class EvalTestFunction(torch.autograd.Function):
        @staticmethod
        # bias is an optional argument
        def forward(ctx, x):
            ctx.save_for_backward(x)
            # Tanh approximation of GELU: x * 0.5 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))
            return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
        @staticmethod
        def backward(ctx, grad_output):
            # Backward is unreachable in this eval-only test; no gradient is
            # produced. (An unused `x = ctx.saved_tensors` local was removed.)
            return None
    class EvalTestModel(torch.nn.Module):
        def __init__(self, output_size):
            super(EvalTestModel, self).__init__()
            self.custom_fn = EvalTestFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, model_input):
            # model_input did not require_grad
            out = self.custom_fn(model_input)
            return out + self.bias
    output_size = 1024
    def model_builder():
        return EvalTestModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test pure inferencing scenarios, when inputs don't requires_grad.
    run_evaluate_test_and_compare(model_builder, input_generator, label_input)
@pytest.mark.skipif(torch_version_lower_than("1.10.0"),
                    reason='PyTorch older than 1.10.0 has bugs for exporting multiple output custom function')
def test_TwoOutputFunction():
    """Multi-output custom autograd.Function: forward returns (x + y, x * y)
    and backward applies the chain rule for both outputs."""
    class TwoOutputFunction(torch.autograd.Function):
        @staticmethod
        # bias is an optional argument
        def forward(ctx, x, y):
            ctx.save_for_backward(x, y)
            w = x + y
            z = x * y
            return w, z
        @staticmethod
        def backward(ctx, dw, dz):
            x, y = ctx.saved_tensors
            # Based on chain rule, we can derive the Jacobian
            # of this function.
            # dL/dx = dL/dw * dw/dx + dL/dz * dz/dx
            # where
            # dw/dx = 1
            # dz/dx = y
            # Thus, dL/dx can be computed using the
            # following line. Note that dL is omitted
            # for convenience.
            dx = dw * 1.0 + dz * y
            # Similarly, we derive and then implement
            # the Jacobian for dy using chain rule
            # dL/dy = dL/dw * dw/dy + dL/dz * dz/dy
            # where
            # dw/dy = 1
            # dz/dy = x
            dy = dw * 1.0 + dz * x
            return dx, dy
    class TwoOutputModel(torch.nn.Module):
        def __init__(self, output_size):
            super(TwoOutputModel, self).__init__()
            self.fun = TwoOutputFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, x):
            a, b = self.fun(x, self.bias)
            return a + b
    output_size = 2
    def model_builder():
        return TwoOutputModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test multi-input and multi-output custom function.
    run_training_test_and_compare(model_builder, input_generator, label_input)
def test_InnerModuleCall():
    """An autograd.Function whose forward instantiates and calls an inner
    nn.Module — optionally wrapped in ORTModule — and whose backward re-runs
    that inner module under torch.enable_grad() to obtain the input gradient.
    Results with and without ORT are compared on both CUDA and CPU."""
    class InnerModel(torch.nn.Module):
        def __init__(self, dim, device):
            super(InnerModel, self).__init__()
            self.bias = Parameter(torch.FloatTensor([1.0] * dim).to(device))
        def forward(self, x):
            z = 0.5 * x * x + self.bias
            return z
    class OuterFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, dim, device, use_ort):
            ctx.save_for_backward(x)
            ctx.device = device
            # The inner module is kept on ctx so backward can re-invoke it.
            ctx.inner = InnerModel(dim, device).to(device)
            if use_ort:
                enable_custom_autograd_function(ctx.inner)
                ctx.inner = ORTModule(ctx.inner)
            z = ctx.inner(x)
            return z
        @staticmethod
        def backward(ctx, dv):
            x, = ctx.saved_tensors
            # Recompute the inner forward on a detached, grad-enabled copy of
            # the input, then backprop dv through it to get the input grad.
            y = x.detach().to(ctx.device)
            y.requires_grad = True
            g = None
            with torch.enable_grad():
                z = ctx.inner(y)
                z.backward(dv)
                g = y.grad.detach()
            return g, None, None, None
    class OuterModel(torch.nn.Module):
        def __init__(self, dim, device, use_ort):
            super(OuterModel, self).__init__()
            self.fun = OuterFunction.apply
            self.dim = dim
            self.device = device
            self.use_ort = use_ort
            self.bias = Parameter(torch.FloatTensor([1.0] * dim).to(device))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, x):
            z = self.fun(x + self.bias, self.dim, self.device, self.use_ort)
            return z
    def get_inner_module_call_result(x, device, use_ort):
        # Fixed seed so the ORT and plain-PyTorch runs see identical weights.
        torch.manual_seed(0)
        x = x.to(device)
        x.requires_grad = True
        model = OuterModel(2, device, use_ort)
        y = model(x).sum()
        y.backward()
        return y.detach(), x.grad.detach()
    x = torch.FloatTensor([1.0, -1.0])
    # Test indirect ORTModule call from custom function on CUDA.
    result_pth = get_inner_module_call_result(x.detach(), 'cuda:0', False)
    result_ort = get_inner_module_call_result(x.detach(), 'cuda:0', True)
    compare_tensor_list(result_ort, result_pth)
    # Test indirect ORTModule call from custom function on CPU.
    result_ort = get_inner_module_call_result(x.detach(), 'cpu', True)
    result_pth = get_inner_module_call_result(x.detach(), 'cpu', False)
    compare_tensor_list(result_ort, result_pth)
@pytest.mark.skipif(torch_version_lower_than("1.10.0"),
                    reason='PyTorch older than 1.10.0 has bugs for exporting multiple output custom function')
def test_Share_Input():
    """Calls the same multi-output custom Function twice with the SAME inputs
    (x and the bias parameter), checking shared-input handling both when x
    does and does not require grad."""
    class TwoOutputFunction(torch.autograd.Function):
        @staticmethod
        # bias is an optional argument
        def forward(ctx, x, y):
            ctx.save_for_backward(x, y)
            w = x + y
            z = x * y
            return w, z
        @staticmethod
        def backward(ctx, dw, dz):
            x, y = ctx.saved_tensors
            # Chain rule for both outputs (see test_TwoOutputFunction).
            dx = dw * 1.0 + dz * y
            dy = dw * 1.0 + dz * x
            return dx, dy
    class TwoOutputModel(torch.nn.Module):
        def __init__(self, output_size):
            super(TwoOutputModel, self).__init__()
            self.fun = TwoOutputFunction.apply
            self.bias = Parameter(torch.empty(
                output_size,
                device=torch.cuda.current_device(),
                dtype=torch.float))
            with torch.no_grad():
                self.bias.uniform_()
        def forward(self, x):
            a, b = self.fun(x, self.bias)
            c, d = self.fun(x, self.bias)
            return a + b + c + d
    output_size = 2
    def model_builder():
        return TwoOutputModel(output_size)
    def input_generator():
        return torch.randn(output_size, dtype=torch.float)
    def input_generator_with_requires_grad():
        return torch.randn(output_size, dtype=torch.float).requires_grad_()
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test multi-input and multi-output custom function.
    run_training_test_and_compare(model_builder, input_generator, label_input)
    run_training_test_and_compare(model_builder, input_generator_with_requires_grad, label_input)
def test_MultipleStream_InForwardFunction():
    """Forward does part of its work on a side CUDA stream (with explicit
    wait_stream synchronization in both directions); the expected numeric
    output is pinned so any missed synchronization would be detected."""
    class MultipleStreamFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            default_stream = torch.cuda.current_stream()
            ctx.save_for_backward(input)
            stream = torch.cuda.Stream()
            # Delay the default stream so unsynchronized work would race.
            torch.cuda._sleep(1000 * 1000)
            input = input * 0.2
            # on different stream
            with torch.cuda.stream(stream):
                stream.wait_stream(default_stream)
                input= input * 2
            default_stream.wait_stream(stream)
            return input
        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            return grad_output
    class MultipleStreamModel(torch.nn.Module):
        def __init__(self, output_size):
            super(MultipleStreamModel, self).__init__()
            self.relu = MultipleStreamFunction.apply
        def forward(self, model_input):
            b = model_input * 0.2
            out = self.relu(b)
            return out
    output_size = 2
    def model_builder():
        return MultipleStreamModel(output_size)
    def input_generator():
        return torch.tensor([2.8, 3.4], requires_grad=True)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test a custom function that uses multiple CUDA streams in forward;
    # expected outputs are input * 0.2 * 0.2 * 2.
    run_training_test_and_compare(model_builder, input_generator, label_input,
                                  expected_outputs=[torch.tensor([0.224, 0.272])])
def test_NonDefaultStream_InForwardFunction1():
    """The entire custom-Function body (save_for_backward and the multiply)
    runs on a non-default CUDA stream, synchronized against the default
    stream on entry and exit; expected numeric output is pinned."""
    class MultipleStreamFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            default_stream = torch.cuda.current_stream()
            stream = torch.cuda.Stream()
            # on different stream
            with torch.cuda.stream(stream):
                stream.wait_stream(default_stream)
                ctx.save_for_backward(input)
                input = input * 0.4
            default_stream.wait_stream(stream)
            return input
        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            return grad_output
    class MultipleStreamModel(torch.nn.Module):
        def __init__(self, output_size):
            super(MultipleStreamModel, self).__init__()
            self.relu = MultipleStreamFunction.apply
        def forward(self, model_input):
            model_input = model_input * 0.2
            # Delay the default stream so a missed wait_stream would race.
            torch.cuda._sleep(1000 * 1000)
            out = self.relu(model_input)
            return out
    output_size = 2
    def model_builder():
        return MultipleStreamModel(output_size)
    def input_generator():
        return torch.tensor([2.8, 3.4], requires_grad=True)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test a custom function whose forward runs on a non-default stream;
    # expected outputs are input * 0.2 * 0.4.
    run_training_test_and_compare(model_builder, input_generator, label_input,
                                  expected_outputs=[torch.tensor([0.224, 0.272])])
def test_NonDefaultStream_InForwardFunction2():
    """The CALLER invokes the whole custom Function from inside a non-default
    CUDA stream context (the Function body itself is stream-agnostic);
    expected numeric output is pinned."""
    class MultipleStreamFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            ctx.save_for_backward(input)
            # Delay the current stream so a missed wait_stream would race.
            torch.cuda._sleep(1000 * 1000)
            input = input * 0.4
            return input
        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            return grad_output
    class MultipleStreamModel(torch.nn.Module):
        def __init__(self, output_size):
            super(MultipleStreamModel, self).__init__()
            self.relu = MultipleStreamFunction.apply
        def forward(self, model_input):
            model_input = model_input * 0.2
            stream = torch.cuda.Stream()
            default_stream = torch.cuda.current_stream()
            # on different stream
            with torch.cuda.stream(stream):
                stream.wait_stream(default_stream)
                out = self.relu(model_input)
            default_stream.wait_stream(stream)
            return out
    output_size = 2
    def model_builder():
        return MultipleStreamModel(output_size)
    def input_generator():
        return torch.tensor([2.8, 3.4], requires_grad=True)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test a custom function invoked from a non-default stream;
    # expected outputs are input * 0.2 * 0.4.
    run_training_test_and_compare(model_builder, input_generator, label_input,
                                  expected_outputs=[torch.tensor([0.224, 0.272])])
def test_NonDefaultStreamInplaceUpdate_InForwardFunction():
    """In-place update (mul_) performed on a non-default CUDA stream, with
    ctx.mark_dirty() on the mutated input; expected numeric output is pinned.
    NOTE(review): the tensor is saved via save_for_backward BEFORE mul_, so
    the saved tensor reflects the mutated values — presumably intentional,
    since backward only forwards grad_output."""
    class MultipleStreamFunction(torch.autograd.Function):
        @staticmethod
        def forward(ctx, input):
            default_stream = torch.cuda.current_stream()
            stream = torch.cuda.Stream()
            # on different stream
            with torch.cuda.stream(stream):
                stream.wait_stream(default_stream)
                ctx.save_for_backward(input)
                input.mul_(0.4)
            ctx.mark_dirty(input)
            default_stream.wait_stream(stream)
            return input
        @staticmethod
        def backward(ctx, grad_output):
            input, = ctx.saved_tensors
            return grad_output
    class MultipleStreamModel(torch.nn.Module):
        def __init__(self, output_size):
            super(MultipleStreamModel, self).__init__()
            self.relu = MultipleStreamFunction.apply
        def forward(self, model_input):
            model_input = model_input * 0.2
            # Delay the default stream so a missed wait_stream would race.
            torch.cuda._sleep(1000 * 1000)
            out = self.relu(model_input)
            return out
    output_size = 2
    def model_builder():
        return MultipleStreamModel(output_size)
    def input_generator():
        return torch.tensor([2.8, 3.4], requires_grad=True)
    # generate a label that has the same shape as forward output.
    label_input = torch.ones([output_size])
    # Test in-place update on a non-default stream;
    # expected outputs are input * 0.2 * 0.4.
    run_training_test_and_compare(model_builder, input_generator, label_input,
                                  expected_outputs=[torch.tensor([0.224, 0.272])])
| [
"noreply@github.com"
] | noreply@github.com |
e5e3f29cb08bf189ba52f5187375038a2eccff04 | f3255cdb954a23169d6a76e5ae64f083a30c2fa6 | /pyplot/plot_disk_benchmarks.py | 404b5c408bacee19c8c8a480018dd6e3845f9dd2 | [] | no_license | dgaer/benchmarks | 35d775c0d24da59f533fa9adacfd56c90ed53c2b | b5160b458389d3a984a24cd7950b50a63d3dc4e9 | refs/heads/master | 2021-01-19T23:41:08.846216 | 2019-01-08T20:06:01 | 2019-01-08T20:06:01 | 89,008,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,689 | py | ##!/usr/bin/python
# *******************************
# ******** Start of File ********
# *******************************
# -----------------------------------------------------------
# Python Script
# Operating System(s): RHEL 6, 7
# Python version used: 2.6.x, 2.7.x, numpy, matplotlib
# Original Author(s): Douglas.Gaer@noaa.gov
# File Creation Date: 04/23/2016
# Date Last Modified: 04/24/2016
#
# Version control: 1.01
#
# Support Team:
#
# Contributors:
# -----------------------------------------------------------
# ------------- Program Description and Details -------------
# -----------------------------------------------------------
#
# Plot script for disk speed benchmaks
#
# -----------------------------------------------------------
import sys
import os
import time
# Output our program setup and ENV
# (Python 2 script: uses print statements throughout.)
PYTHON = os.environ.get('PYTHON')
PYTHONPATH = os.environ.get('PYTHONPATH')
# NOTE(review): "ploting" typo below is part of the user-visible string;
# fixing it would change program output, so it is left untouched here.
print ' Python ploting program: ' + sys.argv[0]
if not PYTHON:
    print 'WARNING - PYTHON variable not set in callers ENV'
else:
    print ' Python interpreter: ' + PYTHON
if not PYTHONPATH:
    print 'WARNING - PYTHONPATH variable not set in callers ENV'
else:
    print ' Python path: ' + PYTHONPATH
import datetime
import numpy as np
import ConfigParser
# Generate images without having a window appear
# http://matplotlib.org/faq/howto_faq.html
INTERACTIVE = True
if len(sys.argv) > 2:
    # A second CLI argument (output image path) switches to headless mode.
    INTERACTIVE = False
import matplotlib
if not INTERACTIVE:
    # Select the Agg backend BEFORE pylab/pyplot are imported so figures can
    # be rendered without an X display.
    matplotlib.use('Agg')
from pylab import *
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.font_manager import fontManager, FontProperties
from mpl_toolkits.basemap import Basemap
import matplotlib.dates as mdate
import datetime as dt
# --- CLI validation and benchmark-config parsing -------------------------
if len(sys.argv) < 2:
    print 'ERROR - You must supply the name of our input control config file'
    print 'Usage 1: ' + sys.argv[0] + ' disk_benchmark.dat'
    print 'Usage 2: ' + sys.argv[0] + ' disk_benchmark.dat disk_benchmark.png'
    sys.exit()
Config = ConfigParser.ConfigParser()
fname = sys.argv[1]
if os.path.isfile(fname):
    print "Reding pyplot CFG file: " + fname
    Config.read(fname)
else:
    print 'ERROR - Cannot open pyplot CFG file ' + fname
    sys.exit()
if not Config.has_section("DISKBENCHMARK"):
    print 'ERROR - Pyplot CFG file missing DISKBENCHMARK section'
    sys.exit()
if not Config.has_section("PLOTINFO"):
    print 'ERROR - Pyplot CFG file missing PLOTINFO section'
    sys.exit()
# Comma-separated series in the config are split into lists of strings;
# matplotlib converts them to numbers when plotting.
num_inserts = Config.getint('DISKBENCHMARK', 'num_inserts')
num_inserts_str = Config.get('DISKBENCHMARK', 'num_inserts_str')
write_time = Config.get('DISKBENCHMARK', 'write_time').split(",")
write_time_avg = Config.get('DISKBENCHMARK', 'write_time_avg').split(",")
xticks = Config.get('DISKBENCHMARK', 'xticks').split(",")
xticks_str = Config.get('DISKBENCHMARK', 'xticks_str').split(",")
insert_time = Config.getfloat('DISKBENCHMARK', 'insert_time')
insert_time_avg = Config.getfloat('DISKBENCHMARK', 'insert_time_avg')
read_time = Config.get('DISKBENCHMARK', 'read_time').split(",")
read_time_avg = Config.get('DISKBENCHMARK', 'read_time_avg').split(",")
search_time = Config.getfloat('DISKBENCHMARK', 'search_time')
search_time_avg = Config.getfloat('DISKBENCHMARK', 'search_time_avg')
remove_time = Config.get('DISKBENCHMARK', 'remove_time').split(",")
remove_time_avg = Config.get('DISKBENCHMARK', 'remove_time_avg').split(",")
delete_time = Config.getfloat('DISKBENCHMARK', 'delete_time')
delete_time_avg = Config.getfloat('DISKBENCHMARK', 'delete_time_avg')
rewrite_time = Config.get('DISKBENCHMARK', 'rewrite_time').split(",")
rewrite_time_avg = Config.get('DISKBENCHMARK', 'rewrite_time_avg').split(",")
reinsert_time = Config.getfloat('DISKBENCHMARK', 'reinsert_time')
reinsert_time_avg = Config.getfloat('DISKBENCHMARK', 'reinsert_time_avg')
DPI = Config.getint('PLOTINFO', 'DPI')
plot_type = Config.get('PLOTINFO', 'plot_type')
# --- Plotting: per-sample series or running averages, then save/show -----
if plot_type == 'averages':
    plt.plot(write_time_avg, color='blue', label="Write time")
    plt.plot(read_time_avg, color='green', label="Read time")
    plt.plot(remove_time_avg, color='red', label="Delete time")
    plt.plot(rewrite_time_avg, color='darkblue', label="Rewrite time")
    plt.ylabel('Average Write/Read/Delete (seconds)', fontsize=12)
else:
    plt.plot(write_time, color='blue', label="Write time")
    plt.plot(read_time, color='green', label="Read time")
    plt.plot(remove_time, color='red', label="Delete time")
    plt.plot(rewrite_time, color='darkblue', label="Rewrite time")
    plt.ylabel('Write/Read/Delete (seconds)', fontsize=12)
plt.legend(loc='upper right', fancybox=True, shadow=True, prop=dict(size=8,weight='bold'),ncol=4)
plt.grid(True, which="major", linestyle="dotted")
ind = np.arange(len(xticks_str))
plt.xticks(ind, xticks_str, rotation=75)
plt.xlabel('Number of Btree Index Keys', fontsize=12)
plot_title = "Btree Disk Speed Test For " + num_inserts_str + " Keys"
# Optional HOSTINFO section overrides the default title with host/date/build.
if Config.has_section("HOSTINFO"):
    host = Config.get('HOSTINFO', 'host')
    date = Config.get('HOSTINFO', 'date')
    build = Config.get('HOSTINFO', 'build')
    title = Config.get('HOSTINFO', 'title')
    plot_title = title +': '+ host +' '+ date +' '+ build +' build'
plt.title(plot_title, fontsize=12,weight='bold')
plt.subplots_adjust(hspace=.23, left=.14, bottom=.16, right=.91, top=.90, wspace=.23)
if not INTERACTIVE:
    # Headless mode: sys.argv[2] is the output image path.
    plotimg = sys.argv[2]
    print 'Plot file = ' + plotimg
    plt.savefig(plotimg,dpi=DPI,bbox_inches='tight',pad_inches=0.1)
if INTERACTIVE:
    plt.show()
print 'Plot complete'
plt.clf()
# -----------------------------------------------------------
# *******************************
# ********* End of File *********
# *******************************
| [
"Douglas.Gaer@noaa.gov"
] | Douglas.Gaer@noaa.gov |
00d013ba9687410318ea2bdddc37aaee330f8bd5 | e5c7a2cb734d7b6a9827cea4b3e9e68ad136b85b | /main.py | 8c4067b83eb6cb457164fc1b76cd6b9bfb7687d0 | [] | no_license | infraredZ/PyQT5 | 8512d0680ea8c1cf48c12eda6a87016b76f90644 | ee79ccc61e1e02b3bfe92b89e5a26fcc5adf6925 | refs/heads/master | 2020-12-28T04:03:12.836411 | 2020-02-04T09:26:34 | 2020-02-04T09:26:34 | 238,171,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py | from PyQt5.QtWidgets import QApplication
import myMainWindow
import sys
if __name__ == '__main__':
app = QApplication(sys.argv)
mainwindow = myMainWindow.myMainWindow()
mainwindow.show()
sys.exit(app.exec_())
| [
"15202861280@163.com"
] | 15202861280@163.com |
21e1b21c1fef83bcf99bbebfc235137963e6ae58 | 4c97433643879cc2386f5d0c6bf586f35fe3b075 | /create_song_text_files/main.py | a9d9fddbd87771b26a542e8cb0ca5c6717095424 | [] | no_license | CIPHERGEMS/Phase1 | 6f1b5d64a90150576aaa44e5c9820e56d989088c | 0b477e1fae470a7d053a4650fa1ce967bc629079 | refs/heads/main | 2023-05-27T03:38:12.698519 | 2020-11-10T08:59:49 | 2020-11-10T08:59:49 | 304,770,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,501 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 10 22:12:19 2020
@author: Dave
"""
import os
import pickle
import re
import pandas as pd
from uuid import getnode as get_mac
def load_obj(name):
    """Deserialize and return the object stored in the pickle file *name*.

    *name* must be the full path including any extension (note the asymmetry
    with save_obj, which appends '.pkl' itself).
    """
    with open(name, 'rb') as handle:
        return pickle.load(handle)
def save_obj(obj, name):
    """Pickle *obj* to ``<name>.pkl`` using the highest protocol available."""
    target = name + '.pkl'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def keep_alphanumeric_space_underscore(string):
    """Sanitize *string* into a filesystem-friendly token.

    Drops every character that is not an ASCII letter, digit or space, then
    converts each run of one or more spaces into a single underscore.

    Fix: the original chained ``replace(" ", "_")`` + ``replace("__", "_")``
    collapsed only one level of doubling, so three or more consecutive
    spaces still produced multiple underscores (e.g. "a   b" -> "a__b").
    Using a regex collapses any run in one pass.
    """
    string = re.sub(r'[^A-Za-z0-9 ]+', '', string)
    return re.sub(r' +', '_', string)
def keep_alphanumeric_space(string):
    """Return *string* with everything except ASCII letters, digits and
    spaces stripped out."""
    cleaned = re.sub(r'[^A-Za-z0-9 ]+', '', string)
    return cleaned
def main():
    """Walk each artist directory under <base>/Desktop/music_data/Genius,
    write per-song lyric text files, a per-artist tracklist (txt + xlsx),
    and pickle song-id -> year/artist/title/album lookup dicts.

    The base path is selected by MAC address (developer machines) and falls
    back to a Linux home directory.
    """
    mac = get_mac()
    main_path = "/home/dbrowne/"
    if mac == 45015790230696: # dave's laptop
        main_path = "C:/Users/dbrowne/"
    if mac == 198112232959511: # dave's home
        main_path = "C:/Users/dave/"
    data_folder = 'Genius'
    # One subdirectory per artist; each contains a pickled metadata dict.
    artist_path_lst = [f.path for f in os.scandir(os.path.join(main_path,'Desktop','music_data',data_folder)) if f.is_dir() ]
    year_dict = {}
    artist_dict = {}
    title_dict = {}
    album_dict = {}
    for artist_path in artist_path_lst:
        _, artist_name = os.path.split(artist_path)
        # text file to save artist tracks- id,name,album
        out_textfiles = os.path.join(artist_path,'text_song_files')
        os.makedirs(out_textfiles,exist_ok=True)
        # NOTE(review): outF is closed manually below; a `with` block would be
        # safer but is avoided here to keep the code byte-identical.
        outF = open(os.path.join(artist_path,artist_name+'_tracklist.txt'), "w")
        metadata_dict = load_obj(artist_path + '/' +'metadata.pkl')
        for song_id,song_meta_dict in metadata_dict.items():
            lyrics = song_meta_dict['lyrics']
            # release_date assumed to be 'YYYY-MM-DD'; keep only the year.
            year = (song_meta_dict['release_date'].split('-'))[0]
            album = song_meta_dict['album']
            album = keep_alphanumeric_space_underscore(album)
            title = song_meta_dict['title']
            title = keep_alphanumeric_space_underscore(title)
            outF.write(str(song_id)+','+str(title)+','+str(album)+','+str(year)+"\n")
            # One text file per song; lyrics are forced to ASCII (non-ASCII
            # characters are silently dropped by encode(..., 'ignore')).
            with open(out_textfiles+'/'+'id_'+str(song_id)+'.txt', 'w') as out_songtext:
                for liney in lyrics.split('\n'):
                    liney = re.sub('\\r', '', liney)
                    liney = liney.encode('ascii',errors='ignore')
                    out_songtext.write(str(liney.decode('utf-8'))+'\n')
            year_dict[song_id] = year
            artist_dict[song_id] = artist_name
            title_dict[song_id] = title
            album_dict[song_id] = album
        outF.close()
        # write tracklist to excel file
        filepath_in = os.path.join(artist_path,artist_name+'_tracklist.txt')
        filepath_out = os.path.join(artist_path,artist_name+'_tracklist.xlsx')
        pd.read_csv(filepath_in, delimiter=",").to_excel(filepath_out, index=False,
                              header = ['Song_id','Title','Album','Year'])
    # Persist the global lookup tables next to the artist folders.
    save_obj(year_dict, os.path.join(main_path,'Desktop','music_data',data_folder) +'/'+'year_dict')
    save_obj(artist_dict, os.path.join(main_path,'Desktop','music_data',data_folder) +'/'+'artist_dict')
    save_obj(title_dict, os.path.join(main_path,'Desktop','music_data',data_folder) +'/'+'title_dict')
    save_obj(album_dict, os.path.join(main_path,'Desktop','music_data',data_folder) +'/'+'album_dict')
# Run the pipeline only when executed as a script, not on import.
if __name__ == '__main__':
    main()
"noreply@github.com"
] | noreply@github.com |
f632da1a09f68ac9dc98d92c87a45c3d48be3d42 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_None/trend_PolyTrend/cycle_0/ar_/test_artificial_32_None_PolyTrend_0__20.py | a4acd452378510898a95c0d7c7399843cf84e0e1 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 263 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art

# Run one artificial-benchmark configuration: 32 daily points, polynomial
# trend, no cycle, no AR term, identity transform, 20 exogenous series.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 0, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 0);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
8105b5e240ed50ebab8d4237de4287212a077d45 | 8c55d93116982758740665fdf93a57d7668d62f3 | /calls/bin/registry-read.py | 077e111f7c9656f2119fe7a8ed3124acc0c3e36b | [] | no_license | Ngahu/Making-web-calls | 42971fbb5835a46237854d45702f7feb50dd9314 | df7e0d9032db914b73a9f19a73be18453e524f6e | refs/heads/master | 2021-07-11T06:20:36.953011 | 2016-09-22T09:22:24 | 2016-09-22T09:22:24 | 68,893,415 | 0 | 1 | null | 2020-07-26T08:34:38 | 2016-09-22T06:55:32 | Python | UTF-8 | Python | false | false | 4,984 | py | #!/SHARED-THINGS/ONGOING/making calls/calls/bin/python
# Copyright (c) 2003-2015 CORE Security Technologies)
#
# This software is provided under under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Author: Alberto Solino (@agsolino)
#
# Description: A Windows Registry Reader Example
#
# Reference for:
# winregistry.py
#
import impacket
from impacket.examples import logger
from impacket import version
from impacket import winregistry
import sys
import argparse
import ntpath
def bootKey(reg):
    """Derive and print (hex-encoded) the SYSTEM hive boot key ("syskey").

    The 16 raw bytes are collected from the class data of four well-known
    LSA subkeys (each class is a UTF-16LE hex string contributing 4 bytes),
    then de-scrambled with a fixed permutation table.

    Python 2 code.  NOTE(review): not reachable from main()'s CLI actions.

    :param reg: an open winregistry.Registry over a SYSTEM hive
    """
    baseClass = 'ControlSet001\\Control\\Lsa\\'
    keys = ['JD','Skew1','GBG','Data']
    tmpKey = ''
    for key in keys:
        # Class data is UTF-16LE; first 8 hex chars decode to 4 key bytes.
        tmpKey = tmpKey + reg.getClass(baseClass + key).decode('utf-16le')[:8].decode('hex')
    # Fixed permutation that descrambles the 16 collected bytes.
    transforms = [ 8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7 ]
    syskey = ''
    for i in xrange(len(tmpKey)):
        syskey += tmpKey[transforms[i]]
    print syskey.encode('hex')
def getClass(reg, className):
    """Print a hexdump of the class data stored on registry key *className*.

    Silently returns when the key has no class data.

    :param reg: open winregistry.Registry
    :param className: full key path whose class data is wanted
    """
    regKey = ntpath.dirname(className)
    regClass = ntpath.basename(className)
    value = reg.getClass(className)
    if value is None:
        return
    print "[%s]" % regKey
    print "Value for Class %s: \n" % regClass,
    winregistry.hexdump(value,' ')
def getValue(reg, keyValue):
    """Print the data stored in registry value *keyValue*.

    The enclosing key header is printed even when the value is missing;
    the function then returns without printing data.

    :param reg: open winregistry.Registry
    :param keyValue: full "key\\value" path
    """
    regKey = ntpath.dirname(keyValue)
    regValue = ntpath.basename(keyValue)
    value = reg.getValue(keyValue)
    print "[%s]\n" % regKey
    if value is None:
        return
    print "Value for %s:\n " % regValue,
    reg.printValue(value[0],value[1])
def enumValues(reg, searchKey):
    """List every value under *searchKey* together with its decoded data.

    Silently returns when the key does not exist.

    :param reg: open winregistry.Registry
    :param searchKey: full path of the key whose values are enumerated
    """
    key = reg.findKey(searchKey)
    if key is None:
        return
    print "[%s]\n" % searchKey
    values = reg.enumValues(key)
    for value in values:
        print " %-30s: " % (value),
        data = reg.getValue('%s\\%s'%(searchKey,value))
        # Binary data gets surrounding blank lines so the hexdump reads well.
        if data[0] == winregistry.REG_BINARY:
            print ''
            reg.printValue(data[0],data[1])
            print ''
        else:
            reg.printValue(data[0],data[1])
def enumKey(reg, searchKey, isRecursive, indent=' '):
    """Print the subkeys of *searchKey*; recurse into each when *isRecursive*.

    *indent* grows by one level per recursion depth to render a tree.
    Silently returns when the key does not exist.

    :param reg: open winregistry.Registry
    :param searchKey: key path to enumerate ('\\' for the hive root)
    :param isRecursive: descend into subkeys when True
    """
    parentKey = reg.findKey(searchKey)
    if parentKey is None:
        return
    keys = reg.enumKey(parentKey)
    for key in keys:
        print "%s%s" %(indent, key)
        if isRecursive is True:
            if searchKey == '\\':
                # Root is already '\\'; avoid producing a doubled separator.
                enumKey(reg, '\\%s'%(key),isRecursive,indent+' ')
            else:
                enumKey(reg, '%s\\%s'%(searchKey,key),isRecursive,indent+' ')
def walk(reg, keyName):
    """Return the registry walk generator starting at *keyName* (not consumed here)."""
    return reg.walk(keyName)
def main():
    """Parse command-line arguments and dispatch to the requested hive action.

    Actions: enum_key, enum_values, get_value, get_class, walk.
    NOTE(review): the WALK action calls walk() but discards the returned
    generator, so it produces no output as written.
    """
    print version.BANNER
    parser = argparse.ArgumentParser(add_help = True, description = "Reads data from registry hives.")
    parser.add_argument('hive', action='store', help='registry hive to open')
    subparsers = parser.add_subparsers(help='actions', dest='action')
    # A enum_key command
    enumkey_parser = subparsers.add_parser('enum_key', help='enumerates the subkeys of the specified open registry key')
    enumkey_parser.add_argument('-name', action='store', required=True, help='registry key')
    enumkey_parser.add_argument('-recursive', dest='recursive', action='store_true', required=False, help='recursive search (default False)')
    # A enum_values command
    enumvalues_parser = subparsers.add_parser('enum_values', help='enumerates the values for the specified open registry key')
    enumvalues_parser.add_argument('-name', action='store', required=True, help='registry key')
    # A get_value command
    getvalue_parser = subparsers.add_parser('get_value', help='retrieves the data for the specified registry value')
    getvalue_parser.add_argument('-name', action='store', required=True, help='registry value')
    # A get_class command
    getclass_parser = subparsers.add_parser('get_class', help='retrieves the data for the specified registry class')
    getclass_parser.add_argument('-name', action='store', required=True, help='registry class name')
    # A walk command
    walk_parser = subparsers.add_parser('walk', help='walks the registry from the name node down')
    walk_parser.add_argument('-name', action='store', required=True, help='registry class name to start walking down from')
    # No arguments at all: show usage instead of an argparse error.
    if len(sys.argv)==1:
        parser.print_help()
        sys.exit(1)
    options = parser.parse_args()
    reg = winregistry.Registry(options.hive)
    # Dispatch on the chosen subcommand.
    if options.action.upper() == 'ENUM_KEY':
        print "[%s]" % options.name
        enumKey(reg, options.name, options.recursive)
    elif options.action.upper() == 'ENUM_VALUES':
        enumValues(reg, options.name)
    elif options.action.upper() == 'GET_VALUE':
        getValue(reg, options.name)
    elif options.action.upper() == 'GET_CLASS':
        getClass(reg, options.name)
    elif options.action.upper() == 'WALK':
        walk(reg, options.name)
    reg.close()

if __name__ == "__main__":
    main()
| [
"jamaalaraheem@gmail.com"
] | jamaalaraheem@gmail.com |
2c5fb9bc6be3248ac3b35d7d12190b2ea8d205a5 | b9efe70d12c2cbd55065d02e974f5725534583ee | /old_scripts/show_corpus.py | b687f9489733eacbea05242368defb71cc58c4e7 | [] | no_license | diegoami/bankdomain_PY | 5089581ea7b7db6233243dff305488ff27dc8e90 | 83816e1beb96d3e9e0f746bec7f9db9521f32ee7 | refs/heads/master | 2022-12-17T05:05:13.557911 | 2020-06-03T22:19:44 | 2020-06-03T22:19:44 | 131,530,574 | 0 | 0 | null | 2022-12-08T01:30:27 | 2018-04-29T21:12:25 | HTML | UTF-8 | Python | false | false | 996 | py |
import yaml
from repository.mongo_ops import copy_into_qa_documents, split_qa_documents_into_questions, print_all_questions, iterate_questions_in_mongo
from preprocess.preprocessor import create_corpus, load_corpus, print_corpus
from language.custom_lemmas import my_component
from textacy.corpus import Corpus
import spacy
if __name__ == '__main__':
    # Load paths from the YAML config file.
    config = yaml.safe_load(open("config.yml"))
    data_dir = config['data_dir']                  # unused below -- presumably legacy
    mongo_connection = config['mongo_connection']  # unused below
    corpus_out_dir = config['corpus_dir']
    corpus_filename = config['corpus_filename']    # unused below
    corpus_proc_filename = config['corpus_proc_filename']
    # Rebuild a fresh German corpus from the saved processed corpus, attach
    # the custom pipeline component, and dump lemma document frequencies.
    corpus = load_corpus(corpus_out_dir+'/'+corpus_proc_filename)
    new_corpus = Corpus('de')
    new_corpus.spacy_lang.add_pipe(my_component, name='print_length', last=True)
    new_corpus.add_texts([doc.text for doc in corpus] )
    #print_corpus(corpus)
    # corpus.spacy_vocab
    print(new_corpus.word_doc_freqs(normalize=u'lemma', as_strings=True))
| [
"diego.amicabile@gmail.com"
] | diego.amicabile@gmail.com |
e5b88d1db6c5e84ccaaeb489af96251143efc9a7 | df609b4ab0616e43060b6e60b2871804889f7cd2 | /books/views.py | 11b18b2b304c84c4dad909f1bf2dd528d56d4e71 | [] | no_license | Bruce6110/bookstore_project | 7f9135f5d231aa3ddb4f60dc141e81170ec47ca0 | 1f8f64478a11e1ed63b19c9ce5a47f59089c2265 | refs/heads/master | 2020-12-20T07:27:19.368330 | 2020-03-08T17:32:17 | 2020-03-08T17:32:17 | 236,001,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 881 | py | from django.contrib.auth.mixins import (
LoginRequiredMixin, PermissionRequiredMixin)
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Book
# Create your views here.
class BookListView(LoginRequiredMixin, PermissionRequiredMixin, ListView):
    """List all books; requires login plus the 'books.special_status' permission."""
    model = Book
    context_object_name = 'book_list'  # overrides the default 'object_list'
    template_name = 'books/book_list.html'
    login_url = 'account_login'  # where LoginRequiredMixin redirects anonymous users
    permission_required = 'books.special_status'  # enforced by PermissionRequiredMixin
class BookDetailView(LoginRequiredMixin, PermissionRequiredMixin, DetailView):
    """Show a single book; requires login plus the 'books.special_status' permission.

    Bug fix: the class set ``permission_required`` but did not inherit
    ``PermissionRequiredMixin``, so the permission was never enforced and
    any logged-in user could view the page.  Adding the mixin (already
    imported at the top of this file) makes the attribute effective and
    matches BookListView above.
    """
    model = Book
    context_object_name = 'book'  # overrides the default 'object'
    template_name = 'books/book_detail.html'
    login_url = 'account_login'  # where LoginRequiredMixin redirects anonymous users
    permission_required = 'books.special_status'  # now enforced by PermissionRequiredMixin
| [
"bruce6110@yahoo.com"
] | bruce6110@yahoo.com |
001eeb153eb08184386c8e3e4f2a4d9c7f6da5d3 | 9be1ab6f7cc9e1e8474b7c76ef89284b54782c46 | /chapter3_if_loops/exercise1.py | a681e3d606a2dc9be86b07992c25292f6194d43d | [] | no_license | Nateque123/python_tutorials | 8d9842d46570e6cecd7aa5419b9f77bc4468d391 | 83743acf4862155c5837c154d0422f74d0629043 | refs/heads/master | 2022-11-20T11:39:02.565456 | 2020-07-24T11:08:34 | 2020-07-24T11:08:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 283 | py | # Guess number by taking one number from user without loop.
# Guess-the-number check without a loop: compare a single user guess
# against the fixed winning number and print a hint.
winning_num = 5
unum = int(input("Enter your number: "))

if unum == winning_num:
    print("You won! Congrats")
elif unum < winning_num:
    print("Increase number")
else:
    print("Decrease number")
| [
"nateque999@gmail.com"
] | nateque999@gmail.com |
08da355ed5009788d673daf96c0f5f8075c62524 | 77ab53380f74c33bb3aacee8effc0e186b63c3d6 | /720_longest_word_in_dictionary.py | b1ef8b4f98d24725eeb93e621ed887835df90cb5 | [] | no_license | tabletenniser/leetcode | 8e3aa1b4df1b79364eb5ca3a97db57e0371250b6 | d3ebbfe2e4ab87d5b44bc534984dfa453e34efbd | refs/heads/master | 2023-02-23T18:14:31.577455 | 2023-02-06T07:09:54 | 2023-02-06T07:09:54 | 94,496,986 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | '''
Given a list of strings words representing an English Dictionary, find the longest word in words that can be built one character at a time by other words in words. If there is more than one possible answer, return the longest word with the smallest lexicographical order.
If there is no answer, return the empty string.
Example 1:
Input:
words = ["w","wo","wor","worl", "world"]
Output: "world"
Explanation:
The word "world" can be built one character at a time by "w", "wo", "wor", and "worl".
Example 2:
Input:
words = ["a", "banana", "app", "appl", "ap", "apply", "apple"]
Output: "apple"
Explanation:
Both "apply" and "apple" can be built from other words in the dictionary. However, "apple" is lexicographically smaller than "apply".
Note:
All the strings in the input will only contain lowercase letters.
The length of words will be in the range [1, 1000].
The length of words[i] will be in the range [1, 30].
'''
class Solution(object):
    def longestWord(self, words):
        """Return the longest word buildable one character at a time from
        other words in *words*, ties broken by smallest lexicographic order
        (LeetCode 720).

        :type words: List[str]
        :rtype: str

        After sorting, every proper prefix of a word appears before the word
        itself, so a single pass with a set of already-buildable words
        suffices: a word is buildable iff its one-char-shorter prefix is.
        This is O(total characters) instead of re-probing every prefix of
        every word, and the sorted order guarantees that the first longest
        hit is the lexicographically smallest answer.  (Also replaces the
        Python-2-only ``xrange`` with code that runs on both 2 and 3.)
        """
        words.sort()
        buildable = set([''])  # '' makes single-letter words qualify
        result = ''
        for w in words:
            if w[:-1] in buildable:
                buildable.add(w)
                if len(w) > len(result):
                    result = w
        return result
# Ad-hoc smoke test (Python 2 print statements); expected output: world, apple.
s = Solution()
print s.longestWord(["w","wo","wor","worl", "world"])
print s.longestWord(["a", "banana", "app", "appl", "ap", "apply", "apple"])
| [
"tabletenniser@gmail.com"
] | tabletenniser@gmail.com |
dfd501bb33df129a01c969bde6725ef09d1325c5 | 8c33efc07f6612d16b94112cc1b48e0494ef850d | /lib/test.py | db1bbec354a86a00c0e2ee5fbba1d45344f4f681 | [] | no_license | PSanni/Python_Bank_Wrapper | 0d4b6a4936ec6c898a125351438e28335f2feb0f | fbeaa1a8e4dae261a34137571dc48df54073085f | refs/heads/master | 2021-01-11T09:11:42.571943 | 2016-12-23T17:25:32 | 2016-12-23T17:25:32 | 77,239,774 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | from IFSCWrapper import Wrapper
from IFSCException import IFSCWrapperException
obj=Wrapper()
try:
obj.init()
print(obj.getBankList())
except IFSCWrapperException as es:
print(es)
| [
"patel.sanni5@gmail.com"
] | patel.sanni5@gmail.com |
be8c540a99b53bbefe14a2c997460b36892e6584 | 994a6ef944617213227a9231d54d1543ba5fd282 | /examples/mnist_test2.py | fdcebd3c9ab45ecc848b635ce582a0a77f58a66c | [
"MIT"
] | permissive | dli7319/cmsc498v_convex | 7360503536269d580db21ffdbb4ccc1de31d296f | 5578b8f07575860a93eb4894ce9f7b224c90cba9 | refs/heads/master | 2020-04-12T11:57:26.747680 | 2018-12-19T20:23:08 | 2018-12-19T20:23:08 | 162,477,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,276 | py | import waitGPU
# import setGPU
# waitGPU.wait(utilization=50, available_memory=5000, interval=60)
# waitGPU.wait(gpu_ids=[0], utilization=20, available_memory=5000, interval=60)
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
# cudnn.benchmark = True
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import setproctitle
import problems as pblm
from trainer import *
import math
import numpy as np
import datetime
def select_model(m):
    """Build the MNIST model named by *m* ('large', 'wide', 'deep', or default)
    and move it to the GPU.

    NOTE(review): reads the module-level ``args`` (for ``model_factor``) and
    binds a local ``test_loader`` that is never returned or used --
    presumably leftover; confirm before relying on loaders created here.
    """
    if m == 'large':
        model = pblm.mnist_model_large().cuda()
        _, test_loader = pblm.mnist_loaders(8)
    elif m == 'wide':
        print("Using wide model with model_factor={}".format(args.model_factor))
        _, test_loader = pblm.mnist_loaders(64//args.model_factor)
        model = pblm.mnist_model_wide(args.model_factor).cuda()
    elif m == 'deep':
        print("Using deep model with model_factor={}".format(args.model_factor))
        _, test_loader = pblm.mnist_loaders(64//(2**args.model_factor))
        model = pblm.mnist_model_deep(args.model_factor).cuda()
    else:
        model = pblm.mnist_model().cuda()
    return model
if __name__ == "__main__":
    # Train/evaluate two methods back to back on MNIST; each writes its own
    # set of log files and checkpoints under the "test2/" prefix.
    for method in ["robust", "mix"]:
        waitGPU.wait(utilization=20, available_memory=6000, interval=2)
        args = pblm.argparser(opt='adam', verbose=200, starting_epsilon=0.01)
        args.method = method
        args.batch_size = 30
        args.test_batch_size = 20
        args.prefix = "test2/" + args.method + ""
        args.epochs = 50
        print(args)
        print("saving file to {}".format(args.prefix))
        setproctitle.setproctitle(args.prefix)
        # One log per training/evaluation mode.
        train_log = open(args.prefix + "_train.log", "w")
        baseline_test_log = open(args.prefix + "_baseline_test.log", "w")
        madry_test_log = open(args.prefix + "_madry_test.log", "w")
        robust_test_log = open(args.prefix + "_robust_test.log", "w")
        full_test_log = open(args.prefix + "_full_test.log", "w")
        train_loader, _ = pblm.mnist_loaders(args.batch_size)
        _, test_loader = pblm.mnist_loaders(args.test_batch_size)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        # Grab one batch so args2kwargs can size its bounds from a real X.
        for X,y in train_loader:
            break
        kwargs = pblm.args2kwargs(args, X=Variable(X.cuda()))
        best_err = 1
        sampler_indices = []
        model = [select_model(args.model)]
        start_time = datetime.datetime.now()
        # Cascade: each stage trains a new model on the examples the previous
        # stages could not certify.
        for _ in range(0,args.cascade):
            if _ > 0:
                # reduce dataset to just uncertified examples
                print("Reducing dataset...")
                train_loader = sampler_robust_cascade(train_loader, model, args.epsilon,
                                                      args.test_batch_size,
                                                      norm_type=args.norm_test, bounded_input=True, **kwargs)
                if train_loader is None:
                    print('No more examples, terminating')
                    break
                sampler_indices.append(train_loader.sampler.indices)
                print("Adding a new model")
                model.append(select_model(args.model))
            if args.opt == 'adam':
                opt = optim.Adam(model[-1].parameters(), lr=args.lr)
            elif args.opt == 'sgd':
                opt = optim.SGD(model[-1].parameters(), lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
            else:
                raise ValueError("Unknown optimizer")
            lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)
            eps_schedule = np.linspace(args.starting_epsilon,
                                       args.epsilon,
                                       args.schedule_length)
            for t in range(args.epochs):
                # LR decay starts counting only after the epsilon ramp window.
                lr_scheduler.step(epoch=max(t-len(eps_schedule), 0))
                # if t < len(eps_schedule) and args.starting_epsilon is not None:
                #    epsilon = float(eps_schedule[t])
                # else:
                #    epsilon = args.epsilon
                # Epsilon ramp disabled: full epsilon from the first epoch.
                epsilon = args.epsilon
                # standard training
                if args.method == "baseline":
                    train_baseline(train_loader, model[0], opt, t, train_log,
                                   args.verbose)
                elif args.method == "madry":
                    train_madry(train_loader, model[0], args.epsilon,
                                opt, t, train_log, args.verbose)
                elif args.method == "robust":
                    train_robust(train_loader, model[0], opt, epsilon, t,
                                 train_log, args.verbose, args.real_time,
                                 norm_type=args.norm_train, bounded_input=True, **kwargs)
                elif args.method == "mix":
                    # "mix": adversarial (Madry) warm-up, then robust training.
                    if t < 20:
                        train_madry(train_loader, model[0], args.epsilon,
                                    opt, t, train_log, args.verbose)
                    else:
                        train_robust(train_loader, model[0], opt, args.epsilon, t,
                                     train_log, args.verbose, args.real_time,
                                     norm_type=args.norm_train, bounded_input=True, **kwargs)
                time_diff = datetime.datetime.now() - start_time
                print("Train Time Diff")
                print(time_diff.total_seconds())
                # Evaluate under all three criteria every epoch.
                baseline_err = evaluate_baseline(test_loader, model[0], t, baseline_test_log,
                                                 args.verbose)
                madry_err = evaluate_madry(test_loader, model[0], args.epsilon,
                                           t, madry_test_log, args.verbose)
                robust_err = evaluate_robust(test_loader, model[0], args.epsilon,
                                             t, robust_test_log, args.verbose, args.real_time,
                                             norm_type=args.norm_test, bounded_input=True, **kwargs)
                err = robust_err
                # Rebase start_time so the running total excludes evaluation time.
                start_time = datetime.datetime.now() - time_diff
                print(time_diff.total_seconds(), baseline_err, madry_err, robust_err ,file=full_test_log)
                print("err")
                print(err)
                print("best_err")
                print(best_err)
                # Keep the best (lowest robust error) snapshot separately from
                # the rolling per-epoch checkpoint.
                if err < best_err:
                    best_err = err
                    torch.save({
                        'state_dict' : [m.state_dict() for m in model],
                        'err' : best_err,
                        'epoch' : t,
                        'sampler_indices' : sampler_indices
                        }, args.prefix + "_best.pth")
                torch.save({
                    'state_dict': [m.state_dict() for m in model],
                    'err' : err,
                    'epoch' : t,
                    'sampler_indices' : sampler_indices
                    }, args.prefix + "_checkpoint.pth")
                # Hard wall-clock budget: stop this method after ~8000s of training.
                if time_diff.total_seconds() > 8000:
                    break
| [
"dli7319@gmail.com"
] | dli7319@gmail.com |
e4974989c28e06102fb2dec94de7078ee824532b | 4105c370b01f96689750c19bb76bf9242cfed58e | /map_locations/migrations/0007_auto_20191228_1902.py | 6fa525a94523978cefa79bfbd7f87e08c7b5e6c0 | [] | no_license | jstir123/TravelSite | 531c33e645bda5316ff37ddca6c57f30d45347bf | b2e1422a3f9bf2e926f25e7589bbcd6d3972b5cb | refs/heads/master | 2022-04-08T13:39:10.777971 | 2020-02-02T16:35:07 | 2020-02-02T16:35:07 | 237,797,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 603 | py | # Generated by Django 2.2.8 on 2019-12-28 19:02
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
    # Adds the optional ``state`` CharField to Trips and alters ``country``
    # to a django-countries CountryField (2-character ISO code).

    dependencies = [
        ('map_locations', '0006_auto_20191226_2048'),
    ]

    operations = [
        migrations.AddField(
            model_name='trips',
            name='state',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='trips',
            name='country',
            field=django_countries.fields.CountryField(max_length=2),
        ),
    ]
| [
"jstir123@gmail.com"
] | jstir123@gmail.com |
14130f9213698f332277814fe64761e8dda9cdf0 | 61276745d10e75c2fa79db890e1d4713623e8fa8 | /honeypot_money/honeypot_money/settings.py | 1d16597dbb935bdc341aa521aa331ceb3ed3b0d1 | [] | no_license | zoek1/honeypot_money | 7458787b568424052cbaade0d0bc247b5a056f17 | 4095df11a02ef1dfc3acebb9740b59e9efab3e79 | refs/heads/master | 2023-04-17T18:46:45.014291 | 2021-04-30T06:51:24 | 2021-04-30T06:51:24 | 360,924,936 | 0 | 0 | null | 2021-04-23T15:16:04 | 2021-04-23T15:16:03 | null | UTF-8 | Python | false | false | 4,101 | py | """
Django settings for honeypot_money project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import environ
from pathlib import Path
env = environ.Env()
# reading .env file
environ.Env.read_env()
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# local apps
'campaigns'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'honeypot_money.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'honeypot_money.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Blockchain-related settings.  BLOCKCHAIN_NETWORK selects the Optimism
# network below; both values come from the environment (.env).
# NOTE(review): django-environ's env('X') with no default presumably raises
# when the variable is unset -- confirm deployment always provides both.
BLOCKCHAIN_NETWORK = env('BLOCKCHAIN_NETWORK')
INFURA_API_KEY = env('INFURA_API_KEY')
POP_BASE_SITE = 'http://proofofpersonhood.com/'
INCREASE_TRUST_SITE = 'https://gitcoin.co/profile/trust'
NULL_SCORE = -1       # sentinel for "no score available"
MINIMUM_SCORE = 30    # minimum personhood score accepted
# TEST NETWORK
# NOTE(review): if BLOCKCHAIN_NETWORK is neither 'TEST' nor 'MAINNET',
# none of the NETWORK_* / POP_NETWORK names are defined at all.
if BLOCKCHAIN_NETWORK == 'TEST':
    NETWORK_NAME = 'Optimistic Ethereum Kovan'
    NETWORK_URL = 'https://kovan.optimism.io'
    NETWORK_CHAIN_ID = 69
    NETWORK_EXPLORER = 'https://kovan-l2-explorer.surge.sh'
    # Person Hood Score
    POP_NETWORK = 'rinkeby'
if BLOCKCHAIN_NETWORK == 'MAINNET':
    NETWORK_NAME = 'Optimistic Ethereum'
    NETWORK_URL = 'https://mainnet.optimism.io'
    NETWORK_CHAIN_ID = 10
    NETWORK_EXPLORER = 'https://mainnet-l2-explorer.surge.sh'
    # Person Hood Score
    POP_NETWORK = 'mainnet'
| [
"miguel@gordian.dev"
] | miguel@gordian.dev |
328db55556c99db0397b257b2b8409cbd026e942 | a3120b88d457e917d647e871d5e1e26bda69b2bc | /코테전문제풀이/11725_트리의부모찾기.py | 0f472fc650a16697350aaca347d8975210fa09ce | [] | no_license | MMyungji/algorithm2 | 91d5c6860f38096e9ed8bfde897760da8295e4d7 | d8c4b6dd318a8ac1b25c7ff6e24f1c5181b3a3eb | refs/heads/main | 2023-08-15T22:05:43.570267 | 2021-10-22T11:12:31 | 2021-10-22T11:12:31 | 374,083,658 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | # 트리를 그래프로 구현 -> 부모노드 찾기(dfs,bfs)
# The tree only ever descends one way (no revisiting once the back-edge is
# removed), so unlike general graph traversal no visited list is needed.
import sys
input = sys.stdin.readline

N = int(input())
graph_tree = [[] for _ in range(N+1)]  # adjacency list, 1-indexed
parents = [[] for _ in range(N+1)]     # parents[i] ends up as [parent of i]
for _ in range(N-1):
    a,b = map(int,input().split())
    graph_tree[a].append(b)
    graph_tree[b].append(a)
# Iterative DFS from the root (node 1): record the current node as the
# parent of each neighbour and delete the back-edge so children never
# walk back up.
stack = [1]
while stack:
    node = stack.pop()
    for i in graph_tree[node]:
        stack.append(i)
        graph_tree[i].remove(node)  # O(deg) removal; prevents revisiting
        parents[i].append(node)
# Print the parent of every node 2..N.
for i in range(2,N+1):
    print(parents[i][0])
# 런타임에러
# N = int(input())
# dic = {1:0}
# for _ in range(N-1):
# a,b = map(int,input().split())
# if a in dic:
# dic[b]=a
# else:
# dic[a]=b
# for i in range(2,N+1):
# print(dic[i]) | [
"myungji@jeongmyeongjiui-MacBook-Pro-2.local"
] | myungji@jeongmyeongjiui-MacBook-Pro-2.local |
4a7b23545013b057482b2759fce65e737b51d55d | 2ed68b4945ba3226ffb9cb8666b26ccc319e994e | /student_project/settings.py | 0a84e90ecf27089b81b347445888ef21f1eae8bb | [] | no_license | SamuelJa/University_Django_DI | ea03757aeeb5f1ab491d274239879fc0f1046636 | 44ee471557697d0f6530943894f53bc872617638 | refs/heads/master | 2020-04-11T08:05:12.315988 | 2018-12-13T12:02:46 | 2018-12-13T12:02:46 | 161,631,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,332 | py | """
Django settings for student_project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SITE_ROOT= os.path.dirname(os.path.realpath(__file__))
STATIC_DIT= os.path.join(SITE_ROOT, 'static/')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n&!9_jm1%x&n4g_4s62+%r(lcb+(1rz4&%!q=+j*-vuqoh=hpl'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'student_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'student_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'student_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# PostgreSQL connection for the local 'university' database.
# SECURITY(review): credentials are hard-coded in source control; move
# PASSWORD (and ideally the whole block) into environment variables.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'university',
        'USER':'postgres',
        'PASSWORD': '12345',
        'HOST': 'localhost','PORT': '5432',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
#STATIC_DIRS = [
#STATIC_DIR,
#]
| [
"samos@MacBook-Pro-de-Samuel.local"
] | samos@MacBook-Pro-de-Samuel.local |
0841367580c591daa0a371cc7528b12976f2c8e3 | e937b9962982aeef2476d8b2362907be787f93b1 | /dayByDay/day4/day4.py | 6e93d8bf9f824ff5763e4c4a7af0f9aab2463528 | [] | no_license | wanglanqing/Python_Project | e3bef6d5794d5181a0b85d09700ae90a3b0779f3 | 13241cbf22f2e0c7a52ad2b969744ad40dd68085 | refs/heads/master | 2021-09-08T01:36:14.849587 | 2018-03-05T10:17:38 | 2018-03-05T10:17:38 | 109,321,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | '''
输入某年某月某日,判断这一天是这一年的第几天?
'''
from day4.day4exception import *
class GetDays(object):
    """Compute the ordinal day-of-year for a given (year, month, day).

    Validation failures are reported through the project exceptions
    LowThanZero / GreatThan12 / GreatThan31 (imported from
    day4.day4exception).  Note: only the 1..31 range is checked for the
    day, not the actual length of the given month (e.g. Feb 31 passes
    validation).
    """

    def __init__(self):
        # Filled by get_date(); keys: 'year', 'month', 'day'.
        self.date_dict = {}

    def get_date(self, year, month, day):
        """Validate and store the date; return the populated date dict.

        Inputs are coerced with int() (floats are truncated).

        Raises LowThanZero for non-positive values, GreatThan12 for
        month > 12, and GreatThan31 for day > 31.
        """
        self.year = int(year)
        self.month = int(month)
        self.day = int(day)
        if self.year <= 0:
            raise LowThanZero()
        self.date_dict['year'] = self.year
        if self.month <= 0:
            raise LowThanZero()
        elif self.month > 12:
            raise GreatThan12()
        self.date_dict['month'] = self.month
        if self.day <= 0:
            raise LowThanZero()
        elif self.day > 31:
            raise GreatThan31()
        self.date_dict['day'] = self.day
        return self.date_dict

    def is_leap_year(self):
        """Return True if the stored year is a Gregorian leap year.

        Bug fix: the original checked only ``% 4 == 0 and % 100 != 0``,
        ignoring the 400-year rule, so century leap years such as 2000
        were misclassified as common years.  Also now always returns a
        bool instead of True/None.
        """
        year = self.date_dict['year']
        return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)

    def get_days(self):
        """Print and return the day's ordinal number within its year."""
        big_months = [1, 3, 5, 7, 8, 10, 12]  # 31-day months
        small_months = [4, 6, 9, 11]          # 30-day months
        days = 0
        for i in range(1, self.date_dict['month']):
            if i in big_months:
                days += 31
            elif i in small_months:
                days += 30
            elif self.is_leap_year():  # only February reaches this branch
                days += 29
            else:
                days += 28
        days += self.date_dict['day']
        print(days)
        return days
if __name__ == '__main__':
    # Smoke test: 2012.7 is truncated to 2012 by int() inside get_date;
    # prints 366 (Dec 31 of leap year 2012).  Any validation exception is
    # caught and printed instead of crashing.
    try:
        gd = GetDays()
        gd.get_date(2012.7,12,31)
        gd.get_days()
    except Exception as e:
        print(e)
"wanglanqing1122@126.com"
] | wanglanqing1122@126.com |
904e8d80323fae614cfb5f3fb04f3f7c5945a920 | 826f90a6c8aa0806fa28368bc1be382c69e4471e | /learning/old/test3.py | ba59005898516b9d1f962ac3646d1b9bf084cef8 | [
"Apache-2.0"
] | permissive | TinaCloud/CS229-project | 6153be2e7110eb64789e81ffc2cd20bd78e98579 | 5c8ac4774ef8441976d3c7f7e8d959830f34092f | refs/heads/master | 2021-05-28T15:36:08.764347 | 2013-12-17T22:54:03 | 2013-12-17T22:54:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,522 | py | from __future__ import division
import numpy as np
from glob import glob
import os.path
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn import cross_validation
import sklearn.svm
from sklearn.utils import shuffle
# Get the secure Jos data: the set of securely-attributed Josquin pieces.
jos_sec_set = set()
with open('../data/Josquin_secure.txt') as f:
    for line in f:
        jos_sec_set.add(line.strip())
# Build the dataset: label 1 for secure Josquin pieces, 0 for any
# non-Josquin composer; insecurely attributed Josquin pieces are skipped.
features = []
labels = []
names = []
for name in glob('../data/correct_data/*feature1.npy'):
    arr = np.load(name)
    name = os.path.basename(name).split('_')[0]
    if name[:3] != 'Jos': # Non-Jos composer
        label = 0
    elif name in jos_sec_set: # Secure Jos
        label = 1
    else:
        continue
    labels.append(label)
    features.append(arr)
    names.append(name)
labels, features, names = shuffle(labels, features, names)
# NOTE(review): shuffle() on list inputs presumably returns lists, yet the
# loop below indexes them with index arrays (features[train]) -- confirm
# they become numpy arrays somewhere, otherwise this raises TypeError.
clf = LogisticRegression()
#clf = MultinomialNB()
#clf = sklearn.svm.SVC()
#clf = sklearn.svm.LinearSVC()
from sklearn.metrics import precision_score, accuracy_score

# 10-fold stratified cross-validation; report average accuracy/precision.
accu = []
prec = []
cv = cross_validation.StratifiedKFold(labels, n_folds=10)
for train, test in cv:
    clf.fit(features[train], labels[train])
    pred = clf.predict(features[test])
    accu.append(accuracy_score(labels[test], pred))
    prec.append(precision_score(labels[test], pred))
print np.average(accu), np.average(prec)
'''
scores = cross_validation.cross_val_score(
clf, features, labels, "average_precision", cv=cv, n_jobs=-1
)
print np.average(scores)
'''
| [
"fcchou@stanford.edu"
] | fcchou@stanford.edu |
ae998783f6f09edee5eb0409239e0811735c2f57 | 141b42d9d72636c869ff2ce7a2a9f7b9b24f508b | /myvenv/Lib/site-packages/phonenumbers/data/region_SJ.py | 30448b9dce8f8518c9cc53db0649a80ffccfe27c | [
"BSD-3-Clause"
] | permissive | Fa67/saleor-shop | 105e1147e60396ddab6f006337436dcbf18e8fe1 | 76110349162c54c8bfcae61983bb59ba8fb0f778 | refs/heads/master | 2021-06-08T23:51:12.251457 | 2018-07-24T08:14:33 | 2018-07-24T08:14:33 | 168,561,915 | 1 | 0 | BSD-3-Clause | 2021-04-18T07:59:12 | 2019-01-31T17:00:39 | Python | UTF-8 | Python | false | false | 1,464 | py | """Auto-generated file, do not edit by hand. SJ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata

# Phone-number metadata for region "SJ" (Svalbard and Jan Mayen), which
# shares country calling code +47 with Norway. NOTE(review): this module is
# auto-generated by the phonenumbers build tooling ("do not edit by hand"
# per the module docstring) — regenerate rather than patching values here.
PHONE_METADATA_SJ = PhoneMetadata(id='SJ', country_code=47, international_prefix='00',
    # Envelope for all valid numbers: 5-digit short numbers starting with 0,
    # or 8-digit numbers starting with 4, 5, 7, 8 or 9.
    general_desc=PhoneNumberDesc(national_number_pattern='0\\d{4}|[45789]\\d{7}', possible_length=(5, 8)),
    fixed_line=PhoneNumberDesc(national_number_pattern='79\\d{6}', example_number='79123456', possible_length=(8,)),
    mobile=PhoneNumberDesc(national_number_pattern='(?:4[015-8]|5[89]|9\\d)\\d{6}', example_number='41234567', possible_length=(8,)),
    toll_free=PhoneNumberDesc(national_number_pattern='80[01]\\d{5}', example_number='80012345', possible_length=(8,)),
    premium_rate=PhoneNumberDesc(national_number_pattern='82[09]\\d{5}', example_number='82012345', possible_length=(8,)),
    shared_cost=PhoneNumberDesc(national_number_pattern='810(?:0[0-6]|[2-8]\\d)\\d{3}', example_number='81021234', possible_length=(8,)),
    personal_number=PhoneNumberDesc(national_number_pattern='880\\d{5}', example_number='88012345', possible_length=(8,)),
    voip=PhoneNumberDesc(national_number_pattern='85[0-5]\\d{5}', example_number='85012345', possible_length=(8,)),
    # UAN covers the 5-digit short-number range plus specific 8-digit blocks.
    uan=PhoneNumberDesc(national_number_pattern='0\\d{4}|81(?:0(?:0[7-9]|1\\d)|5\\d{2})\\d{3}', example_number='01234', possible_length=(5, 8)),
    voicemail=PhoneNumberDesc(national_number_pattern='81[23]\\d{5}', example_number='81212345', possible_length=(8,)))
| [
"gruzdevasch@gmail.com"
] | gruzdevasch@gmail.com |
60746f163888644e6f853c4952f956ee3b25ab2a | 4d80c481f6d2271e4143c69cde5306d25ab6d737 | /A01/P02/P02E03.py | 861cd9c5d490ebe300c9c6b90ecbafdbfc6ceb8f | [] | no_license | snebotcifpfbmoll/DAMProgramacion | c0156099c1eb1a3e40a108b9e3acea13bf438531 | 6dea9dd76c2e7283369bb6cd945e71e000c436ec | refs/heads/master | 2020-08-08T04:35:40.559872 | 2020-05-07T09:30:34 | 2020-05-07T09:30:34 | 213,715,033 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # P02E03: Serafi Nebot Ginard
# P02E03: Serafi Nebot Ginard
# Read an integer from the user and report whether it is even.
valor = int(input("Escribe un numero: "))
es_par = valor % 2 == 0
print("El numero es par." if es_par else "El numero no es par")
| [
"snebot@gmail.com"
] | snebot@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.